diff --git a/data/templates/frr/babeld.frr.j2 b/data/templates/frr/babeld.frr.j2 index 344a5f988..292bd9972 100644 --- a/data/templates/frr/babeld.frr.j2 +++ b/data/templates/frr/babeld.frr.j2 @@ -1,85 +1,83 @@ {% from 'frr/distribute_list_macro.j2' import render_distribute_list %} {% from 'frr/ipv6_distribute_list_macro.j2' import render_ipv6_distribute_list %} ! {# Interface specific configuration #} {% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} interface {{ iface }} {% if iface_config.type is vyos_defined('wired') or iface_config.type is vyos_defined('wireless') %} babel {{ iface_config.type }} {% endif %} {% if iface_config.split_horizon is vyos_defined("enable") %} babel split-horizon {% elif iface_config.split_horizon is vyos_defined("disable") %} no babel split-horizon {% endif %} {% if iface_config.hello_interval is vyos_defined %} babel hello-interval {{ iface_config.hello_interval }} {% endif %} {% if iface_config.update_interval is vyos_defined %} babel update-interval {{ iface_config.update_interval }} {% endif %} {% if iface_config.rxcost is vyos_defined %} babel rxcost {{ iface_config.rxcost }} {% endif %} {% if iface_config.rtt_decay is vyos_defined %} babel rtt-decay {{ iface_config.rtt_decay }} {% endif %} {% if iface_config.rtt_min is vyos_defined %} babel rtt-min {{ iface_config.rtt_min }} {% endif %} {% if iface_config.rtt_max is vyos_defined %} babel rtt-max {{ iface_config.rtt_max }} {% endif %} {% if iface_config.max_rtt_penalty is vyos_defined %} babel max-rtt-penalty {{ iface_config.max_rtt_penalty }} {% endif %} {% if iface_config.enable_timestamps is vyos_defined %} babel enable-timestamps {% endif %} {% if iface_config.channel is vyos_defined %} babel channel {{ iface_config.channel | replace("non-interfering", "noninterfering") }} {% endif %} exit ! {% endfor %} {% endif %} ! -{# Babel configuration #} router babel {% if parameters.diversity is vyos_defined %} babel diversity {% endif %} {% if parameters.diversity_factor is vyos_defined %} babel diversity-factor {{ parameters.diversity_factor }} {% endif %} {% if parameters.resend_delay is vyos_defined %} babel resend-delay {{ parameters.resend_delay }} {% endif %} {% if parameters.smoothing_half_life is vyos_defined %} babel smoothing-half-life {{ parameters.smoothing_half_life }} {% endif %} {% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} network {{ iface }} {% endfor %} {% endif %} {% if redistribute is vyos_defined %} {% for address_family in redistribute %} {% for protocol, protocol_config in redistribute[address_family].items() %} {% if protocol is vyos_defined('ospfv3') %} {% set protocol = 'ospf6' %} {% endif %} redistribute {{ address_family }} {{ protocol }} {% endfor %} {% endfor %} {% endif %} {% if distribute_list.ipv4 is vyos_defined %} {{ render_distribute_list(distribute_list.ipv4) }} {% endif %} {% if distribute_list.ipv6 is vyos_defined %} {{ render_ipv6_distribute_list(distribute_list.ipv6) }} {% endif %} exit ! 
-end diff --git a/data/templates/frr/bgpd.frr.j2 b/data/templates/frr/bgpd.frr.j2 index e5bfad59d..51a3f2564 100644 --- a/data/templates/frr/bgpd.frr.j2 +++ b/data/templates/frr/bgpd.frr.j2 @@ -1,664 +1,674 @@ {### MACRO definition for recurring peer patter, this can be either fed by a ###} {### peer-group or an individual BGP neighbor ###} {% macro bgp_neighbor(neighbor, config, peer_group=false) %} +{# BGP order of peer-group and remote-as placement must be honored #} {% if peer_group == true %} neighbor {{ neighbor }} peer-group -{% elif config.peer_group is vyos_defined %} - neighbor {{ neighbor }} peer-group {{ config.peer_group }} -{% endif %} -{% if config.remote_as is vyos_defined %} +{% if config.remote_as is vyos_defined %} + neighbor {{ neighbor }} remote-as {{ config.remote_as }} +{% endif %} +{% else %} +{% if config.remote_as is vyos_defined %} neighbor {{ neighbor }} remote-as {{ config.remote_as }} +{% endif %} +{% if config.peer_group is vyos_defined %} + neighbor {{ neighbor }} peer-group {{ config.peer_group }} +{% endif %} {% endif %} {% if config.local_role is vyos_defined %} {% for role, strict in config.local_role.items() %} neighbor {{ neighbor }} local-role {{ role }} {{ 'strict-mode' if strict }} {% endfor %} {% endif %} {% if config.interface.remote_as is vyos_defined %} neighbor {{ neighbor }} interface remote-as {{ config.interface.remote_as }} {% endif %} {% if config.advertisement_interval is vyos_defined %} neighbor {{ neighbor }} advertisement-interval {{ config.advertisement_interval }} {% endif %} {% if config.bfd is vyos_defined %} neighbor {{ neighbor }} bfd {% if config.bfd.check_control_plane_failure is vyos_defined %} neighbor {{ neighbor }} bfd check-control-plane-failure {% endif %} {% if config.bfd.profile is vyos_defined %} neighbor {{ neighbor }} bfd profile {{ config.bfd.profile }} {% endif %} {% endif %} {% if config.capability.dynamic is vyos_defined %} neighbor {{ neighbor }} capability dynamic {% endif %} {% if config.capability.extended_nexthop is vyos_defined %} neighbor {{ neighbor }} capability extended-nexthop {% endif %} {% if config.capability.software_version is vyos_defined %} neighbor {{ neighbor }} capability software-version {% endif %} {% if config.description is vyos_defined %} neighbor {{ neighbor }} description {{ config.description }} {% endif %} {% if config.disable_capability_negotiation is vyos_defined %} neighbor {{ neighbor }} dont-capability-negotiate {% endif %} {% if config.disable_connected_check is vyos_defined %} neighbor {{ neighbor }} disable-connected-check {% endif %} {% if config.ebgp_multihop is vyos_defined %} neighbor {{ neighbor }} ebgp-multihop {{ config.ebgp_multihop }} {% endif %} {% if config.graceful_restart is vyos_defined %} {% if config.graceful_restart is vyos_defined('enable') %} {% set graceful_restart = 'graceful-restart' %} {% elif config.graceful_restart is vyos_defined('disable') %} {% set graceful_restart = 'graceful-restart-disable' %} {% elif config.graceful_restart is vyos_defined('restart-helper') %} {% set graceful_restart = 'graceful-restart-helper' %} {% endif %} neighbor {{ neighbor }} {{ graceful_restart }} {% endif %} {% if config.local_as is vyos_defined %} {% for local_as, local_as_config in config.local_as.items() %} {# There can be only one local-as value, this is checked in the Python code #} neighbor {{ neighbor }} local-as {{ local_as }} {{ 'no-prepend' if local_as_config.no_prepend is vyos_defined }} {{ 'replace-as' if local_as_config.no_prepend is vyos_defined and 
local_as_config.no_prepend.replace_as is vyos_defined }} {% endfor %} {% endif %} {% if config.override_capability is vyos_defined %} neighbor {{ neighbor }} override-capability {% endif %} {% if config.passive is vyos_defined %} neighbor {{ neighbor }} passive {% endif %} {% if config.password is vyos_defined %} neighbor {{ neighbor }} password {{ config.password }} {% endif %} {% if config.path_attribute.discard is vyos_defined %} neighbor {{ neighbor }} path-attribute discard {{ config.path_attribute.discard | join(' ') }} {% endif %} {% if config.path_attribute.treat_as_withdraw is vyos_defined %} neighbor {{ neighbor }} path-attribute treat-as-withdraw {{ config.path_attribute.treat_as_withdraw }} {% endif %} {% if config.port is vyos_defined %} neighbor {{ neighbor }} port {{ config.port }} {% endif %} {% if config.shutdown is vyos_defined %} neighbor {{ neighbor }} shutdown {% endif %} {% if config.solo is vyos_defined %} neighbor {{ neighbor }} solo {% endif %} {% if config.enforce_first_as is vyos_defined %} neighbor {{ neighbor }} enforce-first-as {% endif %} {% if config.strict_capability_match is vyos_defined %} neighbor {{ neighbor }} strict-capability-match {% endif %} {% if config.ttl_security.hops is vyos_defined %} neighbor {{ neighbor }} ttl-security hops {{ config.ttl_security.hops }} {% endif %} {% if config.timers.connect is vyos_defined %} neighbor {{ neighbor }} timers connect {{ config.timers.connect }} {% endif %} {% if config.timers.keepalive is vyos_defined and config.timers.holdtime is vyos_defined %} neighbor {{ neighbor }} timers {{ config.timers.keepalive }} {{ config.timers.holdtime }} {% endif %} {% if config.update_source is vyos_defined %} neighbor {{ neighbor }} update-source {{ config.update_source }} {% endif %} {% if config.interface is vyos_defined %} {% if config.interface.peer_group is vyos_defined %} neighbor {{ neighbor }} interface peer-group {{ config.interface.peer_group }} {% endif %} {% if config.interface.source_interface is vyos_defined %} neighbor {{ neighbor }} interface {{ config.interface.source_interface }} {% endif %} {% if config.interface.v6only is vyos_defined %} {% if config.interface.v6only.peer_group is vyos_defined %} neighbor {{ neighbor }} interface v6only peer-group {{ config.interface.v6only.peer_group }} {% endif %} {% if config.interface.v6only.remote_as is vyos_defined %} neighbor {{ neighbor }} interface v6only remote-as {{ config.interface.v6only.remote_as }} {% endif %} {% endif %} {% endif %} ! 
{% if config.address_family is vyos_defined %} {% for afi, afi_config in config.address_family.items() %} {% if afi == 'ipv4_unicast' %} address-family ipv4 unicast {% elif afi == 'ipv4_multicast' %} address-family ipv4 multicast {% elif afi == 'ipv4_labeled_unicast' %} address-family ipv4 labeled-unicast {% elif afi == 'ipv4_vpn' %} address-family ipv4 vpn {% elif afi == 'ipv4_flowspec' %} address-family ipv4 flowspec {% elif afi == 'ipv6_unicast' %} address-family ipv6 unicast {% elif afi == 'ipv6_multicast' %} address-family ipv6 multicast {% elif afi == 'ipv6_labeled_unicast' %} address-family ipv6 labeled-unicast {% elif afi == 'ipv6_vpn' %} address-family ipv6 vpn {% elif afi == 'ipv6_flowspec' %} address-family ipv6 flowspec {% elif afi == 'l2vpn_evpn' %} address-family l2vpn evpn {% endif %} {% if afi_config.addpath_tx_all is vyos_defined %} neighbor {{ neighbor }} addpath-tx-all-paths {% endif %} {% if afi_config.addpath_tx_per_as is vyos_defined %} neighbor {{ neighbor }} addpath-tx-bestpath-per-AS {% endif %} {% if afi_config.allowas_in is vyos_defined %} neighbor {{ neighbor }} allowas-in {{ afi_config.allowas_in.number if afi_config.allowas_in.number is vyos_defined }} {% endif %} {% if afi_config.as_override is vyos_defined %} neighbor {{ neighbor }} as-override {% endif %} {% if afi_config.conditionally_advertise is vyos_defined %} {% if afi_config.conditionally_advertise.advertise_map is vyos_defined %} {% set exist_non_exist_map = 'exist-map' %} {% if afi_config.conditionally_advertise.exist_map is vyos_defined %} {% set exist_non_exist_map = 'exist-map ' ~ afi_config.conditionally_advertise.exist_map %} {% elif afi_config.conditionally_advertise.non_exist_map is vyos_defined %} {% set exist_non_exist_map = 'non-exist-map ' ~ afi_config.conditionally_advertise.non_exist_map %} {% endif %} neighbor {{ neighbor }} advertise-map {{ afi_config.conditionally_advertise.advertise_map }} {{ exist_non_exist_map }} {% endif %} {% endif %} {% if afi_config.remove_private_as is vyos_defined %} neighbor {{ neighbor }} remove-private-AS {{ 'all' if afi_config.remove_private_as.all is vyos_defined }} {% endif %} {% if afi_config.route_reflector_client is vyos_defined %} neighbor {{ neighbor }} route-reflector-client {% endif %} {% if afi_config.weight is vyos_defined %} neighbor {{ neighbor }} weight {{ afi_config.weight }} {% endif %} {% if afi_config.attribute_unchanged is vyos_defined %} neighbor {{ neighbor }} attribute-unchanged {{ 'as-path ' if afi_config.attribute_unchanged.as_path is vyos_defined }}{{ 'med ' if afi_config.attribute_unchanged.med is vyos_defined }}{{ 'next-hop ' if afi_config.attribute_unchanged.next_hop is vyos_defined }} {% endif %} {% if afi_config.capability.orf.prefix_list.send is vyos_defined %} neighbor {{ neighbor }} capability orf prefix-list send {% endif %} {% if afi_config.capability.orf.prefix_list.receive is vyos_defined %} neighbor {{ neighbor }} capability orf prefix-list receive {% endif %} {% if afi_config.default_originate is vyos_defined %} neighbor {{ neighbor }} default-originate {{ 'route-map ' ~ afi_config.default_originate.route_map if afi_config.default_originate.route_map is vyos_defined }} {% endif %} {% if afi_config.distribute_list.export is vyos_defined %} neighbor {{ neighbor }} distribute-list {{ afi_config.distribute_list.export }} out {% endif %} {% if afi_config.distribute_list.import is vyos_defined %} neighbor {{ neighbor }} distribute-list {{ afi_config.distribute_list.import }} in {% endif %} {% if 
afi_config.filter_list.export is vyos_defined %} neighbor {{ neighbor }} filter-list {{ afi_config.filter_list.export }} out {% endif %} {% if afi_config.filter_list.import is vyos_defined %} neighbor {{ neighbor }} filter-list {{ afi_config.filter_list.import }} in {% endif %} {% if afi_config.maximum_prefix is vyos_defined %} neighbor {{ neighbor }} maximum-prefix {{ afi_config.maximum_prefix }} {% endif %} {% if afi_config.maximum_prefix_out is vyos_defined %} neighbor {{ neighbor }} maximum-prefix-out {{ afi_config.maximum_prefix_out }} {% endif %} {% if afi_config.nexthop_self is vyos_defined %} neighbor {{ neighbor }} next-hop-self {{ 'force' if afi_config.nexthop_self.force is vyos_defined }} {% endif %} {% if afi_config.route_server_client is vyos_defined %} neighbor {{ neighbor }} route-server-client {% endif %} {% if afi_config.route_map.export is vyos_defined %} neighbor {{ neighbor }} route-map {{ afi_config.route_map.export }} out {% endif %} {% if afi_config.route_map.import is vyos_defined %} neighbor {{ neighbor }} route-map {{ afi_config.route_map.import }} in {% endif %} {% if afi_config.prefix_list.export is vyos_defined %} neighbor {{ neighbor }} prefix-list {{ afi_config.prefix_list.export }} out {% endif %} {% if afi_config.prefix_list.import is vyos_defined %} neighbor {{ neighbor }} prefix-list {{ afi_config.prefix_list.import }} in {% endif %} {% if afi_config.soft_reconfiguration.inbound is vyos_defined %} neighbor {{ neighbor }} soft-reconfiguration inbound {% endif %} {% if afi_config.unsuppress_map is vyos_defined %} neighbor {{ neighbor }} unsuppress-map {{ afi_config.unsuppress_map }} {% endif %} {% if afi_config.disable_send_community.extended is vyos_defined %} no neighbor {{ neighbor }} send-community extended {% endif %} {% if afi_config.disable_send_community.standard is vyos_defined %} no neighbor {{ neighbor }} send-community standard {% endif %} neighbor {{ neighbor }} activate exit-address-family ! +{# j2lint: disable=jinja-statements-delimeter #} {% endfor %} {% endif %} -{% endmacro %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endmacro -%} ! router bgp {{ system_as }} {{ 'vrf ' ~ vrf if vrf is vyos_defined }} {% if parameters.ebgp_requires_policy is vyos_defined %} bgp ebgp-requires-policy {% else %} no bgp ebgp-requires-policy {% endif %} {# Option must be set before any neighbor - see https://vyos.dev/T3463 #} no bgp default ipv4-unicast {# Workaround for T2100 until we have decided about a migration script #} no bgp network import-check {% if address_family is vyos_defined %} {% for afi, afi_config in address_family.items() %} ! 
{% if afi == 'ipv4_unicast' %} address-family ipv4 unicast {% elif afi == 'ipv4_multicast' %} address-family ipv4 multicast {% elif afi == 'ipv4_labeled_unicast' %} address-family ipv4 labeled-unicast {% elif afi == 'ipv4_vpn' %} address-family ipv4 vpn {% elif afi == 'ipv4_flowspec' %} address-family ipv4 flowspec {% elif afi == 'ipv6_unicast' %} address-family ipv6 unicast {% elif afi == 'ipv6_multicast' %} address-family ipv6 multicast {% elif afi == 'ipv6_labeled_unicast' %} address-family ipv6 labeled-unicast {% elif afi == 'ipv6_vpn' %} address-family ipv6 vpn {% elif afi == 'ipv6_flowspec' %} address-family ipv6 flowspec {% elif afi == 'l2vpn_evpn' %} address-family l2vpn evpn {% if afi_config.rd is vyos_defined %} rd {{ afi_config.rd }} {% endif %} {% endif %} {% if afi_config.aggregate_address is vyos_defined %} {% for aggregate, aggregate_config in afi_config.aggregate_address.items() %} aggregate-address {{ aggregate }} {{ 'as-set' if aggregate_config.as_set is vyos_defined }} {{ 'summary-only' if aggregate_config.summary_only is vyos_defined }} {{ 'route-map ' ~ aggregate_config.route_map if aggregate_config.route_map is vyos_defined }} {% endfor %} {% endif %} {% if afi_config.maximum_paths.ebgp is vyos_defined %} maximum-paths {{ afi_config.maximum_paths.ebgp }} {% endif %} {% if afi_config.maximum_paths.ibgp is vyos_defined %} maximum-paths ibgp {{ afi_config.maximum_paths.ibgp }} {% endif %} {% if afi_config.redistribute is vyos_defined %} {% for protocol, protocol_config in afi_config.redistribute.items() %} {% if protocol == 'table' %} redistribute table {{ protocol_config.table }} {% else %} {% set redistribution_protocol = protocol %} {% if protocol == 'ospfv3' %} {% set redistribution_protocol = 'ospf6' %} {% endif %} redistribute {{ redistribution_protocol }} {{ 'metric ' ~ protocol_config.metric if protocol_config.metric is vyos_defined }} {{ 'route-map ' ~ protocol_config.route_map if protocol_config.route_map is vyos_defined }} {####### we need this blank line!! #######} {% endif %} {% endfor %} {% endif %} {% if afi_config.network is vyos_defined %} {% for network, network_config in afi_config.network.items() %} network {{ network }} {{ 'route-map ' ~ network_config.route_map if network_config.route_map is vyos_defined }} {{ 'backdoor' if network_config.backdoor is vyos_defined }} {{ 'rd ' ~ network_config.rd if network_config.rd is vyos_defined }} {{ 'label ' ~ network_config.label if network_config.label is vyos_defined }} {####### we need this blank line!! 
#######} {% endfor %} {% endif %} {% if afi_config.advertise is vyos_defined %} {% for adv_afi, adv_afi_config in afi_config.advertise.items() %} {% if adv_afi_config.unicast is vyos_defined %} advertise {{ adv_afi }} unicast {{ 'route-map ' ~ adv_afi_config.unicast.route_map if adv_afi_config.unicast.route_map is vyos_defined }} {% endif %} {% endfor %} {% endif %} {% if afi_config.distance.external is vyos_defined and afi_config.distance.internal is vyos_defined and afi_config.distance.local is vyos_defined %} distance bgp {{ afi_config.distance.external }} {{ afi_config.distance.internal }} {{ afi_config.distance.local }} {% endif %} {% if afi_config.distance.prefix is vyos_defined %} {% for prefix in afi_config.distance.prefix %} distance {{ afi_config.distance.prefix[prefix].distance }} {{ prefix }} {% endfor %} {% endif %} {% if afi_config.export.vpn is vyos_defined %} export vpn {% endif %} {% if afi_config.import.vpn is vyos_defined %} import vpn {% endif %} {% if afi_config.import.vrf is vyos_defined %} {% for vrf in afi_config.import.vrf %} import vrf {{ vrf }} {% endfor %} {% endif %} {% if afi_config.label.vpn.export is vyos_defined %} label vpn export {{ afi_config.label.vpn.export }} {% endif %} {% if afi_config.label.vpn.allocation_mode.per_nexthop is vyos_defined %} label vpn export allocation-mode per-nexthop {% endif %} {% if afi_config.local_install is vyos_defined %} {% for interface in afi_config.local_install.interface %} local-install {{ interface }} {% endfor %} {% endif %} {% if afi_config.advertise_all_vni is vyos_defined %} advertise-all-vni {% endif %} {% if afi_config.advertise_default_gw is vyos_defined %} advertise-default-gw {% endif %} {% if afi_config.advertise_pip is vyos_defined %} advertise-pip ip {{ afi_config.advertise_pip }} {% endif %} {% if afi_config.advertise_svi_ip is vyos_defined %} advertise-svi-ip {% endif %} {% if afi_config.default_originate.ipv4 is vyos_defined %} default-originate ipv4 {% endif %} {% if afi_config.default_originate.ipv6 is vyos_defined %} default-originate ipv6 {% endif %} {% if afi_config.disable_ead_evi_rx is vyos_defined %} disable-ead-evi-rx {% endif %} {% if afi_config.disable_ead_evi_tx is vyos_defined %} disable-ead-evi-tx {% endif %} {% if afi_config.ead_es_frag.evi_limit is vyos_defined %} ead-es-frag evi-limit {{ afi_config.ead_es_frag.evi_limit }} {% endif %} {% if afi_config.ead_es_route_target.export is vyos_defined %} {% for route_target in afi_config.ead_es_route_target.export %} ead-es-route-target export {{ route_target }} {% endfor %} {% endif %} {% if afi_config.rt_auto_derive is vyos_defined %} autort rfc8365-compatible {% endif %} {% if afi_config.flooding.disable is vyos_defined %} flooding disable {% endif %} {% if afi_config.flooding.head_end_replication is vyos_defined %} flooding head-end-replication {% endif %} {% if afi_config.mac_vrf.soo is vyos_defined %} mac-vrf soo {{ afi_config.mac_vrf.soo }} {% endif %} {% if afi_config.nexthop.vpn.export is vyos_defined %} nexthop vpn export {{ afi_config.nexthop.vpn.export }} {% endif %} {% if afi_config.rd.vpn.export is vyos_defined %} rd vpn export {{ afi_config.rd.vpn.export }} {% endif %} {% if afi_config.route_target.vpn.both is vyos_defined %} route-target vpn both {{ afi_config.route_target.vpn.both }} {% else %} {% if afi_config.route_target.vpn.export is vyos_defined %} route-target vpn export {{ afi_config.route_target.vpn.export }} {% endif %} {% if afi_config.route_target.vpn.import is vyos_defined %} route-target vpn import {{ 
afi_config.route_target.vpn.import }} {% endif %} {% endif %} {% if afi_config.route_target.both is vyos_defined %} {% for route_target in afi_config.route_target.both %} route-target both {{ route_target }} {% endfor %} {% endif %} {% if afi_config.route_target.export is vyos_defined %} {% for route_target in afi_config.route_target.export %} route-target export {{ route_target }} {% endfor %} {% endif %} {% if afi_config.route_target.import is vyos_defined %} {% for route_target in afi_config.route_target.import %} route-target import {{ route_target }} {% endfor %} {% endif %} {% if afi_config.route_map.vpn.export is vyos_defined %} route-map vpn export {{ afi_config.route_map.vpn.export }} {% endif %} {% if afi_config.route_map.vpn.import is vyos_defined %} route-map vpn import {{ afi_config.route_map.vpn.import }} {% endif %} {% if afi_config.sid.vpn.export is vyos_defined %} sid vpn export {{ afi_config.sid.vpn.export }} {% endif %} {% if afi_config.vni is vyos_defined %} {% for vni, vni_config in afi_config.vni.items() %} vni {{ vni }} {% if vni_config.advertise_default_gw is vyos_defined %} advertise-default-gw {% endif %} {% if vni_config.advertise_svi_ip is vyos_defined %} advertise-svi-ip {% endif %} {% if vni_config.rd is vyos_defined %} rd {{ vni_config.rd }} {% endif %} {% if vni_config.route_target.both is vyos_defined %} {% for route_target in vni_config.route_target.both %} route-target both {{ route_target }} {% endfor %} {% endif %} {% if vni_config.route_target.export is vyos_defined %} {% for route_target in vni_config.route_target.export %} route-target export {{ route_target }} {% endfor %} {% endif %} {% if vni_config.route_target.import is vyos_defined %} {% for route_target in vni_config.route_target.import %} route-target import {{ route_target }} {% endfor %} {% endif %} exit-vni {% endfor %} {% endif %} exit-address-family ! {% endfor %} {% endif %} ! {% if bmp is vyos_defined %} {% if bmp.mirror_buffer_limit is vyos_defined %} bmp mirror buffer-limit {{ bmp.mirror_buffer_limit }} ! {% endif %} {% if bmp.target is vyos_defined %} {% for bmp, bmp_config in bmp.target.items() %} bmp targets {{ bmp }} {% if bmp_config.mirror is vyos_defined %} bmp mirror {% endif %} {% if bmp_config.monitor is vyos_defined %} {% if bmp_config.monitor.ipv4_unicast.pre_policy is vyos_defined %} bmp monitor ipv4 unicast pre-policy {% endif %} {% if bmp_config.monitor.ipv4_unicast.post_policy is vyos_defined %} bmp monitor ipv4 unicast post-policy {% endif %} {% if bmp_config.monitor.ipv6_unicast.pre_policy is vyos_defined %} bmp monitor ipv6 unicast pre-policy {% endif %} {% if bmp_config.monitor.ipv6_unicast.post_policy is vyos_defined %} bmp monitor ipv6 unicast post-policy {% endif %} {% endif %} {% if bmp_config.address is vyos_defined %} bmp connect {{ bmp_config.address }} port {{ bmp_config.port }} min-retry {{ bmp_config.min_retry }} max-retry {{ bmp_config.max_retry }} {% endif %} {% endfor %} exit {% endif %} {% endif %} {% if peer_group is vyos_defined %} {% for peer, config in peer_group.items() %} {{ bgp_neighbor(peer, config, true) }} -{% endfor %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} ! {% if neighbor is vyos_defined %} {% for peer, config in neighbor.items() %} {{ bgp_neighbor(peer, config) }} -{% endfor %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} ! 
{% if listen.limit is vyos_defined %} bgp listen limit {{ listen.limit }} {% endif %} {% if listen.range is vyos_defined %} {% for prefix, options in listen.range.items() %} {% if options.peer_group is vyos_defined %} bgp listen range {{ prefix }} peer-group {{ options.peer_group }} {% endif %} {% endfor %} {% endif %} {% if parameters.allow_martian_nexthop is vyos_defined %} bgp allow-martian-nexthop {% endif %} {% if parameters.disable_ebgp_connected_route_check is vyos_defined %} bgp disable-ebgp-connected-route-check {% endif %} {% if parameters.always_compare_med is vyos_defined %} bgp always-compare-med {% endif %} {% if parameters.bestpath.as_path is vyos_defined %} {% for option in parameters.bestpath.as_path %} {# replace is required for multipath-relax option #} bgp bestpath as-path {{ option | replace('_', '-') }} {% endfor %} {% endif %} {% if parameters.bestpath.bandwidth is vyos_defined %} bgp bestpath bandwidth {{ parameters.bestpath.bandwidth }} {% endif %} {% if parameters.bestpath.compare_routerid is vyos_defined %} bgp bestpath compare-routerid {% endif %} {% if parameters.bestpath.med is vyos_defined %} bgp bestpath med {{ parameters.bestpath.med | join(' ') | replace('_', '-') }} {% endif %} {% if parameters.bestpath.peer_type is vyos_defined %} bgp bestpath peer-type {{ 'multipath-relax' if parameters.bestpath.peer_type.multipath_relax is vyos_defined }} {% endif %} {% if parameters.cluster_id is vyos_defined %} bgp cluster-id {{ parameters.cluster_id }} {% endif %} {% if parameters.conditional_advertisement.timer is vyos_defined %} bgp conditional-advertisement timer {{ parameters.conditional_advertisement.timer }} {% endif %} {% if parameters.confederation.identifier is vyos_defined %} bgp confederation identifier {{ parameters.confederation.identifier }} {% endif %} {% if parameters.confederation.peers is vyos_defined %} bgp confederation peers {{ parameters.confederation.peers | join(' ') }} {% endif %} {% if parameters.dampening.half_life is vyos_defined %} {# Doesn't work in current FRR configuration; vtysh (bgp dampening 16 751 2001 61) #} bgp dampening {{ parameters.dampening.half_life }} {{ parameters.dampening.re_use if parameters.dampening.re_use is vyos_defined }} {{ parameters.dampening.start_suppress_time if parameters.dampening.start_suppress_time is vyos_defined }} {{ parameters.dampening.max_suppress_time if parameters.dampening.max_suppress_time is vyos_defined }} {% endif %} {% if parameters.default.local_pref is vyos_defined %} bgp default local-preference {{ parameters.default.local_pref }} {% endif %} {% if parameters.deterministic_med is vyos_defined %} bgp deterministic-med {% endif %} {% if parameters.distance.global.external is vyos_defined and parameters.distance.global.internal is vyos_defined and parameters.distance.global.local is vyos_defined %} distance bgp {{ parameters.distance.global.external }} {{ parameters.distance.global.internal }} {{ parameters.distance.global.local }} {% endif %} {% if parameters.distance.prefix is vyos_defined %} {% for prefix in parameters.distance.prefix %} distance {{ parameters.distance.prefix[prefix].distance }} {{ prefix }} {% endfor %} {% endif %} {% if parameters.fast_convergence is vyos_defined %} bgp fast-convergence {% endif %} {% if parameters.graceful_restart is vyos_defined %} bgp graceful-restart {{ 'stalepath-time ' ~ parameters.graceful_restart.stalepath_time if parameters.graceful_restart.stalepath_time is vyos_defined }} {% endif %} {% if parameters.graceful_shutdown is vyos_defined %} bgp 
graceful-shutdown {% endif %} {% if parameters.no_hard_administrative_reset is vyos_defined %} no bgp hard-administrative-reset {% endif %} {% if parameters.labeled_unicast is vyos_defined %} bgp labeled-unicast {{ parameters.labeled_unicast }} {% endif %} {% if parameters.log_neighbor_changes is vyos_defined %} bgp log-neighbor-changes {% endif %} {% if parameters.minimum_holdtime is vyos_defined %} bgp minimum-holdtime {{ parameters.minimum_holdtime }} {% endif %} {% if parameters.network_import_check is vyos_defined %} bgp network import-check {% endif %} {% if parameters.route_reflector_allow_outbound_policy is vyos_defined %} bgp route-reflector allow-outbound-policy {% endif %} {% if parameters.no_client_to_client_reflection is vyos_defined %} no bgp client-to-client reflection {% endif %} {% if parameters.no_fast_external_failover is vyos_defined %} no bgp fast-external-failover {% endif %} {% if parameters.no_suppress_duplicates is vyos_defined %} no bgp suppress-duplicates {% endif %} {% if parameters.reject_as_sets is vyos_defined %} bgp reject-as-sets {% endif %} {% if parameters.router_id is vyos_defined and parameters.router_id is not none %} bgp router-id {{ parameters.router_id }} {% endif %} {% if parameters.shutdown is vyos_defined %} bgp shutdown {% endif %} {% if parameters.suppress_fib_pending is vyos_defined %} bgp suppress-fib-pending {% endif %} {% if parameters.tcp_keepalive.idle is vyos_defined and parameters.tcp_keepalive.interval is vyos_defined and parameters.tcp_keepalive.probes is vyos_defined %} bgp tcp-keepalive {{ parameters.tcp_keepalive.idle }} {{ parameters.tcp_keepalive.interval }} {{ parameters.tcp_keepalive.probes }} {% endif %} {% if srv6.locator is vyos_defined %} segment-routing srv6 locator {{ srv6.locator }} exit {% endif %} {% if sid.vpn.per_vrf.export is vyos_defined %} sid vpn per-vrf export {{ sid.vpn.per_vrf.export }} {% endif %} {% if timers.keepalive is vyos_defined and timers.holdtime is vyos_defined %} timers bgp {{ timers.keepalive }} {{ timers.holdtime }} {% endif %} exit ! {% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} interface {{ iface }} {% if iface_config.mpls.forwarding is vyos_defined %} mpls bgp forwarding {% endif %} exit ! 
{% endfor %} {% endif %} diff --git a/data/templates/frr/distribute_list_macro.j2 b/data/templates/frr/distribute_list_macro.j2 index c10bf732d..3e15ef100 100644 --- a/data/templates/frr/distribute_list_macro.j2 +++ b/data/templates/frr/distribute_list_macro.j2 @@ -1,30 +1,31 @@ {% macro render_distribute_list(distribute_list) %} {% if distribute_list.access_list.in is vyos_defined %} distribute-list {{ distribute_list.access_list.in }} in {% endif %} {% if distribute_list.access_list.out is vyos_defined %} distribute-list {{ distribute_list.access_list.out }} out {% endif %} {% if distribute_list.interface is vyos_defined %} {% for interface, interface_config in distribute_list.interface.items() %} {% if interface_config.access_list.in is vyos_defined %} distribute-list {{ interface_config.access_list.in }} in {{ interface }} {% endif %} {% if interface_config.access_list.out is vyos_defined %} distribute-list {{ interface_config.access_list.out }} out {{ interface }} {% endif %} {% if interface_config.prefix_list.in is vyos_defined %} distribute-list prefix {{ interface_config.prefix_list.in }} in {{ interface }} {% endif %} {% if interface_config.prefix_list.out is vyos_defined %} distribute-list prefix {{ interface_config.prefix_list.out }} out {{ interface }} {% endif %} {% endfor %} {% endif %} {% if distribute_list.prefix_list.in is vyos_defined %} distribute-list prefix {{ distribute_list.prefix_list.in }} in {% endif %} {% if distribute_list.prefix_list.out is vyos_defined %} distribute-list prefix {{ distribute_list.prefix_list.out }} out {% endif %} -{% endmacro %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endmacro -%} diff --git a/data/templates/frr/evpn.mh.frr.j2 b/data/templates/frr/evpn.mh.frr.j2 index 03aaac44b..2fd7b7c09 100644 --- a/data/templates/frr/evpn.mh.frr.j2 +++ b/data/templates/frr/evpn.mh.frr.j2 @@ -1,16 +1,20 @@ ! -interface {{ ifname }} -{% if evpn.es_df_pref is vyos_defined %} - evpn mh es-df-pref {{ evpn.es_df_pref }} -{% endif %} -{% if evpn.es_id is vyos_defined %} - evpn mh es-id {{ evpn.es_id }} -{% endif %} -{% if evpn.es_sys_mac is vyos_defined %} - evpn mh es-sys-mac {{ evpn.es_sys_mac }} -{% endif %} -{% if evpn.uplink is vyos_defined %} +{% if interfaces is vyos_defined %} +{% for if_name, if_config in interfaces.items() %} +interface {{ if_name }} +{% if if_config.evpn.es_df_pref is vyos_defined %} + evpn mh es-df-pref {{ if_config.evpn.es_df_pref }} +{% endif %} +{% if if_config.evpn.es_id is vyos_defined %} + evpn mh es-id {{ if_config.evpn.es_id }} +{% endif %} +{% if if_config.evpn.es_sys_mac is vyos_defined %} + evpn mh es-sys-mac {{ if_config.evpn.es_sys_mac }} +{% endif %} +{% if if_config.evpn.uplink is vyos_defined %} evpn mh uplink -{% endif %} +{% endif %} exit ! +{% endfor %} +{% endif %} diff --git a/data/templates/frr/fabricd.frr.j2 b/data/templates/frr/fabricd.frr.j2 index 8f2ae6466..3a0646eb8 100644 --- a/data/templates/frr/fabricd.frr.j2 +++ b/data/templates/frr/fabricd.frr.j2 @@ -1,72 +1,73 @@ ! 
{% for name, router_config in domain.items() %} {% if router_config.interface is vyos_defined %} {% for iface, iface_config in router_config.interface.items() %} interface {{ iface }} {% if iface_config.address_family.ipv4 is vyos_defined %} ip router openfabric {{ name }} {% endif %} {% if iface_config.address_family.ipv6 is vyos_defined %} ipv6 router openfabric {{ name }} {% endif %} {% if iface_config.csnp_interval is vyos_defined %} openfabric csnp-interval {{ iface_config.csnp_interval }} {% endif %} {% if iface_config.hello_interval is vyos_defined %} openfabric hello-interval {{ iface_config.hello_interval }} {% endif %} {% if iface_config.hello_multiplier is vyos_defined %} openfabric hello-multiplier {{ iface_config.hello_multiplier }} {% endif %} {% if iface_config.metric is vyos_defined %} openfabric metric {{ iface_config.metric }} {% endif %} {% if iface_config.passive is vyos_defined or iface == 'lo' %} openfabric passive {% endif %} {% if iface_config.password.md5 is vyos_defined %} openfabric password md5 {{ iface_config.password.md5 }} {% elif iface_config.password.plaintext_password is vyos_defined %} openfabric password clear {{ iface_config.password.plaintext_password }} {% endif %} {% if iface_config.psnp_interval is vyos_defined %} openfabric psnp-interval {{ iface_config.psnp_interval }} {% endif %} exit ! {% endfor %} {% endif %} router openfabric {{ name }} net {{ net }} {% if router_config.domain_password.md5 is vyos_defined %} domain-password md5 {{ router_config.domain_password.plaintext_password }} {% elif router_config.domain_password.plaintext_password is vyos_defined %} domain-password clear {{ router_config.domain_password.plaintext_password }} {% endif %} {% if router_config.log_adjacency_changes is vyos_defined %} log-adjacency-changes {% endif %} {% if router_config.set_overload_bit is vyos_defined %} set-overload-bit {% endif %} {% if router_config.purge_originator is vyos_defined %} purge-originator {% endif %} {% if router_config.fabric_tier is vyos_defined %} fabric-tier {{ router_config.fabric_tier }} {% endif %} {% if router_config.lsp_gen_interval is vyos_defined %} lsp-gen-interval {{ router_config.lsp_gen_interval }} {% endif %} {% if router_config.lsp_refresh_interval is vyos_defined %} lsp-refresh-interval {{ router_config.lsp_refresh_interval }} {% endif %} {% if router_config.max_lsp_lifetime is vyos_defined %} max-lsp-lifetime {{ router_config.max_lsp_lifetime }} {% endif %} {% if router_config.spf_interval is vyos_defined %} spf-interval {{ router_config.spf_interval }} {% endif %} exit ! {% endfor %} +! 
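Several macros in this patch change their closing tag from {% endmacro %} to {%- endmacro -%} and gain a {# j2lint: disable=jinja-statements-delimeter #} marker for the affected lint rule. The trimmed delimiters keep the macro definition and its call sites from leaving stray blank lines in the rendered FRR configuration. A minimal sketch of that Jinja2 whitespace-control behaviour follows, assuming a stock jinja2 install; the template strings are illustrative only and are not taken from the patch.

# Minimal sketch of Jinja2 whitespace control around macro tags (illustrative only).
from jinja2 import Template

plain   = Template("{% macro m() %}\nline\n{% endmacro %}\n{{ m() }}!")
trimmed = Template("{%- macro m() -%}\nline\n{%- endmacro -%}\n{{ m() }}!")

print(repr(plain.render()))    # '\n\nline\n!' - blank lines leak around the macro body
print(repr(trimmed.render()))  # 'line!'       - the '-' strips whitespace next to each tag

The same effect is what keeps the macro-driven distribute-list and static-route sections of the generated frr.conf free of spurious empty lines.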
diff --git a/data/templates/frr/ipv6_distribute_list_macro.j2 b/data/templates/frr/ipv6_distribute_list_macro.j2 index c365fbdae..2f483b7d4 100644 --- a/data/templates/frr/ipv6_distribute_list_macro.j2 +++ b/data/templates/frr/ipv6_distribute_list_macro.j2 @@ -1,30 +1,31 @@ {% macro render_ipv6_distribute_list(distribute_list) %} {% if distribute_list.access_list.in is vyos_defined %} ipv6 distribute-list {{ distribute_list.access_list.in }} in {% endif %} {% if distribute_list.access_list.out is vyos_defined %} ipv6 distribute-list {{ distribute_list.access_list.out }} out {% endif %} {% if distribute_list.interface is vyos_defined %} {% for interface, interface_config in distribute_list.interface.items() %} {% if interface_config.access_list.in is vyos_defined %} ipv6 distribute-list {{ interface_config.access_list.in }} in {{ interface }} {% endif %} {% if interface_config.access_list.out is vyos_defined %} ipv6 distribute-list {{ interface_config.access_list.out }} out {{ interface }} {% endif %} {% if interface_config.prefix_list.in is vyos_defined %} ipv6 distribute-list prefix {{ interface_config.prefix_list.in }} in {{ interface }} {% endif %} {% if interface_config.prefix_list.out is vyos_defined %} ipv6 distribute-list prefix {{ interface_config.prefix_list.out }} out {{ interface }} {% endif %} {% endfor %} {% endif %} {% if distribute_list.prefix_list.in is vyos_defined %} ipv6 distribute-list prefix {{ distribute_list.prefix_list.in }} in {% endif %} {% if distribute_list.prefix_list.out is vyos_defined %} ipv6 distribute-list prefix {{ distribute_list.prefix_list.out }} out {% endif %} -{% endmacro %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endmacro -%} diff --git a/data/templates/frr/ospfd.frr.j2 b/data/templates/frr/ospfd.frr.j2 index ab074b6a2..bc2c74b10 100644 --- a/data/templates/frr/ospfd.frr.j2 +++ b/data/templates/frr/ospfd.frr.j2 @@ -1,262 +1,265 @@ ! 
{% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} interface {{ iface }} {% if iface_config.authentication.plaintext_password is vyos_defined %} ip ospf authentication-key {{ iface_config.authentication.plaintext_password }} {% elif iface_config.authentication.md5 is vyos_defined %} ip ospf authentication message-digest {% if iface_config.authentication.md5.key_id is vyos_defined %} {% for key, key_config in iface_config.authentication.md5.key_id.items() %} ip ospf message-digest-key {{ key }} md5 {{ key_config.md5_key }} {% endfor %} {% endif %} {% endif %} {% if iface_config.area is vyos_defined %} ip ospf area {{ iface_config.area }} {% endif %} {% if iface_config.bandwidth is vyos_defined %} bandwidth {{ iface_config.bandwidth }} {% endif %} {% if iface_config.cost is vyos_defined %} ip ospf cost {{ iface_config.cost }} {% endif %} {% if iface_config.priority is vyos_defined %} ip ospf priority {{ iface_config.priority }} {% endif %} {% if iface_config.hello_interval is vyos_defined %} ip ospf hello-interval {{ iface_config.hello_interval }} {% endif %} {% if iface_config.retransmit_interval is vyos_defined %} ip ospf retransmit-interval {{ iface_config.retransmit_interval }} {% endif %} +{% if iface_config.retransmit_window is vyos_defined %} + ip ospf retransmit-window {{ iface_config.retransmit_window }} +{% endif %} {% if iface_config.transmit_delay is vyos_defined %} ip ospf transmit-delay {{ iface_config.transmit_delay }} {% endif %} {% if iface_config.dead_interval is vyos_defined %} ip ospf dead-interval {{ iface_config.dead_interval }} {% elif iface_config.hello_multiplier is vyos_defined %} ip ospf dead-interval minimal hello-multiplier {{ iface_config.hello_multiplier }} {% endif %} {% if iface_config.bfd is vyos_defined %} ip ospf bfd {% endif %} {% if iface_config.bfd.profile is vyos_defined %} ip ospf bfd profile {{ iface_config.bfd.profile }} {% endif %} {% if iface_config.ldp_sync.disable is vyos_defined %} no ip ospf mpls ldp-sync {% elif iface_config.ldp_sync.holddown is vyos_defined %} ip ospf mpls ldp-sync ip ospf mpls ldp-sync holddown {{ iface_config.ldp_sync.holddown }} {% endif %} {% if iface_config.mtu_ignore is vyos_defined %} ip ospf mtu-ignore {% endif %} {% if iface_config.network is vyos_defined %} ip ospf network {{ iface_config.network }} {% endif %} {% if iface_config.passive is vyos_defined %} {{ 'no ' if iface_config.passive.disable is vyos_defined }}ip ospf passive {% endif %} exit ! {% endfor %} {% endif %} ! 
router ospf {{ 'vrf ' ~ vrf if vrf is vyos_defined }} {% if access_list is vyos_defined %} {% for acl, acl_config in access_list.items() %} {% for protocol in acl_config.export if acl_config.export is vyos_defined %} distribute-list {{ acl }} out {{ protocol }} {% endfor %} {% endfor %} {% endif %} {% if aggregation.timer is vyos_defined %} aggregation timer {{ aggregation.timer }} {% endif %} {% if area is vyos_defined %} {% for area_id, area_config in area.items() %} {% if area_config.area_type is vyos_defined %} {% for type, type_config in area_config.area_type.items() if type != 'normal' %} area {{ area_id }} {{ type }} {{ 'no-summary' if type_config.no_summary is vyos_defined }} {% if type_config.default_cost is vyos_defined %} area {{ area_id }} default-cost {{ type_config.default_cost }} {% endif %} {% endfor %} {% endif %} {% if area_config.authentication is vyos_defined %} area {{ area_id }} authentication {{ 'message-digest' if area_config.authentication is vyos_defined('md5') }} {% endif %} {% for network in area_config.network if area_config.network is vyos_defined %} network {{ network }} area {{ area_id }} {% endfor %} {% if area_config.range is vyos_defined %} {% for range, range_config in area_config.range.items() %} {% if range_config.not_advertise is vyos_defined %} area {{ area_id }} range {{ range }} not-advertise {% else %} area {{ area_id }} range {{ range }} {% endif %} {% if range_config.cost is vyos_defined %} area {{ area_id }} range {{ range }} cost {{ range_config.cost }} {% endif %} {% if range_config.substitute is vyos_defined %} area {{ area_id }} range {{ range }} substitute {{ range_config.substitute }} {% endif %} {% endfor %} {% endif %} {% if area_config.export_list is vyos_defined %} area {{ area_id }} export-list {{ area_config.export_list }} {% endif %} {% if area_config.import_list is vyos_defined %} area {{ area_id }} import-list {{ area_config.import_list }} {% endif %} {% if area_config.shortcut is vyos_defined %} area {{ area_id }} shortcut {{ area_config.shortcut }} {% endif %} {% if area_config.virtual_link is vyos_defined %} {% for link, link_config in area_config.virtual_link.items() %} {% if link_config.authentication.plaintext_password is vyos_defined %} area {{ area_id }} virtual-link {{ link }} authentication-key {{ link_config.authentication.plaintext_password }} {% elif link_config.authentication.md5.key_id is vyos_defined %} {% for key, key_config in link_config.authentication.md5.key_id.items() %} area {{ area_id }} virtual-link {{ link }} message-digest-key {{ key }} md5 {{ key_config.md5_key }} {% endfor %} {% endif %} {# The following values are default values #} - area {{ area_id }} virtual-link {{ link }} hello-interval {{ link_config.hello_interval }} retransmit-interval {{ link_config.retransmit_interval }} transmit-delay {{ link_config.transmit_delay }} dead-interval {{ link_config.dead_interval }} + area {{ area_id }} virtual-link {{ link }} hello-interval {{ link_config.hello_interval }} retransmit-interval {{ link_config.retransmit_interval }} retransmit-window {{ link_config.retransmit_window }} transmit-delay {{ link_config.transmit_delay }} dead-interval {{ link_config.dead_interval }} {% endfor %} {% endif %} {% endfor %} {% endif %} {% if auto_cost.reference_bandwidth is vyos_defined %} auto-cost reference-bandwidth {{ auto_cost.reference_bandwidth }} {% endif %} {% if capability.opaque is vyos_defined %} capability opaque {% endif %} {% if default_information.originate is vyos_defined %} default-information originate 
{{ 'always' if default_information.originate.always is vyos_defined }} {{ 'metric ' + default_information.originate.metric if default_information.originate.metric is vyos_defined }} {{ 'metric-type ' + default_information.originate.metric_type if default_information.originate.metric_type is vyos_defined }} {{ 'route-map ' + default_information.originate.route_map if default_information.originate.route_map is vyos_defined }} {% endif %} {% if default_metric is vyos_defined %} default-metric {{ default_metric }} {% endif %} {% if maximum_paths is vyos_defined %} maximum-paths {{ maximum_paths }} {% endif %} {% if ldp_sync.holddown is vyos_defined %} mpls ldp-sync holddown {{ ldp_sync.holddown }} {% elif ldp_sync is vyos_defined %} mpls ldp-sync {% endif %} {% if distance.global is vyos_defined %} distance {{ distance.global }} {% endif %} {% if distance.ospf is vyos_defined %} distance ospf {{ 'intra-area ' + distance.ospf.intra_area if distance.ospf.intra_area is vyos_defined }} {{ 'inter-area ' + distance.ospf.inter_area if distance.ospf.inter_area is vyos_defined }} {{ 'external ' + distance.ospf.external if distance.ospf.external is vyos_defined }} {% endif %} {% if graceful_restart is vyos_defined %} {% if graceful_restart.grace_period is vyos_defined %} graceful-restart grace-period {{ graceful_restart.grace_period }} {% endif %} {% if graceful_restart.helper.enable.router_id is vyos_defined %} {% for router_id in graceful_restart.helper.enable.router_id %} graceful-restart helper enable {{ router_id }} {% endfor %} {% elif graceful_restart.helper.enable is vyos_defined %} graceful-restart helper enable {% endif %} {% if graceful_restart.helper.planned_only is vyos_defined %} graceful-restart helper planned-only {% endif %} {% if graceful_restart.helper.no_strict_lsa_checking is vyos_defined %} no graceful-restart helper strict-lsa-checking {% endif %} {% if graceful_restart.helper.supported_grace_time is vyos_defined %} graceful-restart helper supported-grace-time {{ graceful_restart.helper.supported_grace_time }} {% endif %} {% endif %} {% if log_adjacency_changes is vyos_defined %} log-adjacency-changes {{ "detail" if log_adjacency_changes.detail is vyos_defined }} {% endif %} {% if max_metric.router_lsa.administrative is vyos_defined %} max-metric router-lsa administrative {% endif %} {% if max_metric.router_lsa.on_shutdown is vyos_defined %} max-metric router-lsa on-shutdown {{ max_metric.router_lsa.on_shutdown }} {% endif %} {% if max_metric.router_lsa.on_startup is vyos_defined %} max-metric router-lsa on-startup {{ max_metric.router_lsa.on_startup }} {% endif %} {% if mpls_te.enable is vyos_defined %} mpls-te on mpls-te router-address {{ mpls_te.router_address }} {% endif %} {% if neighbor is vyos_defined %} {% for address, address_config in neighbor.items() %} neighbor {{ address }} {{ 'priority ' + address_config.priority if address_config.priority is vyos_defined }} {{ 'poll-interval ' + address_config.poll_interval if address_config.poll_interval is vyos_defined }} {% endfor %} {% endif %} {% if parameters.abr_type is vyos_defined %} ospf abr-type {{ parameters.abr_type }} {% endif %} {% if parameters.opaque_lsa is vyos_defined %} ospf opaque-lsa {% endif %} {% if parameters.rfc1583_compatibility is vyos_defined %} ospf rfc1583compatibility {% endif %} {% if parameters.router_id is vyos_defined %} ospf router-id {{ parameters.router_id }} {% endif %} {% if passive_interface is vyos_defined('default') %} passive-interface default {% endif %} {% if redistribute is 
vyos_defined %} {% for protocol, options in redistribute.items() %} {% if protocol == 'table' %} {% for table, table_options in options.items() %} redistribute {{ protocol }} {{ table }} {{ 'metric ' ~ table_options.metric if table_options.metric is vyos_defined }} {{ 'metric-type ' ~ table_options.metric_type if table_options.metric_type is vyos_defined }} {{ 'route-map ' ~ table_options.route_map if table_options.route_map is vyos_defined }} {% endfor %} {% else %} redistribute {{ protocol }} {{ 'metric ' ~ options.metric if options.metric is vyos_defined }} {{ 'metric-type ' ~ options.metric_type if options.metric_type is vyos_defined }} {{ 'route-map ' ~ options.route_map if options.route_map is vyos_defined }} {% endif %} {% endfor %} {% endif %} {% if refresh.timers is vyos_defined %} refresh timer {{ refresh.timers }} {% endif %} {% if summary_address is vyos_defined %} {% for prefix, prefix_options in summary_address.items() %} summary-address {{ prefix }} {{ 'tag ' + prefix_options.tag if prefix_options.tag is vyos_defined }}{{ 'no-advertise' if prefix_options.no_advertise is vyos_defined }} {% endfor %} {% endif %} {% if segment_routing is vyos_defined %} + segment-routing on {% if segment_routing.maximum_label_depth is vyos_defined %} segment-routing node-msd {{ segment_routing.maximum_label_depth }} {% endif %} {% if segment_routing.global_block is vyos_defined %} {% if segment_routing.local_block is vyos_defined %} segment-routing global-block {{ segment_routing.global_block.low_label_value }} {{ segment_routing.global_block.high_label_value }} local-block {{ segment_routing.local_block.low_label_value }} {{ segment_routing.local_block.high_label_value }} {% else %} segment-routing global-block {{ segment_routing.global_block.low_label_value }} {{ segment_routing.global_block.high_label_value }} {% endif %} {% endif %} {% if segment_routing.prefix is vyos_defined %} {% for prefix, prefix_config in segment_routing.prefix.items() %} {% if prefix_config.index is vyos_defined %} {% if prefix_config.index.value is vyos_defined %} segment-routing prefix {{ prefix }} index {{ prefix_config.index.value }} {{ 'explicit-null' if prefix_config.index.explicit_null is vyos_defined }} {{ 'no-php-flag' if prefix_config.index.no_php_flag is vyos_defined }} {% endif %} {% endif %} {% endfor %} {% endif %} - segment-routing on {% endif %} {% if timers.throttle.spf.delay is vyos_defined and timers.throttle.spf.initial_holdtime is vyos_defined and timers.throttle.spf.max_holdtime is vyos_defined %} {# Timer values have default values #} timers throttle spf {{ timers.throttle.spf.delay }} {{ timers.throttle.spf.initial_holdtime }} {{ timers.throttle.spf.max_holdtime }} {% endif %} exit ! diff --git a/data/templates/frr/pim6d.frr.j2 b/data/templates/frr/pim6d.frr.j2 index bac716fcc..d4144a2f9 100644 --- a/data/templates/frr/pim6d.frr.j2 +++ b/data/templates/frr/pim6d.frr.j2 @@ -1,81 +1,84 @@ ! {% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} ! 
interface {{ iface }} ipv6 pim {% if iface_config.no_bsm is vyos_defined %} no ipv6 pim bsm {% endif %} {% if iface_config.dr_priority is vyos_defined %} ipv6 pim drpriority {{ iface_config.dr_priority }} {% endif %} {% if iface_config.hello is vyos_defined %} ipv6 pim hello {{ iface_config.hello }} {% endif %} {% if iface_config.no_unicast_bsm is vyos_defined %} no ipv6 pim unicast-bsm {% endif %} {% if iface_config.passive is vyos_defined %} ipv6 pim passive {% endif %} {% if iface_config.mld is vyos_defined and iface_config.mld.disable is not vyos_defined %} ipv6 mld {% if iface_config.mld.version is vyos_defined %} ipv6 mld version {{ iface_config.mld.version }} {% endif %} {% if iface_config.mld.interval is vyos_defined %} ipv6 mld query-interval {{ iface_config.mld.interval }} {% endif %} {% if iface_config.mld.max_response_time is vyos_defined %} ipv6 mld query-max-response-time {{ iface_config.mld.max_response_time // 100 }} {% endif %} {% if iface_config.mld.last_member_query_count is vyos_defined %} ipv6 mld last-member-query-count {{ iface_config.mld.last_member_query_count }} {% endif %} {% if iface_config.mld.last_member_query_interval is vyos_defined %} ipv6 mld last-member-query-interval {{ iface_config.mld.last_member_query_interval // 100 }} {% endif %} {% if iface_config.mld.join is vyos_defined %} {% for group, group_config in iface_config.mld.join.items() %} {% if group_config.source is vyos_defined %} {% for source in group_config.source %} - ipv6 mld join {{ group }} {{ source }} + ipv6 mld join-group {{ group }} {{ source }} {% endfor %} {% else %} - ipv6 mld join {{ group }} + ipv6 mld join-group {{ group }} {% endif %} {% endfor %} {% endif %} {% endif %} exit {% endfor %} {% endif %} ! +router pim6 {% if join_prune_interval is vyos_defined %} -ipv6 pim join-prune-interval {{ join_prune_interval }} + join-prune-interval {{ join_prune_interval }} {% endif %} {% if keep_alive_timer is vyos_defined %} -ipv6 pim keep-alive-timer {{ keep_alive_timer }} + keep-alive-timer {{ keep_alive_timer }} {% endif %} {% if packets is vyos_defined %} -ipv6 pim packets {{ packets }} + packets {{ packets }} {% endif %} {% if register_suppress_time is vyos_defined %} -ipv6 pim register-suppress-time {{ register_suppress_time }} + register-suppress-time {{ register_suppress_time }} {% endif %} {% if rp.address is vyos_defined %} {% for address, address_config in rp.address.items() %} {% if address_config.group is vyos_defined %} {% for group in address_config.group %} -ipv6 pim rp {{ address }} {{ group }} + rp {{ address }} {{ group }} {% endfor %} {% endif %} {% if address_config.prefix_list6 is vyos_defined %} -ipv6 pim rp {{ address }} prefix-list {{ address_config.prefix_list6 }} + rp {{ address }} prefix-list {{ address_config.prefix_list6 }} {% endif %} {% endfor %} {% endif %} {% if rp.keep_alive_timer is vyos_defined %} -ipv6 pim rp keep-alive-timer {{ rp.keep_alive_timer }} + rp keep-alive-timer {{ rp.keep_alive_timer }} {% endif %} +exit +! diff --git a/data/templates/frr/pimd.frr.j2 b/data/templates/frr/pimd.frr.j2 index 68edf4a5c..d474d8495 100644 --- a/data/templates/frr/pimd.frr.j2 +++ b/data/templates/frr/pimd.frr.j2 @@ -1,95 +1,97 @@ {% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} ! 
interface {{ iface }} ip pim {% if iface_config.bfd is vyos_defined %} ip pim bfd {{ 'profile ' ~ iface_config.bfd.profile if iface_config.bfd.profile is vyos_defined }} {% endif %} {% if iface_config.no_bsm is vyos_defined %} no ip pim bsm {% endif %} {% if iface_config.dr_priority is vyos_defined %} ip pim drpriority {{ iface_config.dr_priority }} {% endif %} {% if iface_config.hello is vyos_defined %} ip pim hello {{ iface_config.hello }} {% endif %} {% if iface_config.no_unicast_bsm is vyos_defined %} no ip pim unicast-bsm {% endif %} {% if iface_config.passive is vyos_defined %} ip pim passive {% endif %} {% if iface_config.source_address is vyos_defined %} ip pim use-source {{ iface_config.source_address }} {% endif %} {% if iface_config.igmp is vyos_defined and iface_config.igmp.disable is not vyos_defined %} ip igmp {% if iface_config.igmp.query_interval %} ip igmp query-interval {{ iface_config.igmp.query_interval }} {% endif %} {% if iface_config.igmp.query_max_response_time %} ip igmp query-max-response-time {{ iface_config.igmp.query_max_response_time }} {% endif %} {% if iface_config.igmp.version is vyos_defined %} ip igmp version {{ iface_config.igmp.version }} {% endif %} {% if iface_config.igmp.join is vyos_defined %} {% for join, join_config in iface_config.igmp.join.items() %} {% if join_config.source_address is vyos_defined %} {% for source_address in join_config.source_address %} - ip igmp join {{ join }} {{ source_address }} + ip igmp join-group {{ join }} {{ source_address }} {% endfor %} {% else %} - ip igmp join {{ join }} + ip igmp join-group {{ join }} {% endif %} {% endfor %} {% endif %} {% endif %} exit {% endfor %} {% endif %} ! +{% if igmp.watermark_warning is vyos_defined %} +ip igmp watermark-warn {{ igmp.watermark_warning }} +{% endif %} +! 
+router pim {% if ecmp is vyos_defined %} -ip pim ecmp {{ 'rebalance' if ecmp.rebalance is vyos_defined }} + ecmp {{ 'rebalance' if ecmp.rebalance is vyos_defined }} {% endif %} {% if join_prune_interval is vyos_defined %} -ip pim join-prune-interval {{ join_prune_interval }} + join-prune-interval {{ join_prune_interval }} {% endif %} {% if keep_alive_timer is vyos_defined %} -ip pim keep-alive-timer {{ keep_alive_timer }} + keep-alive-timer {{ keep_alive_timer }} {% endif %} {% if packets is vyos_defined %} -ip pim packets {{ packets }} + packets {{ packets }} {% endif %} {% if register_accept_list.prefix_list is vyos_defined %} -ip pim register-accept-list {{ register_accept_list.prefix_list }} + register-accept-list {{ register_accept_list.prefix_list }} {% endif %} {% if register_suppress_time is vyos_defined %} -ip pim register-suppress-time {{ register_suppress_time }} + register-suppress-time {{ register_suppress_time }} {% endif %} {% if rp.address is vyos_defined %} {% for address, address_config in rp.address.items() %} {% for group in address_config.group %} -ip pim rp {{ address }} {{ group }} + rp {{ address }} {{ group }} {% endfor %} {% endfor %} {% endif %} {% if rp.keep_alive_timer is vyos_defined %} -ip pim rp keep-alive-timer {{ rp.keep_alive_timer }} + rp keep-alive-timer {{ rp.keep_alive_timer }} {% endif %} {% if no_v6_secondary is vyos_defined %} -no ip pim send-v6-secondary + no send-v6-secondary {% endif %} {% if spt_switchover.infinity_and_beyond is vyos_defined %} -ip pim spt-switchover infinity-and-beyond {{ 'prefix-list ' ~ spt_switchover.infinity_and_beyond.prefix_list if spt_switchover.infinity_and_beyond.prefix_list is defined }} + spt-switchover infinity-and-beyond {{ 'prefix-list ' ~ spt_switchover.infinity_and_beyond.prefix_list if spt_switchover.infinity_and_beyond.prefix_list is defined }} {% endif %} {% if ssm.prefix_list is vyos_defined %} -ip pim ssm prefix-list {{ ssm.prefix_list }} -{% endif %} -! -{% if igmp.watermark_warning is vyos_defined %} -ip igmp watermark-warn {{ igmp.watermark_warning }} + ssm prefix-list {{ ssm.prefix_list }} {% endif %} +exit ! diff --git a/data/templates/frr/rpki.frr.j2 b/data/templates/frr/rpki.frr.j2 index 59724102c..59d5bf0ac 100644 --- a/data/templates/frr/rpki.frr.j2 +++ b/data/templates/frr/rpki.frr.j2 @@ -1,24 +1,24 @@ ! 
{# as FRR does not support deleting the entire rpki section we leave it in place even when it's empty #} rpki {% if cache is vyos_defined %} {% for peer, peer_config in cache.items() %} {# port is mandatory and preference uses a default value #} {% if peer_config.ssh.username is vyos_defined %} - rpki cache {{ peer | replace('_', '-') }} {{ peer_config.port }} {{ peer_config.ssh.username }} {{ peer_config.ssh.private_key_file }} {{ peer_config.ssh.public_key_file }} preference {{ peer_config.preference }} + rpki cache ssh {{ peer | replace('_', '-') }} {{ peer_config.port }} {{ peer_config.ssh.username }} {{ peer_config.ssh.private_key_file }} {{ peer_config.ssh.public_key_file }} preference {{ peer_config.preference }} {% else %} - rpki cache {{ peer | replace('_', '-') }} {{ peer_config.port }} preference {{ peer_config.preference }} + rpki cache tcp {{ peer | replace('_', '-') }} {{ peer_config.port }} preference {{ peer_config.preference }} {% endif %} {% endfor %} {% endif %} {% if expire_interval is vyos_defined %} rpki expire_interval {{ expire_interval }} {% endif %} {% if polling_period is vyos_defined %} rpki polling_period {{ polling_period }} {% endif %} {% if retry_interval is vyos_defined %} rpki retry_interval {{ retry_interval }} {% endif %} exit ! diff --git a/data/templates/frr/static_mcast.frr.j2 b/data/templates/frr/static_mcast.frr.j2 deleted file mode 100644 index 54b2790b0..000000000 --- a/data/templates/frr/static_mcast.frr.j2 +++ /dev/null @@ -1,11 +0,0 @@ -! -{% for route_gr in mroute %} -{% for nh in mroute[route_gr] %} -{% if mroute[route_gr][nh] %} -ip mroute {{ route_gr }} {{ nh }} {{ mroute[route_gr][nh] }} -{% else %} -ip mroute {{ route_gr }} {{ nh }} -{% endif %} -{% endfor %} -{% endfor %} -! diff --git a/data/templates/frr/static_routes_macro.j2 b/data/templates/frr/static_routes_macro.j2 deleted file mode 100644 index db31700c5..000000000 --- a/data/templates/frr/static_routes_macro.j2 +++ /dev/null @@ -1,31 +0,0 @@ -{% macro static_routes(ip_ipv6, prefix, prefix_config, table=None) %} -{% if prefix_config.blackhole is vyos_defined %} -{{ ip_ipv6 }} route {{ prefix }} blackhole {{ prefix_config.blackhole.distance if prefix_config.blackhole.distance is vyos_defined }} {{ 'tag ' ~ prefix_config.blackhole.tag if prefix_config.blackhole.tag is vyos_defined }} {{ 'table ' ~ table if table is vyos_defined and table is not none }} -{% endif %} -{% if prefix_config.reject is vyos_defined %} -{{ ip_ipv6 }} route {{ prefix }} reject {{ prefix_config.reject.distance if prefix_config.reject.distance is vyos_defined }} {{ 'tag ' ~ prefix_config.reject.tag if prefix_config.reject.tag is vyos_defined }} {{ 'table ' ~ table if table is vyos_defined }} -{% endif %} -{% if prefix_config.dhcp_interface is vyos_defined %} -{% for dhcp_interface in prefix_config.dhcp_interface %} -{% set next_hop = dhcp_interface | get_dhcp_router %} -{% if next_hop is vyos_defined %} -{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} {{ dhcp_interface }} {{ 'table ' ~ table if table is vyos_defined }} -{% endif %} -{% endfor %} -{% endif %} -{% if prefix_config.interface is vyos_defined %} -{% for interface, interface_config in prefix_config.interface.items() if interface_config.disable is not defined %} -{{ ip_ipv6 }} route {{ prefix }} {{ interface }} {{ interface_config.distance if interface_config.distance is vyos_defined }} {{ 'nexthop-vrf ' ~ interface_config.vrf if interface_config.vrf is vyos_defined }} {{ 'segments ' ~ interface_config.segments if interface_config.segments is 
vyos_defined }} {{ 'table ' ~ table if table is vyos_defined }} -{% endfor %} -{% endif %} -{% if prefix_config.next_hop is vyos_defined and prefix_config.next_hop is not none %} -{% for next_hop, next_hop_config in prefix_config.next_hop.items() if next_hop_config.disable is not defined %} -{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} {{ next_hop_config.interface if next_hop_config.interface is vyos_defined }} {{ next_hop_config.distance if next_hop_config.distance is vyos_defined }} {{ 'nexthop-vrf ' ~ next_hop_config.vrf if next_hop_config.vrf is vyos_defined }} {{ 'bfd profile ' ~ next_hop_config.bfd.profile if next_hop_config.bfd.profile is vyos_defined }} {{ 'segments ' ~ next_hop_config.segments if next_hop_config.segments is vyos_defined }} {{ 'table ' ~ table if table is vyos_defined }} -{% if next_hop_config.bfd.multi_hop.source is vyos_defined %} -{% for source, source_config in next_hop_config.bfd.multi_hop.source.items() %} -{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} bfd multi-hop source {{ source }} profile {{ source_config.profile }} -{% endfor %} -{% endif %} -{% endfor %} -{% endif %} -{% endmacro %} diff --git a/data/templates/frr/staticd.frr.j2 b/data/templates/frr/staticd.frr.j2 index 992a0435c..90d17ec14 100644 --- a/data/templates/frr/staticd.frr.j2 +++ b/data/templates/frr/staticd.frr.j2 @@ -1,64 +1,143 @@ -{% from 'frr/static_routes_macro.j2' import static_routes %} +{# Common macro for recurring options for a static route #} +{% macro route_options(route, interface_or_next_hop, config, table) %} +{# j2lint: disable=jinja-statements-delimeter #} +{% set ip_route = route ~ ' ' ~ interface_or_next_hop %} +{% if config.interface is vyos_defined %} +{% set ip_route = ip_route ~ ' ' ~ config.interface %} +{% endif %} +{% if config.tag is vyos_defined %} +{% set ip_route = ip_route ~ ' tag ' ~ config.tag %} +{% endif %} +{% if config.distance is vyos_defined %} +{% set ip_route = ip_route ~ ' ' ~ config.distance %} +{% endif %} +{% if config.bfd is vyos_defined %} +{% set ip_route = ip_route ~ ' bfd' %} +{% if config.bfd.multi_hop is vyos_defined %} +{% set ip_route = ip_route ~ ' multi-hop' %} +{% if config.bfd.multi_hop.source_address is vyos_defined %} +{% set ip_route = ip_route ~ ' source ' ~ config.bfd.multi_hop.source_address %} +{% endif %} +{% endif %} +{% if config.bfd.profile is vyos_defined %} +{% set ip_route = ip_route ~ ' profile ' ~ config.bfd.profile %} +{% endif %} +{% endif %} +{% if config.vrf is vyos_defined %} +{% set ip_route = ip_route ~ ' nexthop-vrf ' ~ config.vrf %} +{% endif %} +{% if config.segments is vyos_defined %} +{# Segments used in/for SRv6 #} +{% set ip_route = ip_route ~ ' segments ' ~ config.segments %} +{% endif %} +{# Routing table to configure #} +{% if table is vyos_defined %} +{% set ip_route = ip_route ~ ' table ' ~ table %} +{% endif %} +{{ ip_route }} +{%- endmacro -%} +{# Build static IPv4/IPv6 route #} +{% macro static_routes(ip_ipv6, prefix, prefix_config, table=None) %} +{% set route = ip_ipv6 ~ 'route ' ~ prefix %} +{% if prefix_config.interface is vyos_defined %} +{% for interface, interface_config in prefix_config.interface.items() if interface_config.disable is not defined %} +{{ route_options(route, interface, interface_config, table) }} +{% endfor %} +{% endif %} +{% if prefix_config.next_hop is vyos_defined and prefix_config.next_hop is not none %} +{% for next_hop, next_hop_config in prefix_config.next_hop.items() if next_hop_config.disable is not defined %} +{{ route_options(route, next_hop, 
next_hop_config, table) }} +{% endfor %} +{% endif %} +{% if prefix_config.dhcp_interface is vyos_defined %} +{% for dhcp_interface in prefix_config.dhcp_interface %} +{% set next_hop = dhcp_interface | get_dhcp_router %} +{% if next_hop is vyos_defined %} +{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} {{ dhcp_interface }} {{ 'table ' ~ table if table is vyos_defined }} +{% endif %} +{% endfor %} +{% endif %} +{% if prefix_config.blackhole is vyos_defined %} +{{ route_options(route, 'blackhole', prefix_config.blackhole, table) }} +{% elif prefix_config.reject is vyos_defined %} +{{ route_options(route, 'reject', prefix_config.reject, table) }} +{% endif %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endmacro -%} ! -{% set ip_prefix = 'ip' %} -{% set ipv6_prefix = 'ipv6' %} +{% set ip_prefix = 'ip ' %} +{% set ipv6_prefix = 'ipv6 ' %} {% if vrf is vyos_defined %} {# We need to add an additional whitespace in front of the prefix #} {# when VRFs are in use, thus we use a variable for prefix handling #} -{% set ip_prefix = ' ip' %} -{% set ipv6_prefix = ' ipv6' %} +{% set ip_prefix = ' ip ' %} +{% set ipv6_prefix = ' ipv6 ' %} vrf {{ vrf }} {% endif %} {# IPv4 routing #} {% if route is vyos_defined %} {% for prefix, prefix_config in route.items() %} {{ static_routes(ip_prefix, prefix, prefix_config) }} -{% endfor %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} {# IPv4 default routes from DHCP interfaces #} {% if dhcp is vyos_defined %} {% for interface, interface_config in dhcp.items() if interface_config.dhcp_options.no_default_route is not vyos_defined %} {% set next_hop = interface | get_dhcp_router %} {% if next_hop is vyos_defined %} {{ ip_prefix }} route 0.0.0.0/0 {{ next_hop }} {{ interface }} tag 210 {{ interface_config.dhcp_options.default_route_distance if interface_config.dhcp_options.default_route_distance is vyos_defined }} {% endif %} {% endfor %} {% endif %} {# IPv4 default routes from PPPoE interfaces #} {% if pppoe is vyos_defined %} {% for interface, interface_config in pppoe.items() if interface_config.no_default_route is not vyos_defined %} {{ ip_prefix }} route 0.0.0.0/0 {{ interface }} tag 210 {{ interface_config.default_route_distance if interface_config.default_route_distance is vyos_defined }} -{% endfor %} +{%- endfor %} {% endif %} {# IPv6 routing #} {% if route6 is vyos_defined %} {% for prefix, prefix_config in route6.items() %} {{ static_routes(ipv6_prefix, prefix, prefix_config) }} -{% endfor %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} {% if vrf is vyos_defined %} exit-vrf {% endif %} ! {# Policy route tables #} {% if table is vyos_defined %} {% for table_id, table_config in table.items() %} {% if table_config.route is vyos_defined %} {% for prefix, prefix_config in table_config.route.items() %} -{{ static_routes('ip', prefix, prefix_config, table_id) }} -{% endfor %} +{{ static_routes('ip ', prefix, prefix_config, table_id) }} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} ! {% if table_config.route6 is vyos_defined %} {% for prefix, prefix_config in table_config.route6.items() %} -{{ static_routes('ipv6', prefix, prefix_config, table_id) }} -{% endfor %} +{{ static_routes('ipv6 ', prefix, prefix_config, table_id) }} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} ! {% endfor %} {% endif %} ! 
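The rewritten staticd.frr.j2 above replaces the old one-line-per-case macro with a single route_options macro that appends optional tokens (interface, tag, distance, BFD, nexthop-vrf, segments, table) to one command string. A rough Python equivalent of that assembly is sketched below, assuming a plain dict with the same key names as the template; it is illustrative only, not VyOS code. Note also that the same static_routes macro is reused further down for multicast routes simply by passing 'ip m' as the address-family prefix, so the assembled command begins with 'ip mroute'.

# Rough Python equivalent of the route_options macro's string assembly, assuming a plain
# dict that mirrors the template keys (interface, tag, distance, bfd, vrf, segments).
# Illustration only; VyOS performs this in Jinja2, not Python.
def route_options(route, interface_or_next_hop, config, table=None):
    cmd = f'{route} {interface_or_next_hop}'
    if 'interface' in config:
        cmd += f' {config["interface"]}'
    if 'tag' in config:
        cmd += f' tag {config["tag"]}'
    if 'distance' in config:
        cmd += f' {config["distance"]}'
    if 'bfd' in config:
        cmd += ' bfd'
        bfd = config['bfd']
        if 'multi_hop' in bfd:
            cmd += ' multi-hop'
            if 'source_address' in bfd['multi_hop']:
                cmd += f' source {bfd["multi_hop"]["source_address"]}'
        if 'profile' in bfd:
            cmd += f' profile {bfd["profile"]}'
    if 'vrf' in config:
        cmd += f' nexthop-vrf {config["vrf"]}'
    if 'segments' in config:
        cmd += f' segments {config["segments"]}'
    if table is not None:
        cmd += f' table {table}'
    return cmd

# Example: next-hop route with a tag and a BFD profile
print(route_options('ip route 203.0.113.0/24', '192.0.2.1',
                    {'tag': '100', 'bfd': {'profile': 'upstream'}}))
# -> ip route 203.0.113.0/24 192.0.2.1 tag 100 bfd profile upstream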
+{# Multicast route #} +{% if mroute is vyos_defined %} +{% set ip_prefix = 'ip m' %} +{# IPv4 multicast routing #} +{% for prefix, prefix_config in mroute.items() %} +{{ static_routes(ip_prefix, prefix, prefix_config) }} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} +{% endif %} +! {% if route_map is vyos_defined %} ip protocol static route-map {{ route_map }} ! {% endif %} diff --git a/data/templates/frr/zebra.route-map.frr.j2 b/data/templates/frr/zebra.route-map.frr.j2 index 669d58354..70a810f43 100644 --- a/data/templates/frr/zebra.route-map.frr.j2 +++ b/data/templates/frr/zebra.route-map.frr.j2 @@ -1,14 +1,16 @@ ! +{{ 'no ' if disable_forwarding is vyos_defined }}{{ afi }} forwarding +! {% if nht.no_resolve_via_default is vyos_defined %} no {{ afi }} nht resolve-via-default {% endif %} ! {% if protocol is vyos_defined %} {% for protocol_name, protocol_config in protocol.items() %} {% if protocol_name is vyos_defined('ospfv3') %} {% set protocol_name = 'ospf6' %} {% endif %} {{ afi }} protocol {{ protocol_name }} route-map {{ protocol_config.route_map }} {% endfor %} {% endif %} ! diff --git a/data/templates/frr/zebra.vrf.route-map.frr.j2 b/data/templates/frr/zebra.vrf.route-map.frr.j2 index 8ebb82511..656b31deb 100644 --- a/data/templates/frr/zebra.vrf.route-map.frr.j2 +++ b/data/templates/frr/zebra.vrf.route-map.frr.j2 @@ -1,30 +1,30 @@ ! {% if name is vyos_defined %} {% for vrf, vrf_config in name.items() %} vrf {{ vrf }} {% if vrf_config.ip.nht.no_resolve_via_default is vyos_defined %} no ip nht resolve-via-default {% endif %} {% if vrf_config.ipv6.nht.no_resolve_via_default is vyos_defined %} no ipv6 nht resolve-via-default {% endif %} {% if vrf_config.ip.protocol is vyos_defined %} {% for protocol_name, protocol_config in vrf_config.ip.protocol.items() %} ip protocol {{ protocol_name }} route-map {{ protocol_config.route_map }} {% endfor %} {% endif %} {% if vrf_config.ipv6.protocol is vyos_defined %} {% for protocol_name, protocol_config in vrf_config.ipv6.protocol.items() %} {% if protocol_name is vyos_defined('ospfv3') %} {% set protocol_name = 'ospf6' %} {% endif %} ipv6 protocol {{ protocol_name }} route-map {{ protocol_config.route_map }} {% endfor %} {% endif %} {% if vrf_config.vni is vyos_defined %} vni {{ vrf_config.vni }} {% endif %} exit-vrf -{% endfor %} ! 
+{% endfor %} {% endif %} diff --git a/debian/control b/debian/control index a19461412..76ca83dcd 100644 --- a/debian/control +++ b/debian/control @@ -1,390 +1,390 @@ Source: vyos-1x Section: contrib/net Priority: extra Maintainer: VyOS Package Maintainers <maintainers@vyos.net> Build-Depends: debhelper (>= 9), dh-python, fakeroot, gcc, iproute2, libvyosconfig0 (>= 0.0.7), libzmq3-dev, python3 (>= 3.10), # For QA pylint, # For generating command definitions python3-lxml, python3-xmltodict, # For running tests python3-coverage, python3-hurry.filesize, python3-netaddr, python3-netifaces, python3-nose, python3-jinja2, python3-paramiko, python3-passlib, python3-psutil, python3-requests, python3-setuptools, python3-tabulate, python3-zmq, quilt, whois Standards-Version: 3.9.6 Package: vyos-1x Architecture: amd64 arm64 Pre-Depends: libpam-runtime [amd64], libnss-tacplus [amd64], libpam-tacplus [amd64], libpam-radius-auth [amd64] Depends: ## Fundamentals ${python3:Depends} (>= 3.10), dialog, libvyosconfig0, libpam-cap, bash-completion, ipvsadm, udev, less, at, rsync, vyatta-bash, vyatta-biosdevname, vyatta-cfg, vyos-http-api-tools, vyos-utils, ## End of Fundamentals ## Python libraries used in multiple modules and scripts python3, python3-cryptography, python3-hurry.filesize, python3-inotify, python3-jinja2, python3-jmespath, python3-netaddr, python3-netifaces, python3-paramiko, python3-passlib, python3-pyroute2, python3-psutil, python3-pyhumps, python3-pystache, python3-pyudev, python3-six, python3-tabulate, python3-voluptuous, python3-xmltodict, python3-zmq, ## End of Python libraries ## Basic System services and utilities coreutils, sudo, systemd, bsdmainutils, openssl, curl, dbus, file, iproute2 (>= 6.0.0), linux-cpupower, # ipaddrcheck is widely used in IP value validators ipaddrcheck, ethtool (>= 6.10), lm-sensors, procps, netplug, sed, ssl-cert, tuned, beep, wide-dhcpv6-client, # Generic colorizer grc, ## End of System services and utilities ## For the installer fdisk, gdisk, mdadm, efibootmgr, libefivar1, dosfstools, grub-efi-amd64-signed [amd64], grub-efi-arm64-bin [arm64], mokutil [amd64], shim-signed [amd64], sbsigntool [amd64], # Image signature verification tool minisign, # Live filesystem tools squashfs-tools, fuse-overlayfs, ## End installer auditd, iputils-arping, iputils-ping, isc-dhcp-client, # For "vpn pptp", "vpn l2tp", "vpn sstp", "service ipoe-server" accel-ppp, # End "vpn pptp", "vpn l2tp", "vpn sstp", "service ipoe-server" avahi-daemon, conntrack, conntrackd, ## Conf mode features # For "interfaces wireless" hostapd, hsflowd, iw, wireless-regdb, wpasupplicant (>= 0.6.7), # End "interfaces wireless" # For "interfaces wwan" modemmanager, usb-modeswitch, libqmi-utils, # End "interfaces wwan" # For "interfaces openvpn" openvpn, openvpn-auth-ldap, openvpn-auth-radius, openvpn-otp, openvpn-dco, libpam-google-authenticator, # End "interfaces openvpn" # For "interfaces wireguard" wireguard-tools, qrencode, # End "interfaces wireguard" # For "interfaces pppoe" pppoe, # End "interfaces pppoe" # For "interfaces sstpc" sstp-client, # End "interfaces sstpc" # For "protocols *" - frr (>= 9.1), + frr (>= 10.2), frr-pythontools, frr-rpki-rtrlib, frr-snmp, # End "protocols *" # For "protocols nhrp" (part of DMVPN) opennhrp, # End "protocols nhrp" # For "protocols igmp-proxy" igmpproxy, # End "protocols igmp-proxy" # For "pki" certbot, # End "pki" # For "service console-server" conserver-client, conserver-server, console-data, dropbear, # End "service console-server" # For "service aws glb" 
aws-gwlbtun, # For "service dns dynamic" ddclient (>= 3.11.1), # End "service dns dynamic" # # For "service ids" fastnetmon [amd64], suricata, suricata-update, # End "service ids" # # For "service ndp-proxy" ndppd, # End "service ndp-proxy" # For "service router-advert" radvd, # End "service route-advert" # For "load-balancing haproxy" haproxy, # End "load-balancing haproxy" # For "load-balancing wan" vyatta-wanloadbalance, # End "load-balancing wan" # For "service dhcp-relay" isc-dhcp-relay, # For "service dhcp-server" kea, # End "service dhcp-server" # For "service lldp" lldpd, # End "service lldp" # For "service https" nginx-light, # End "service https" # For "service ssh" openssh-server, sshguard, # End "service ssh" # For "service salt-minion" salt-minion, # End "service salt-minion" # For "service snmp" snmp, snmpd, # End "service snmp" # For "service webproxy" squid, squidclient, squidguard, # End "service webproxy" # For "service monitoring node-exporter" node-exporter, # End "service monitoring node-exporter" # For "service monitoring frr-exporter" frr-exporter, # End "service monitoring frr-exporter" # For "service monitoring telegraf" telegraf (>= 1.20), # End "service monitoring telegraf" # For "service monitoring zabbix-agent" zabbix-agent2, # End "service monitoring zabbix-agent" # For "service tftp-server" tftpd-hpa, # End "service tftp-server" # For "service dns forwarding" pdns-recursor, # End "service dns forwarding" # For "service sla owamp" owamp-client, owamp-server, # End "service sla owamp" # For "service sla twamp" twamp-client, twamp-server, # End "service sla twamp" # For "service broadcast-relay" udp-broadcast-relay, # End "service broadcast-relay" # For "high-availability vrrp" keepalived (>=2.0.5), # End "high-availability-vrrp" # For "system console" util-linux, # End "system console" # For "system task-scheduler" cron, # End "system task-scheduler" # For "system lcd" lcdproc, lcdproc-extra-drivers, # End "system lcd" # For "system config-management commit-archive" git, # End "system config-management commit-archive" # For firewall libndp-tools, libnetfilter-conntrack3, libnfnetlink0, nfct, nftables (>= 0.9.3), # For "vpn ipsec" strongswan (>= 5.9), strongswan-swanctl (>= 5.9), charon-systemd, libcharon-extra-plugins (>=5.9), libcharon-extauth-plugins (>=5.9), libstrongswan-extra-plugins (>=5.9), libstrongswan-standard-plugins (>=5.9), python3-vici (>= 5.7.2), # End "vpn ipsec" # For "nat64" jool, # End "nat64" # For "system conntrack modules rtsp" nat-rtsp, # End "system conntrack modules rtsp" # For "service ntp" chrony, # End "system ntp" # For "vpn openconnect" ocserv, # End "vpn openconnect" # For "system flow-accounting" pmacct (>= 1.6.0), # End "system flow-accounting" # For "system syslog" rsyslog, # End "system syslog" # For "system option keyboard-layout" kbd, # End "system option keyboard-layout" # For "container" podman (>=4.9.5), netavark, aardvark-dns, # iptables is only used for containers now, not the the firewall CLI iptables, # End container ## End Configuration mode ## Operational mode # Used for hypervisor model in "run show version" hvinfo, # For "run traceroute" traceroute, # For "run monitor traffic" tcpdump, # End "run monitor traffic" # For "show hardware dmi" dmidecode, # For "run show hardware storage smart" smartmontools, # For "run show hardware scsi" lsscsi, # For "run show hardware pci" pciutils, # For "show hardware usb" usbutils, # For "run show hardware storage nvme" nvme-cli, # For "run monitor bandwidth-test" iperf, iperf3, 
# End "run monitor bandwidth-test" # For "run wake-on-lan" etherwake, # For "run force ipv6-nd" ndisc6, # For "run monitor bandwidth" bmon, # For "run format disk" parted, # End Operational mode ## TPM tools cryptsetup, tpm2-tools, ## End TPM tools ## Optional utilities easy-rsa, tcptraceroute, mtr-tiny, telnet, stunnel4, uidmap ## End optional utilities Description: VyOS configuration scripts and data VyOS configuration scripts, interface definitions, and everything Package: vyos-1x-vmware Architecture: amd64 Depends: vyos-1x, open-vm-tools Description: VyOS configuration scripts and data for VMware Adds configuration files required for VyOS running on VMware hosts. Package: vyos-1x-smoketest Architecture: all Depends: skopeo, snmp, vyos-1x Description: VyOS build sanity checking toolkit diff --git a/debian/rules b/debian/rules index c15fcab11..d7c427b0d 100755 --- a/debian/rules +++ b/debian/rules @@ -1,146 +1,148 @@ #!/usr/bin/make -f DIR := debian/tmp VYOS_SBIN_DIR := usr/sbin VYOS_BIN_DIR := usr/bin VYOS_LIBEXEC_DIR := usr/libexec/vyos VYOS_DATA_DIR := usr/share/vyos VYOS_CFG_TMPL_DIR := opt/vyatta/share/vyatta-cfg/templates VYOS_OP_TMPL_DIR := opt/vyatta/share/vyatta-op/templates VYOS_MIBS_DIR := usr/share/snmp/mibs VYOS_LOCALUI_DIR := srv/localui VYCONF_CONFIG_DIR := $(VYOS_LIBEXEC_DIR)/vyconf/config MIGRATION_SCRIPTS_DIR := opt/vyatta/etc/config-migrate/migrate ACTIVATION_SCRIPTS_DIR := usr/libexec/vyos/activate SYSTEM_SCRIPTS_DIR := usr/libexec/vyos/system SERVICES_DIR := usr/libexec/vyos/services DEB_TARGET_ARCH := $(shell dpkg-architecture -qDEB_TARGET_ARCH) %: dh $@ --with python3, --with quilt # Skip dh_strip_nondeterminism - this is very time consuming # and we have no non deterministic output (yet) override_dh_strip_nondeterminism: override_dh_gencontrol: dh_gencontrol -- -v$(shell (git describe --tags --long --match 'vyos/*' --match '1.4.*' --dirty 2>/dev/null || echo 0.0-no.git.tag) | sed -E 's%vyos/%%' | sed -E 's%-dirty%+dirty%') override_dh_auto_build: make all override_dh_auto_install: dh_auto_install cd python; python3 setup.py install --install-layout=deb --root ../$(DIR); cd .. 
# Install scripts mkdir -p $(DIR)/$(VYOS_SBIN_DIR) mkdir -p $(DIR)/$(VYOS_BIN_DIR) cp -r src/utils/* $(DIR)/$(VYOS_BIN_DIR) cp src/shim/vyshim $(DIR)/$(VYOS_SBIN_DIR) # Install conf mode scripts mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/conf_mode cp -r src/conf_mode/* $(DIR)/$(VYOS_LIBEXEC_DIR)/conf_mode # Install op mode scripts mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/op_mode cp -r src/op_mode/* $(DIR)/$(VYOS_LIBEXEC_DIR)/op_mode # Install op mode scripts mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/init cp -r src/init/* $(DIR)/$(VYOS_LIBEXEC_DIR)/init # Install validators mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/validators cp -r src/validators/* $(DIR)/$(VYOS_LIBEXEC_DIR)/validators # Install completion helpers mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/completion cp -r src/completion/* $(DIR)/$(VYOS_LIBEXEC_DIR)/completion # Install helper scripts cp -r src/helpers/* $(DIR)/$(VYOS_LIBEXEC_DIR)/ # Install migration scripts mkdir -p $(DIR)/$(MIGRATION_SCRIPTS_DIR) cp -r src/migration-scripts/* $(DIR)/$(MIGRATION_SCRIPTS_DIR) # Install activation scripts mkdir -p $(DIR)/$(ACTIVATION_SCRIPTS_DIR) cp -r src/activation-scripts/* $(DIR)/$(ACTIVATION_SCRIPTS_DIR) # Install system scripts mkdir -p $(DIR)/$(SYSTEM_SCRIPTS_DIR) cp -r src/system/* $(DIR)/$(SYSTEM_SCRIPTS_DIR) # Install system services mkdir -p $(DIR)/$(SERVICES_DIR) cp -r src/services/* $(DIR)/$(SERVICES_DIR) # Install configuration command definitions mkdir -p $(DIR)/$(VYOS_CFG_TMPL_DIR) cp -r templates-cfg/* $(DIR)/$(VYOS_CFG_TMPL_DIR) # Install operational command definitions mkdir -p $(DIR)/$(VYOS_OP_TMPL_DIR) cp -r templates-op/* $(DIR)/$(VYOS_OP_TMPL_DIR) # Install data files mkdir -p $(DIR)/$(VYCONF_CONFIG_DIR) cp -r data/reftree.cache $(DIR)/$(VYCONF_CONFIG_DIR) mkdir -p $(DIR)/$(VYOS_DATA_DIR) cp -r data/* $(DIR)/$(VYOS_DATA_DIR) + # Remove j2lint comments / linter configuration which would insert additional new-lines + find $(DIR)/$(VYOS_DATA_DIR) -name "*.j2" -type f | xargs sed -i -e '/^{#.*#}/d' # Create localui dir mkdir -p $(DIR)/$(VYOS_LOCALUI_DIR) # Install SNMP MIBs mkdir -p $(DIR)/$(VYOS_MIBS_DIR) cp -d mibs/* $(DIR)/$(VYOS_MIBS_DIR) # Install etc configuration files mkdir -p $(DIR)/etc cp -r src/etc/* $(DIR)/etc # Install legacy Vyatta files mkdir -p $(DIR)/opt cp -r src/opt/* $(DIR)/opt # Install PAM configuration snippets mkdir -p $(DIR)/usr/share/pam-configs cp -r src/pam-configs/* $(DIR)/usr/share/pam-configs # Install systemd service units mkdir -p $(DIR)/lib/systemd/system cp -r src/systemd/* $(DIR)/lib/systemd/system # Make directory for generated configuration file mkdir -p $(DIR)/etc/vyos # Install smoke test scripts mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/tests/smoke/ cp -r smoketest/scripts/* $(DIR)/$(VYOS_LIBEXEC_DIR)/tests/smoke # Install smoke test configs mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/tests/config/ cp -r smoketest/configs/* $(DIR)/$(VYOS_LIBEXEC_DIR)/tests/config # Install smoke test config tests mkdir -p $(DIR)/$(VYOS_LIBEXEC_DIR)/tests/config-tests/ cp -r smoketest/config-tests/* $(DIR)/$(VYOS_LIBEXEC_DIR)/tests/config-tests # Install system programs mkdir -p $(DIR)/$(VYOS_BIN_DIR) cp -r smoketest/bin/* $(DIR)/$(VYOS_BIN_DIR) # Install udev script mkdir -p $(DIR)/usr/lib/udev cp src/helpers/vyos_net_name $(DIR)/usr/lib/udev override_dh_installsystemd: dh_installsystemd -pvyos-1x --name vyos-router vyos-router.service dh_installsystemd -pvyos-1x --name vyos vyos.target diff --git a/debian/vyos-1x.postinst b/debian/vyos-1x.postinst index d83634cfc..ff5a91e09 100644 --- a/debian/vyos-1x.postinst +++ b/debian/vyos-1x.postinst 
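One detail of the debian/rules change worth spelling out: the added find/sed step deletes only template lines that start with a Jinja2 comment ('/^{#.*#}/d'), so directives such as the j2lint markers never end up as extra blank lines in the rendered FRR configuration, while comments that follow actual content on the same line are untouched. The short Python check below mimics that pattern on invented sample lines; it is an illustration, not part of the build.

# Sketch of which lines the '/^{#.*#}/d' sed expression added to debian/rules would drop.
# The sample lines below are invented for illustration.
import re

comment_only = re.compile(r'^\{#.*#\}')

sample = [
    '{# j2lint: disable=jinja-statements-delimeter #}',  # dropped: the line starts with a comment
    '{% if table is vyos_defined %}',                    # kept: a normal statement line
    'ip route 0.0.0.0/0 192.0.2.1 {# inline note #}',    # kept: comment is not at line start
]

for line in sample:
    action = 'drop' if comment_only.match(line) else 'keep'
    print(f'{action}: {line}')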
@@ -1,269 +1,274 @@ #!/bin/bash # Turn off Debian default for %sudo sed -i -e '/^%sudo/d' /etc/sudoers || true # Add minion user for salt-minion if ! grep -q '^minion' /etc/passwd; then adduser --quiet --firstuid 100 --system --disabled-login --ingroup vyattacfg \ --gecos "salt minion user" --shell /bin/vbash minion adduser --quiet minion frrvty adduser --quiet minion sudo adduser --quiet minion adm adduser --quiet minion dip adduser --quiet minion disk adduser --quiet minion users adduser --quiet minion frr fi # OpenVPN should get its own user if ! grep -q '^openvpn' /etc/passwd; then adduser --quiet --firstuid 100 --system --group --shell /usr/sbin/nologin openvpn fi # node_exporter should get its own user if ! grep -q '^node_exporter' /etc/passwd; then adduser --quiet --firstuid 100 --system --group --shell /bin/false node_exporter fi # We need to have a group for RADIUS service users to use it inside PAM rules if ! grep -q '^radius' /etc/group; then addgroup --firstgid 1000 --quiet radius fi # Remove TACACS user added by base package - we use our own UID range and group # assignments - see below if grep -q '^tacacs' /etc/passwd; then if [ $(id -u tacacs0) -ge 1000 ]; then level=0 vyos_group=vyattaop while [ $level -lt 16 ]; do userdel tacacs${level} || true rm -rf /home/tacacs${level} || true level=$(( level+1 )) done 2>&1 fi fi # Remove TACACS+ PAM default profile if [[ -e /usr/share/pam-configs/tacplus ]]; then rm /usr/share/pam-configs/tacplus fi # Add TACACS system users required for TACACS based system authentication if ! grep -q '^tacacs' /etc/passwd; then # Add the tacacs group and all 16 possible tacacs privilege-level users to # the password file, home directories, etc. The accounts are not enabled # for local login, since they are only used to provide uid/gid/homedir for # the mapped TACACS+ logins (and lookups against them). The tacacs15 user # is also added to the sudo group, and vyattacfg group rather than vyattaop # (used for tacacs0-14). level=0 vyos_group=vyattaop while [ $level -lt 16 ]; do adduser --quiet --system --firstuid 900 --disabled-login --ingroup tacacs \ --no-create-home --gecos "TACACS+ mapped user at privilege level ${level}" \ --shell /bin/vbash tacacs${level} adduser --quiet tacacs${level} frrvty adduser --quiet tacacs${level} adm adduser --quiet tacacs${level} dip adduser --quiet tacacs${level} users if [ $level -lt 15 ]; then adduser --quiet tacacs${level} vyattaop adduser --quiet tacacs${level} operator else adduser --quiet tacacs${level} vyattacfg adduser --quiet tacacs${level} sudo adduser --quiet tacacs${level} disk adduser --quiet tacacs${level} frr adduser --quiet tacacs${level} _kea fi level=$(( level+1 )) done 2>&1 | grep -v "User tacacs${level} already exists" fi # Add RADIUS operator user for RADIUS authenticated users to map to if ! grep -q '^radius_user' /etc/passwd; then adduser --quiet --firstuid 1000 --disabled-login --ingroup radius \ --no-create-home --gecos "RADIUS mapped user at privilege level operator" \ --shell /sbin/radius_shell radius_user adduser --quiet radius_user frrvty adduser --quiet radius_user vyattaop adduser --quiet radius_user operator adduser --quiet radius_user adm adduser --quiet radius_user dip adduser --quiet radius_user users fi # Add RADIUS admin user for RADIUS authenticated users to map to if ! 
grep -q '^radius_priv_user' /etc/passwd; then adduser --quiet --firstuid 1000 --disabled-login --ingroup radius \ --no-create-home --gecos "RADIUS mapped user at privilege level admin" \ --shell /sbin/radius_shell radius_priv_user adduser --quiet radius_priv_user frrvty adduser --quiet radius_priv_user vyattacfg adduser --quiet radius_priv_user sudo adduser --quiet radius_priv_user adm adduser --quiet radius_priv_user dip adduser --quiet radius_priv_user disk adduser --quiet radius_priv_user users adduser --quiet radius_priv_user frr adduser --quiet radius_priv_user _kea fi # add hostsd group for vyos-hostsd if ! grep -q '^hostsd' /etc/group; then addgroup --quiet --system hostsd fi # Add _kea user for kea-dhcp{4,6}-server to vyattacfg # The user should exist via kea-common installed as transitive dependency if grep -q '^_kea' /etc/passwd; then adduser --quiet _kea vyattacfg fi # ensure the proxy user has a proper shell chsh -s /bin/sh proxy # Set file capabilities setcap cap_net_admin=pe /sbin/ethtool setcap cap_net_admin=pe /sbin/tc setcap cap_net_admin=pe /bin/ip setcap cap_net_admin=pe /sbin/xtables-legacy-multi setcap cap_net_admin=pe /sbin/xtables-nft-multi setcap cap_net_admin=pe /usr/sbin/conntrack setcap cap_net_admin=pe /usr/sbin/arp setcap cap_net_raw=pe /usr/bin/tcpdump setcap cap_net_admin,cap_sys_admin=pe /sbin/sysctl setcap cap_sys_module=pe /bin/kmod setcap cap_sys_time=pe /bin/date # create needed directories mkdir -p /var/log/user mkdir -p /var/core mkdir -p /opt/vyatta/etc/config/auth mkdir -p /opt/vyatta/etc/config/scripts mkdir -p /opt/vyatta/etc/config/user-data mkdir -p /opt/vyatta/etc/config/support chown -R root:vyattacfg /opt/vyatta/etc/config chmod -R 775 /opt/vyatta/etc/config mkdir -p /opt/vyatta/etc/logrotate mkdir -p /opt/vyatta/etc/netdevice.d touch /etc/environment if [ ! -f /etc/bash_completion ]; then echo "source /etc/bash_completion.d/10vyatta-op" > /etc/bash_completion echo "source /etc/bash_completion.d/20vyatta-cfg" >> /etc/bash_completion fi sed -i 's/^set /builtin set /' /etc/bash_completion # Fix up PAM configuration for login so that invalid users are prompted # for password sed -i 's/requisite[ \t][ \t]*pam_securetty.so/required pam_securetty.so/' $rootfsdir/etc/pam.d/login # Change default shell for new accounts sed -i -e ':^DSHELL:s:/bin/bash:/bin/vbash:' /etc/adduser.conf # Do not allow users to change full name field (controlled by vyos-1x) sed -i -e 's/^CHFN_RESTRICT/#&/' /etc/login.defs # Only allow root to use passwd command if ! grep -q 'pam_succeed_if.so' /etc/pam.d/passwd ; then sed -i -e '/^@include/i \ password requisite pam_succeed_if.so user = root ' /etc/pam.d/passwd fi # remove unnecessary ddclient script in /etc/ppp/ip-up.d/ # this logs unnecessary messages trying to start ddclient rm -f /etc/ppp/ip-up.d/ddclient # create /opt/vyatta/etc/config/scripts/vyos-preconfig-bootup.script PRECONFIG_SCRIPT=/opt/vyatta/etc/config/scripts/vyos-preconfig-bootup.script if [ ! -x $PRECONFIG_SCRIPT ]; then mkdir -p $(dirname $PRECONFIG_SCRIPT) touch $PRECONFIG_SCRIPT chmod 755 $PRECONFIG_SCRIPT cat <<EOF >>$PRECONFIG_SCRIPT #!/bin/sh # This script is executed at boot time before VyOS configuration is applied. # Any modifications required to work around unfixed bugs or use # services not available through the VyOS CLI system can be placed here. EOF fi # create /opt/vyatta/etc/config/scripts/vyos-postconfig-bootup.script POSTCONFIG_SCRIPT=/opt/vyatta/etc/config/scripts/vyos-postconfig-bootup.script if [ ! 
-x $POSTCONFIG_SCRIPT ]; then mkdir -p $(dirname $POSTCONFIG_SCRIPT) touch $POSTCONFIG_SCRIPT chmod 755 $POSTCONFIG_SCRIPT cat <<EOF >>$POSTCONFIG_SCRIPT #!/bin/sh # This script is executed at boot time after VyOS configuration is fully applied. # Any modifications required to work around unfixed bugs # or use services not available through the VyOS CLI system can be placed here. EOF fi # symlink destination is deleted during ISO assembly - this generates some noise # when the system boots: systemd-sysv-generator[1881]: stat() failed on # /etc/init.d/README, ignoring: No such file or directory. Thus we simply drop # the file. if [ -L /etc/init.d/README ]; then rm -f /etc/init.d/README fi # Remove unwanted daemon files from /etc # conntackd # pmacct # fastnetmon # ntp DELETE="/etc/logrotate.d/conntrackd.distrib /etc/init.d/conntrackd /etc/default/conntrackd /etc/default/pmacctd /etc/pmacct /etc/networks_list /etc/networks_whitelist /etc/fastnetmon.conf /etc/ntp.conf /etc/default/ssh /etc/avahi/avahi-daemon.conf /etc/avahi/hosts /etc/powerdns /etc/default/pdns-recursor /etc/ppp/ip-up.d/0000usepeerdns /etc/ppp/ip-down.d/0000usepeerdns" for tmp in $DELETE; do if [ -e ${tmp} ]; then rm -rf ${tmp} fi done # Remove logrotate items controlled via CLI and VyOS defaults sed -i '/^\/var\/log\/messages$/d' /etc/logrotate.d/rsyslog sed -i '/^\/var\/log\/auth.log$/d' /etc/logrotate.d/rsyslog # Fix FRR pam.d "vtysh_pam" vtysh_pam: Failed in account validation T5110 if test -f /etc/pam.d/frr; then if grep -q 'pam_rootok.so' /etc/pam.d/frr; then sed -i -re 's/rootok/permit/' /etc/pam.d/frr fi fi # Enable Cloud-init pre-configuration service systemctl enable vyos-config-cloud-init.service # Enable Podman API systemctl enable podman.service # Generate API GraphQL schema /usr/libexec/vyos/services/api/graphql/generate/generate_schema.py # Update XML cache python3 /usr/lib/python3/dist-packages/vyos/xml_ref/update_cache.py # Generate hardlinks for systemd units for multi VRF support # as softlinks will fail in systemd: # symlink target name type "ssh.service" does not match source, rejecting. if [ ! 
-f /lib/systemd/system/ssh@.service ]; then ln /lib/systemd/system/ssh.service /lib/systemd/system/ssh@.service fi # T4287 - as we have a non-signed kernel use the upstream wireless reulatory database update-alternatives --set regulatory.db /lib/firmware/regulatory.db-upstream + +# Restart vyos-configd to apply changes in Python scripts/templates +if systemctl is-active --quiet vyos-configd; then + systemctl restart vyos-configd +fi diff --git a/interface-definitions/include/ospf/protocol-common-config.xml.i b/interface-definitions/include/ospf/protocol-common-config.xml.i index c4778e126..cef832381 100644 --- a/interface-definitions/include/ospf/protocol-common-config.xml.i +++ b/interface-definitions/include/ospf/protocol-common-config.xml.i @@ -1,959 +1,961 @@ <!-- include start from ospf/protocol-common-config.xml.i --> <node name="aggregation"> <properties> <help>External route aggregation</help> </properties> <children> <leafNode name="timer"> <properties> <help>Delay timer</help> <valueHelp> <format>u32:5-1800</format> <description>Timer interval in seconds</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 5-1800"/> </constraint> </properties> <defaultValue>5</defaultValue> </leafNode> </children> </node> <tagNode name="access-list"> <properties> <help>Access list to filter networks in routing updates</help> <completionHelp> <path>policy access-list</path> </completionHelp> <valueHelp> <format>u32</format> <description>Access-list number</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-4294967295"/> </constraint> </properties> <children> <leafNode name="export"> <properties> <help>Filter for outgoing routing update</help> <completionHelp> <list>bgp connected kernel rip static</list> </completionHelp> <valueHelp> <format>bgp</format> <description>Filter BGP routes</description> </valueHelp> <valueHelp> <format>connected</format> <description>Filter connected routes</description> </valueHelp> <valueHelp> <format>isis</format> <description>Filter IS-IS routes</description> </valueHelp> <valueHelp> <format>kernel</format> <description>Filter Kernel routes</description> </valueHelp> <valueHelp> <format>rip</format> <description>Filter RIP routes</description> </valueHelp> <valueHelp> <format>static</format> <description>Filter static routes</description> </valueHelp> <constraint> <regex>(bgp|connected|isis|kernel|rip|static)</regex> </constraint> <constraintErrorMessage>Must be bgp, connected, kernel, rip, or static</constraintErrorMessage> <multi/> </properties> </leafNode> </children> </tagNode> <tagNode name="area"> <properties> <help>OSPF area settings</help> <valueHelp> <format>u32</format> <description>OSPF area number in decimal notation</description> </valueHelp> <valueHelp> <format>ipv4</format> <description>OSPF area number in dotted decimal notation</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-4294967295"/> <validator name="ip-address"/> </constraint> </properties> <children> <node name="area-type"> <properties> <help>Area type</help> </properties> <children> <leafNode name="normal"> <properties> <help>Normal OSPF area</help> <valueless/> </properties> </leafNode> <node name="nssa"> <properties> <help>Not-So-Stubby OSPF area</help> </properties> <children> <leafNode name="default-cost"> <properties> <help>Summary-default cost of an NSSA area</help> <valueHelp> <format>u32:0-16777215</format> <description>Summary default cost</description> </valueHelp> <constraint> 
<validator name="numeric" argument="--range 0-16777215"/> </constraint> </properties> </leafNode> <leafNode name="no-summary"> <properties> <help>Do not inject inter-area routes into stub</help> <valueless/> </properties> </leafNode> <leafNode name="translate"> <properties> <help>Configure NSSA-ABR</help> <completionHelp> <list>always candidate never</list> </completionHelp> <valueHelp> <format>always</format> <description>Always translate LSA types</description> </valueHelp> <valueHelp> <format>candidate</format> <description>Translate for election</description> </valueHelp> <valueHelp> <format>never</format> <description>Never translate LSA types</description> </valueHelp> <constraint> <regex>(always|candidate|never)</regex> </constraint> </properties> <defaultValue>candidate</defaultValue> </leafNode> </children> </node> <node name="stub"> <properties> <help>Stub OSPF area</help> </properties> <children> <leafNode name="default-cost"> <properties> <help>Summary-default cost</help> <valueHelp> <format>u32:0-16777215</format> <description>Summary default cost</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-16777215"/> </constraint> </properties> </leafNode> <leafNode name="no-summary"> <properties> <help>Do not inject inter-area routes into the stub</help> <valueless/> </properties> </leafNode> </children> </node> </children> </node> <leafNode name="authentication"> <properties> <help>OSPF area authentication type</help> <completionHelp> <list>plaintext-password md5</list> </completionHelp> <valueHelp> <format>plaintext-password</format> <description>Use plain-text authentication</description> </valueHelp> <valueHelp> <format>md5</format> <description>Use MD5 authentication</description> </valueHelp> <constraint> <regex>(plaintext-password|md5)</regex> </constraint> </properties> </leafNode> <leafNode name="network"> <properties> <help>OSPF network</help> <valueHelp> <format>ipv4net</format> <description>OSPF network</description> </valueHelp> <constraint> <validator name="ipv4-prefix"/> </constraint> <multi/> </properties> </leafNode> <tagNode name="range"> <properties> <help>Summarize routes matching a prefix (border routers only)</help> <valueHelp> <format>ipv4net</format> <description>Area range prefix</description> </valueHelp> <constraint> <validator name="ipv4-prefix"/> </constraint> </properties> <children> <leafNode name="cost"> <properties> <help>Metric for this range</help> <valueHelp> <format>u32:0-16777215</format> <description>Metric for this range</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-16777215"/> </constraint> </properties> </leafNode> <leafNode name="not-advertise"> <properties> <help>Do not advertise this range</help> <valueless/> </properties> </leafNode> <leafNode name="substitute"> <properties> <help>Advertise area range as another prefix</help> <valueHelp> <format>ipv4net</format> <description>Advertise area range as another prefix</description> </valueHelp> <constraint> <validator name="ipv4-prefix"/> </constraint> </properties> </leafNode> </children> </tagNode> <leafNode name="shortcut"> <properties> <help>Area shortcut mode</help> <completionHelp> <list>default disable enable</list> </completionHelp> <valueHelp> <format>default</format> <description>Set default</description> </valueHelp> <valueHelp> <format>disable</format> <description>Disable shortcutting mode</description> </valueHelp> <valueHelp> <format>enable</format> <description>Enable shortcutting mode</description> 
</valueHelp> <constraint> <regex>(default|disable|enable)</regex> </constraint> </properties> </leafNode> <leafNode name="export-list"> <properties> <help>Set the filter for networks announced to other areas</help> <completionHelp> <path>policy access-list</path> </completionHelp> <valueHelp> <format>u32</format> <description>Access-list number</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-4294967295"/> </constraint> </properties> </leafNode> <leafNode name="import-list"> <properties> <help>Set the filter for networks from other areas announced</help> <completionHelp> <path>policy access-list</path> </completionHelp> <valueHelp> <format>u32</format> <description>Access-list number</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-4294967295"/> </constraint> </properties> </leafNode> <tagNode name="virtual-link"> <properties> <help>Virtual link</help> <valueHelp> <format>ipv4</format> <description>OSPF area in dotted decimal notation</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-4294967295"/> <validator name="ip-address"/> </constraint> </properties> <children> #include <include/ospf/authentication.xml.i> #include <include/ospf/intervals.xml.i> + #include <include/ospf/retransmit-window.xml.i> </children> </tagNode> </children> </tagNode> #include <include/ospf/auto-cost.xml.i> <node name="capability"> <properties> <help>Enable specific OSPF features</help> </properties> <children> <leafNode name="opaque"> <properties> <help>Opaque LSA</help> <valueless/> </properties> </leafNode> </children> </node> #include <include/ospf/default-information.xml.i> <leafNode name="default-metric"> <properties> <help>Metric of redistributed routes</help> <valueHelp> <format>u32:0-16777214</format> <description>Metric of redistributed routes</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-16777214"/> </constraint> </properties> </leafNode> #include <include/ospf/graceful-restart.xml.i> <node name="graceful-restart"> <children> <node name="helper"> <children> <leafNode name="no-strict-lsa-checking"> <properties> <help>Disable strict LSA check</help> <valueless/> </properties> </leafNode> </children> </node> </children> </node> <leafNode name="maximum-paths"> <properties> <help>Maximum multiple paths (ECMP)</help> <valueHelp> <format>u32:1-64</format> <description>Maximum multiple paths (ECMP)</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-64"/> </constraint> </properties> </leafNode> #include <include/isis/ldp-sync-protocol.xml.i> <node name="distance"> <properties> <help>Administrative distance</help> </properties> <children> #include <include/ospf/distance-global.xml.i> <node name="ospf"> <properties> <help>OSPF administrative distance</help> </properties> <children> #include <include/ospf/distance-per-protocol.xml.i> </children> </node> </children> </node> <tagNode name="interface"> <properties> <help>Interface configuration</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces</script> </completionHelp> <valueHelp> <format>txt</format> <description>Interface name</description> </valueHelp> <constraint> #include <include/constraint/interface-name.xml.i> </constraint> </properties> <children> <leafNode name="area"> <properties> <help>Enable OSPF on this interface</help> <completionHelp> <path>protocols ospf area</path> </completionHelp> <valueHelp> <format>u32</format> <description>OSPF area ID as 
decimal notation</description> </valueHelp> <valueHelp> <format>ipv4</format> <description>OSPF area ID in IP address notation</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-4294967295"/> <validator name="ip-address"/> </constraint> </properties> </leafNode> #include <include/ospf/authentication.xml.i> #include <include/ospf/intervals.xml.i> + #include <include/ospf/retransmit-window.xml.i> #include <include/ospf/interface-common.xml.i> #include <include/isis/ldp-sync-interface.xml.i> <leafNode name="bandwidth"> <properties> <help>Interface bandwidth (Mbit/s)</help> <valueHelp> <format>u32:1-100000</format> <description>Bandwidth in Megabit/sec (for calculating OSPF cost)</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-100000"/> </constraint> </properties> </leafNode> <leafNode name="hello-multiplier"> <properties> <help>Hello multiplier factor</help> <valueHelp> <format>u32:1-10</format> <description>Number of Hellos to send each second</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-10"/> </constraint> </properties> </leafNode> <leafNode name="network"> <properties> <help>Network type</help> <completionHelp> <list>broadcast non-broadcast point-to-multipoint point-to-point</list> </completionHelp> <valueHelp> <format>broadcast</format> <description>Broadcast network type</description> </valueHelp> <valueHelp> <format>non-broadcast</format> <description>Non-broadcast network type</description> </valueHelp> <valueHelp> <format>point-to-multipoint</format> <description>Point-to-multipoint network type</description> </valueHelp> <valueHelp> <format>point-to-point</format> <description>Point-to-point network type</description> </valueHelp> <constraint> <regex>(broadcast|non-broadcast|point-to-multipoint|point-to-point)</regex> </constraint> <constraintErrorMessage>Must be broadcast, non-broadcast, point-to-multipoint or point-to-point</constraintErrorMessage> </properties> </leafNode> <node name="passive"> <properties> <help>Suppress routing updates on an interface</help> </properties> <children> #include <include/generic-disable-node.xml.i> </children> </node> </children> </tagNode> #include <include/ospf/log-adjacency-changes.xml.i> <node name="max-metric"> <properties> <help>OSPF maximum and infinite-distance metric</help> </properties> <children> <node name="router-lsa"> <properties> <help>Advertise own Router-LSA with infinite distance (stub router)</help> </properties> <children> <leafNode name="administrative"> <properties> <help>Administratively apply, for an indefinite period</help> <valueless/> </properties> </leafNode> <leafNode name="on-shutdown"> <properties> <help>Advertise stub-router prior to full shutdown of OSPF</help> <valueHelp> <format>u32:5-100</format> <description>Time (seconds) to advertise self as stub-router</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 5-100"/> </constraint> </properties> </leafNode> <leafNode name="on-startup"> <properties> <help>Automatically advertise stub Router-LSA on startup of OSPF</help> <valueHelp> <format>u32:5-86400</format> <description>Time (seconds) to advertise self as stub-router</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 5-86400"/> </constraint> </properties> </leafNode> </children> </node> </children> </node> <node name="mpls-te"> <properties> <help>MultiProtocol Label Switching-Traffic Engineering (MPLS-TE) parameters</help> </properties> 
<children> <leafNode name="enable"> <properties> <help>Enable MPLS-TE functionality</help> <valueless/> </properties> </leafNode> <leafNode name="router-address"> <properties> <help>Stable IP address of the advertising router</help> <valueHelp> <format>ipv4</format> <description>Stable IP address of the advertising router</description> </valueHelp> <constraint> <validator name="ipv4-address"/> </constraint> </properties> <defaultValue>0.0.0.0</defaultValue> </leafNode> </children> </node> <tagNode name="neighbor"> <properties> <help>Specify neighbor router</help> <valueHelp> <format>ipv4</format> <description>Neighbor IP address</description> </valueHelp> <constraint> <validator name="ipv4-address"/> </constraint> </properties> <children> <leafNode name="poll-interval"> <properties> <help>Dead neighbor polling interval</help> <valueHelp> <format>u32:1-65535</format> <description>Seconds between dead neighbor polling interval</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-65535"/> </constraint> </properties> <defaultValue>60</defaultValue> </leafNode> <leafNode name="priority"> <properties> <help>Neighbor priority in seconds</help> <valueHelp> <format>u32:0-255</format> <description>Neighbor priority</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-255"/> </constraint> </properties> <defaultValue>0</defaultValue> </leafNode> </children> </tagNode> <node name="parameters"> <properties> <help>OSPF specific parameters</help> </properties> <children> <leafNode name="abr-type"> <properties> <help>OSPF ABR type</help> <completionHelp> <list>cisco ibm shortcut standard</list> </completionHelp> <valueHelp> <format>cisco</format> <description>Cisco ABR type</description> </valueHelp> <valueHelp> <format>ibm</format> <description>IBM ABR type</description> </valueHelp> <valueHelp> <format>shortcut</format> <description>Shortcut ABR type</description> </valueHelp> <valueHelp> <format>standard</format> <description>Standard ABR type</description> </valueHelp> <constraint> <regex>(cisco|ibm|shortcut|standard)</regex> </constraint> </properties> <defaultValue>cisco</defaultValue> </leafNode> <leafNode name="opaque-lsa"> <properties> <help>Enable the Opaque-LSA capability (rfc2370)</help> <valueless/> </properties> </leafNode> <leafNode name="rfc1583-compatibility"> <properties> <help>Enable RFC1583 criteria for handling AS external routes</help> <valueless/> </properties> </leafNode> #include <include/router-id.xml.i> </children> </node> <leafNode name="passive-interface"> <properties> <help>Suppress routing updates on an interface</help> <completionHelp> <list>default</list> </completionHelp> <valueHelp> <format>default</format> <description>Default to suppress routing updates on all interfaces</description> </valueHelp> <constraint> <regex>(default)</regex> </constraint> </properties> </leafNode> <node name="segment-routing"> <properties> <help>Segment-Routing (SPRING) settings</help> </properties> <children> <node name="global-block"> <properties> <help>Segment Routing Global Block label range</help> </properties> <children> #include <include/segment-routing-label-value.xml.i> </children> </node> <node name="local-block"> <properties> <help>Segment Routing Local Block label range</help> </properties> <children> #include <include/segment-routing-label-value.xml.i> </children> </node> <leafNode name="maximum-label-depth"> <properties> <help>Maximum MPLS labels allowed for this router</help> <valueHelp> <format>u32:1-16</format> 
<description>MPLS label depth</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-16"/> </constraint> </properties> </leafNode> <tagNode name="prefix"> <properties> <help>Static IPv4 prefix segment/label mapping</help> <valueHelp> <format>ipv4net</format> <description>IPv4 prefix segment</description> </valueHelp> <constraint> <validator name="ipv4-prefix"/> </constraint> </properties> <children> <node name="index"> <properties> <help>Specify the index value of prefix segment/label ID</help> </properties> <children> <leafNode name="value"> <properties> <help>Specify the index value of prefix segment/label ID</help> <valueHelp> <format>u32:0-65535</format> <description>The index segment/label ID value</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-65535"/> </constraint> </properties> </leafNode> <leafNode name="explicit-null"> <properties> <help>Request upstream neighbor to replace segment/label with explicit null label</help> <valueless/> </properties> </leafNode> <leafNode name="no-php-flag"> <properties> <help>Do not request penultimate hop popping for segment/label</help> <valueless/> </properties> </leafNode> </children> </node> </children> </tagNode> </children> </node> <node name="redistribute"> <properties> <help>Redistribute information from another routing protocol</help> </properties> <children> <node name="bgp"> <properties> <help>Redistribute BGP routes</help> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </node> <node name="connected"> <properties> <help>Redistribute connected routes</help> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </node> <node name="isis"> <properties> <help>Redistribute IS-IS routes</help> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </node> <node name="kernel"> <properties> <help>Redistribute Kernel routes</help> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </node> <node name="rip"> <properties> <help>Redistribute RIP routes</help> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </node> <node name="babel"> <properties> <help>Redistribute Babel routes</help> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </node> <node name="static"> <properties> <help>Redistribute statically configured routes</help> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </node> <tagNode name="table"> <properties> <help>Redistribute non-main Kernel Routing Table</help> <completionHelp> <path>protocols static table</path> </completionHelp> <valueHelp> <format>u32:1-200</format> <description>Policy route table number</description> </valueHelp> </properties> <children> #include <include/ospf/metric.xml.i> #include <include/ospf/metric-type.xml.i> #include <include/route-map.xml.i> </children> </tagNode> </children> </node> <node name="refresh"> <properties> <help>Adjust refresh 
parameters</help> </properties> <children> <leafNode name="timers"> <properties> <help>Refresh timer</help> <valueHelp> <format>u32:10-1800</format> <description>Timer value in seconds</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 10-1800"/> </constraint> </properties> </leafNode> </children> </node> <tagNode name="summary-address"> <properties> <help>External summary address</help> <valueHelp> <format>ipv4net</format> <description>OSPF area number in dotted decimal notation</description> </valueHelp> <constraint> <validator name="ipv4-prefix"/> </constraint> </properties> <children> <leafNode name="no-advertise"> <properties> <help>Don not advertise summary route</help> <valueless/> </properties> </leafNode> <leafNode name="tag"> <properties> <help>Router tag</help> <valueHelp> <format>u32:1-4294967295</format> <description>Router tag value</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-4294967295"/> </constraint> </properties> </leafNode> </children> </tagNode> <node name="timers"> <properties> <help>Adjust routing timers</help> </properties> <children> <node name="throttle"> <properties> <help>Throttling adaptive timers</help> </properties> <children> <node name="spf"> <properties> <help>OSPF SPF timers</help> </properties> <children> <leafNode name="delay"> <properties> <help>Delay from the first change received to SPF calculation</help> <valueHelp> <format>u32:0-600000</format> <description>Delay in milliseconds</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-600000"/> </constraint> </properties> <defaultValue>200</defaultValue> </leafNode> <leafNode name="initial-holdtime"> <properties> <help>Initial hold time between consecutive SPF calculations</help> <valueHelp> <format>u32:0-600000</format> <description>Initial hold time in milliseconds</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-600000"/> </constraint> </properties> <defaultValue>1000</defaultValue> </leafNode> <leafNode name="max-holdtime"> <properties> <help>Maximum hold time</help> <valueHelp> <format>u32:0-600000</format> <description>Max hold time in milliseconds</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 0-600000"/> </constraint> </properties> <defaultValue>10000</defaultValue> </leafNode> </children> </node> </children> </node> </children> </node> <!-- include end --> diff --git a/interface-definitions/include/ospf/retransmit-window.xml.i b/interface-definitions/include/ospf/retransmit-window.xml.i new file mode 100644 index 000000000..a5e20f522 --- /dev/null +++ b/interface-definitions/include/ospf/retransmit-window.xml.i @@ -0,0 +1,15 @@ +<!-- include start from ospf/retransmit-window.xml.i --> +<leafNode name="retransmit-window"> + <properties> + <help>Window for LSA retransmit</help> + <valueHelp> + <format>u32:20-1000</format> + <description>Retransmit LSAs expiring in this window (milliseconds)</description> + </valueHelp> + <constraint> + <validator name="numeric" argument="--range 20-1000"/> + </constraint> + </properties> + <defaultValue>50</defaultValue> +</leafNode> +<!-- include end --> diff --git a/interface-definitions/include/source-address-ipv4.xml.i b/interface-definitions/include/source-address-ipv4.xml.i index 052678113..aa0b083c7 100644 --- a/interface-definitions/include/source-address-ipv4.xml.i +++ b/interface-definitions/include/source-address-ipv4.xml.i @@ -1,17 +1,17 @@ <!-- include start from 
source-address-ipv4.xml.i --> <leafNode name="source-address"> <properties> - <help>IPv4 source address used to initiate connection</help> + <help>IPv4 address used to initiate connection</help> <completionHelp> <script>${vyos_completion_dir}/list_local_ips.sh --ipv4</script> </completionHelp> <valueHelp> <format>ipv4</format> <description>IPv4 source address</description> </valueHelp> <constraint> <validator name="ipv4-address"/> </constraint> </properties> </leafNode> <!-- include end --> diff --git a/interface-definitions/include/source-address-ipv6.xml.i b/interface-definitions/include/source-address-ipv6.xml.i new file mode 100644 index 000000000..a27955b0c --- /dev/null +++ b/interface-definitions/include/source-address-ipv6.xml.i @@ -0,0 +1,17 @@ +<!-- include start from source-address-ipv6.xml.i --> +<leafNode name="source-address"> + <properties> + <help>IPv6 address used to initiate connection</help> + <completionHelp> + <script>${vyos_completion_dir}/list_local_ips.sh --ipv6</script> + </completionHelp> + <valueHelp> + <format>ipv6</format> + <description>IPv6 source address</description> + </valueHelp> + <constraint> + <validator name="ipv6-address"/> + </constraint> + </properties> +</leafNode> +<!-- include end --> diff --git a/interface-definitions/include/static/static-route-bfd.xml.i b/interface-definitions/include/static/static-route-bfd.xml.i deleted file mode 100644 index d588b369f..000000000 --- a/interface-definitions/include/static/static-route-bfd.xml.i +++ /dev/null @@ -1,36 +0,0 @@ -<!-- include start from static/static-route-bfd.xml.i --> -<node name="bfd"> - <properties> - <help>BFD monitoring</help> - </properties> - <children> - #include <include/bfd/profile.xml.i> - <node name="multi-hop"> - <properties> - <help>Use BFD multi hop session</help> - </properties> - <children> - <tagNode name="source"> - <properties> - <help>Use source for BFD session</help> - <valueHelp> - <format>ipv4</format> - <description>IPv4 source address</description> - </valueHelp> - <valueHelp> - <format>ipv6</format> - <description>IPv6 source address</description> - </valueHelp> - <constraint> - <validator name="ip-address"/> - </constraint> - </properties> - <children> - #include <include/bfd/profile.xml.i> - </children> - </tagNode> - </children> - </node> - </children> -</node> -<!-- include end --> diff --git a/interface-definitions/include/static/static-route.xml.i b/interface-definitions/include/static/static-route.xml.i index 51e45c6f7..fd7366286 100644 --- a/interface-definitions/include/static/static-route.xml.i +++ b/interface-definitions/include/static/static-route.xml.i @@ -1,60 +1,74 @@ <!-- include start from static/static-route.xml.i --> <tagNode name="route"> <properties> <help>Static IPv4 route</help> <valueHelp> <format>ipv4net</format> <description>IPv4 static route</description> </valueHelp> <constraint> <validator name="ipv4-prefix"/> </constraint> </properties> <children> #include <include/static/static-route-blackhole.xml.i> #include <include/static/static-route-reject.xml.i> #include <include/dhcp-interface-multi.xml.i> #include <include/generic-description.xml.i> <tagNode name="interface"> <properties> <help>Next-hop IPv4 router interface</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces</script> </completionHelp> <valueHelp> <format>txt</format> <description>Gateway interface name</description> </valueHelp> <constraint> #include <include/constraint/interface-name.xml.i> </constraint> </properties> <children> #include 
<include/generic-disable-node.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-vrf.xml.i> </children> </tagNode> <tagNode name="next-hop"> <properties> <help>Next-hop IPv4 router address</help> <valueHelp> <format>ipv4</format> <description>Next-hop router address</description> </valueHelp> <constraint> <validator name="ipv4-address"/> </constraint> </properties> <children> #include <include/generic-disable-node.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-interface.xml.i> #include <include/static/static-route-vrf.xml.i> - #include <include/static/static-route-bfd.xml.i> + <node name="bfd"> + <properties> + <help>BFD monitoring</help> + </properties> + <children> + #include <include/bfd/profile.xml.i> + <node name="multi-hop"> + <properties> + <help>Configure BFD multi-hop session</help> + </properties> + <children> + #include <include/source-address-ipv4.xml.i> + </children> + </node> + </children> + </node> </children> </tagNode> </children> </tagNode> <!-- include end --> - diff --git a/interface-definitions/include/static/static-route6.xml.i b/interface-definitions/include/static/static-route6.xml.i index 4468c8025..6fcc18b8a 100644 --- a/interface-definitions/include/static/static-route6.xml.i +++ b/interface-definitions/include/static/static-route6.xml.i @@ -1,60 +1,75 @@ <!-- include start from static/static-route6.xml.i --> <tagNode name="route6"> <properties> <help>Static IPv6 route</help> <valueHelp> <format>ipv6net</format> <description>IPv6 static route</description> </valueHelp> <constraint> <validator name="ipv6-prefix"/> </constraint> </properties> <children> #include <include/static/static-route-blackhole.xml.i> #include <include/static/static-route-reject.xml.i> #include <include/generic-description.xml.i> <tagNode name="interface"> <properties> <help>IPv6 gateway interface name</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces</script> </completionHelp> <valueHelp> <format>txt</format> <description>Gateway interface name</description> </valueHelp> <constraint> #include <include/constraint/interface-name.xml.i> </constraint> </properties> <children> #include <include/generic-disable-node.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-segments.xml.i> #include <include/static/static-route-vrf.xml.i> </children> </tagNode> <tagNode name="next-hop"> <properties> <help>IPv6 gateway address</help> <valueHelp> <format>ipv6</format> <description>Next-hop IPv6 router</description> </valueHelp> <constraint> <validator name="ipv6-address"/> </constraint> </properties> <children> #include <include/generic-disable-node.xml.i> - #include <include/static/static-route-bfd.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-interface.xml.i> #include <include/static/static-route-segments.xml.i> #include <include/static/static-route-vrf.xml.i> + <node name="bfd"> + <properties> + <help>BFD monitoring</help> + </properties> + <children> + #include <include/bfd/profile.xml.i> + <node name="multi-hop"> + <properties> + <help>Configure BFD multi-hop session</help> + </properties> + <children> + #include <include/source-address-ipv6.xml.i> + </children> + </node> + </children> + </node> </children> </tagNode> </children> </tagNode> <!-- include end --> diff --git a/interface-definitions/include/version/quagga-version.xml.i 
b/interface-definitions/include/version/quagga-version.xml.i index 23d884cd4..10ca2816e 100644 --- a/interface-definitions/include/version/quagga-version.xml.i +++ b/interface-definitions/include/version/quagga-version.xml.i @@ -1,3 +1,3 @@ <!-- include start from include/version/quagga-version.xml.i --> -<syntaxVersion component='quagga' version='11'></syntaxVersion> +<syntaxVersion component='quagga' version='12'></syntaxVersion> <!-- include end --> diff --git a/interface-definitions/protocols_static.xml.in b/interface-definitions/protocols_static.xml.in index ca4ca2d74..d8e0ee56b 100644 --- a/interface-definitions/protocols_static.xml.in +++ b/interface-definitions/protocols_static.xml.in @@ -1,44 +1,93 @@ <?xml version="1.0"?> <interfaceDefinition> <node name="protocols"> <properties> <help>Routing protocols</help> </properties> <children> <node name="static" owner="${vyos_conf_scripts_dir}/protocols_static.py"> <properties> <help>Static Routing</help> <priority>480</priority> </properties> <children> + <tagNode name="mroute"> + <properties> + <help>Static IPv4 route for Multicast RIB</help> + <valueHelp> + <format>ipv4net</format> + <description>Network</description> + </valueHelp> + <constraint> + <validator name="ipv4-prefix"/> + </constraint> + </properties> + <children> + <tagNode name="next-hop"> + <properties> + <help>Next-hop IPv4 router address</help> + <valueHelp> + <format>ipv4</format> + <description>Next-hop router address</description> + </valueHelp> + <constraint> + <validator name="ipv4-address"/> + </constraint> + </properties> + <children> + #include <include/generic-disable-node.xml.i> + #include <include/static/static-route-distance.xml.i> + </children> + </tagNode> + <tagNode name="interface"> + <properties> + <help>Next-hop IPv4 router interface</help> + <completionHelp> + <script>${vyos_completion_dir}/list_interfaces</script> + </completionHelp> + <valueHelp> + <format>txt</format> + <description>Gateway interface name</description> + </valueHelp> + <constraint> + #include <include/constraint/interface-name.xml.i> + </constraint> + </properties> + <children> + #include <include/generic-disable-node.xml.i> + #include <include/static/static-route-distance.xml.i> + </children> + </tagNode> + </children> + </tagNode> #include <include/route-map.xml.i> #include <include/static/static-route.xml.i> #include <include/static/static-route6.xml.i> <tagNode name="table"> <properties> <help>Policy route table number</help> <valueHelp> <format>u32:1-200</format> <description>Policy route table number</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-200"/> </constraint> </properties> <children> <!-- iproute2 only considers the first "word" until whitespace in the name field but does not complain about special characters. We put an artificial limit here to make table descriptions potentially valid node names to avoid quoting and simplify future syntax changes if we decide to make any. 
--> #include <include/generic-description.xml.i> #include <include/static/static-route.xml.i> #include <include/static/static-route6.xml.i> </children> </tagNode> </children> </node> </children> </node> </interfaceDefinition> diff --git a/interface-definitions/protocols_static_multicast.xml.in b/interface-definitions/protocols_static_multicast.xml.in deleted file mode 100644 index caf95ed7c..000000000 --- a/interface-definitions/protocols_static_multicast.xml.in +++ /dev/null @@ -1,95 +0,0 @@ -<?xml version="1.0"?> -<interfaceDefinition> - <node name="protocols"> - <children> - <node name="static"> - <children> - <node name="multicast" owner="${vyos_conf_scripts_dir}/protocols_static_multicast.py"> - <properties> - <help>Multicast static route</help> - <priority>481</priority> - </properties> - <children> - <tagNode name="route"> - <properties> - <help>Configure static unicast route into MRIB for multicast RPF lookup</help> - <valueHelp> - <format>ipv4net</format> - <description>Network</description> - </valueHelp> - <constraint> - <validator name="ip-prefix"/> - </constraint> - </properties> - <children> - <tagNode name="next-hop"> - <properties> - <help>Nexthop IPv4 address</help> - <valueHelp> - <format>ipv4</format> - <description>Nexthop IPv4 address</description> - </valueHelp> - <constraint> - <validator name="ipv4-address"/> - </constraint> - </properties> - <children> - <leafNode name="distance"> - <properties> - <help>Distance value for this route</help> - <valueHelp> - <format>u32:1-255</format> - <description>Distance for this route</description> - </valueHelp> - <constraint> - <validator name="numeric" argument="--range 1-255"/> - </constraint> - </properties> - </leafNode> - </children> - </tagNode> - </children> - </tagNode> - <tagNode name="interface-route"> - <properties> - <help>Multicast interface based route</help> - <valueHelp> - <format>ipv4net</format> - <description>Network</description> - </valueHelp> - <constraint> - <validator name="ip-prefix"/> - </constraint> - </properties> - <children> - <tagNode name="next-hop-interface"> - <properties> - <help>Next-hop interface</help> - <completionHelp> - <script>${vyos_completion_dir}/list_interfaces</script> - </completionHelp> - </properties> - <children> - <leafNode name="distance"> - <properties> - <help>Distance value for this route</help> - <valueHelp> - <format>u32:1-255</format> - <description>Distance for this route</description> - </valueHelp> - <constraint> - <validator name="numeric" argument="--range 1-255"/> - </constraint> - </properties> - </leafNode> - </children> - </tagNode> - </children> - </tagNode> - </children> - </node> - </children> - </node> - </children> - </node> -</interfaceDefinition> diff --git a/op-mode-definitions/monitor-log.xml.in b/op-mode-definitions/monitor-log.xml.in index 6a2b7e53b..66b7ecb2f 100644 --- a/op-mode-definitions/monitor-log.xml.in +++ b/op-mode-definitions/monitor-log.xml.in @@ -1,413 +1,419 @@ <?xml version="1.0"?> <interfaceDefinition> <node name="monitor"> <properties> <help>Monitor system information</help> </properties> <children> <node name="log"> <properties> <help>Monitor last lines of messages file</help> </properties> <command>SYSTEMD_LOG_COLOR=false journalctl --no-hostname --follow --boot</command> <children> <node name="color"> <properties> <help>Output log in a colored fashion</help> </properties> <command>SYSTEMD_LOG_COLOR=false grc journalctl --no-hostname --follow --boot</command> </node> <node name="ids"> <properties> <help>Monitor Intrusion Detection 
System log</help> </properties> <children> <leafNode name="ddos-protection"> <properties> <help>Monitor last lines of DDoS protection</help> </properties> <command>journalctl --no-hostname --follow --boot --unit fastnetmon.service</command> </leafNode> </children> </node> <leafNode name="certbot"> <properties> <help>Monitor last lines of certbot log</help> </properties> <command>if sudo test -f /var/log/letsencrypt/letsencrypt.log; then sudo tail --follow=name /var/log/letsencrypt/letsencrypt.log; else echo "Certbot log does not exist"; fi</command> </leafNode> <leafNode name="conntrack-sync"> <properties> <help>Monitor last lines of conntrack-sync log</help> </properties> <command>journalctl --no-hostname --follow --boot --unit conntrackd.service</command> </leafNode> <leafNode name="console-server"> <properties> <help>Monitor last lines of console server log</help> </properties> <command>journalctl --no-hostname --follow --boot --unit conserver-server.service</command> </leafNode> <node name="dhcp"> <properties> <help>Monitor last lines of Dynamic Host Configuration Protocol log</help> </properties> <children> <node name="server"> <properties> <help>Monitor last lines of DHCP server log</help> </properties> <command>journalctl --no-hostname --follow --boot --unit kea-dhcp4-server.service</command> </node> <node name="client"> <properties> <help>Monitor last lines of DHCP client log</help> </properties> <command>journalctl --no-hostname --follow --boot --unit "dhclient@*.service"</command> <children> <tagNode name="interface"> <properties> <help>Show DHCP client log on specific interface</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces --broadcast</script> </completionHelp> </properties> <command>journalctl --no-hostname --follow --boot --unit "dhclient@$6.service"</command> </tagNode> </children> </node> </children> </node> <node name="dhcpv6"> <properties> <help>Monitor last lines of Dynamic Host Configuration Protocol IPv6 log</help> </properties> <children> <node name="server"> <properties> <help>Monitor last lines of DHCPv6 server log</help> </properties> <command>journalctl --no-hostname --follow --boot --unit kea-dhcp6-server.service</command> </node> <node name="client"> <properties> <help>Monitor last lines of DHCPv6 client log</help> </properties> <command>journalctl --no-hostname --follow --boot --unit "dhcp6c@*.service"</command> <children> <tagNode name="interface"> <properties> <help>Show DHCPv6 client log on specific interface</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces</script> </completionHelp> </properties> <command>journalctl --no-hostname --follow --boot --unit "dhcp6c@$6.service"</command> </tagNode> </children> </node> </children> </node> <leafNode name="flow-accounting"> <properties> <help>Monitor last lines of flow-accounting log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit uacctd.service</command> </leafNode> <leafNode name="ipoe-server"> <properties> <help>Monitor last lines of IP over Ethernet server log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit accel-ppp@ipoe.service</command> </leafNode> <leafNode name="kernel"> <properties> <help>Monitor last lines of Linux Kernel log</help> </properties> <command>journalctl --no-hostname --boot --follow --dmesg</command> </leafNode> <leafNode name="ndp-proxy"> <properties> <help>Monitor last lines of Neighbor Discovery Protocol (NDP) Proxy</help> </properties> <command>journalctl --no-hostname --boot --follow --unit
ndppd.service</command> </leafNode> <leafNode name="nhrp"> <properties> <help>Monitor last lines of Next Hop Resolution Protocol log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit opennhrp.service</command> </leafNode> <leafNode name="ntp"> <properties> <help>Monitor last lines of Network Time Protocol log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit chrony.service</command> </leafNode> <node name="openvpn"> <properties> <help>Monitor last lines of OpenVPN log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit openvpn@*.service</command> <children> <tagNode name="interface"> <properties> <help>Monitor last lines of specific OpenVPN interface log</help> <completionHelp> <path>interfaces openvpn</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot --unit openvpn@$5.service</command> </tagNode> </children> </node> <node name="pppoe"> <properties> <help>Monitor last lines of PPPoE interface log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit "ppp@pppoe*.service"</command> <children> <tagNode name="interface"> <properties> <help>Monitor last lines of PPPoE log for specific interface</help> <completionHelp> <path>interfaces pppoe</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot --follow --unit "ppp@$5.service"</command> </tagNode> </children> </node> <leafNode name="pppoe-server"> <properties> <help>Monitor last lines of PPPoE server log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit accel-ppp@pppoe.service</command> </leafNode> <node name="protocol"> <properties> <help>Monitor routing protocol logs</help> </properties> <children> <leafNode name="ospf"> <properties> <help>Monitor log for OSPF</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/ospfd</command> </leafNode> <leafNode name="ospfv3"> <properties> <help>Monitor log for OSPF for IPv6</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/ospf6d</command> </leafNode> <leafNode name="bgp"> <properties> <help>Monitor log for BGP</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/bgpd</command> </leafNode> <leafNode name="rip"> <properties> <help>Monitor log for RIP</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/ripd</command> </leafNode> <leafNode name="ripng"> <properties> <help>Monitor log for RIPng</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/ripngd</command> </leafNode> <leafNode name="static"> <properties> <help>Monitor log for static route</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/staticd</command> </leafNode> <leafNode name="multicast"> <properties> <help>Monitor log for Multicast protocol</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/pimd</command> </leafNode> <leafNode name="isis"> <properties> <help>Monitor log for ISIS</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/isisd</command> </leafNode> <leafNode name="openfabric"> <properties> <help>Monitor log for OpenFabric</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/fabricd</command> </leafNode> <leafNode name="nhrp"> <properties> <help>Monitor log for NHRP</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/nhrpd</command> 
</leafNode> <leafNode name="bfd"> <properties> <help>Monitor log for BFD</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/bfdd</command> </leafNode> <leafNode name="mpls"> <properties> <help>Monitor log for MPLS</help> </properties> <command>journalctl --follow --no-hostname --boot /usr/lib/frr/ldpd</command> </leafNode> </children> </node> <node name="macsec"> <properties> <help>Monitor last lines of MACsec</help> </properties> <command>journalctl --no-hostname --boot --follow --unit "wpa_supplicant-macsec@*.service"</command> <children> <tagNode name="interface"> <properties> <help>Monitor last lines of specific MACsec interface</help> <completionHelp> <path>interfaces macsec</path> </completionHelp> </properties> <command>SRC=$(cli-shell-api returnValue interfaces macsec "$5" source-interface); journalctl --no-hostname --boot --follow --unit "wpa_supplicant-macsec@$SRC.service"</command> </tagNode> </children> </node> <leafNode name="router-advert"> <properties> <help>Monitor last lines of Router Advertisement Daemon log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit radvd.service</command> </leafNode> <leafNode name="snmp"> <properties> <help>Monitor last lines of Simple Network Monitoring Protocol log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit snmpd.service</command> </leafNode> <node name="ssh"> <properties> <help>Monitor last lines of Secure Shell log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit ssh.service</command> <children> <node name="dynamic-protection"> <properties> <help>Monitor last lines of SSH guard log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit sshguard.service</command> </node> </children> </node> <leafNode name="vpn"> <properties> <help>Monitor last lines of ALL Virtual Private Network services</help> </properties> <command>journalctl --no-hostname --boot --follow --unit strongswan.service --unit accel-ppp@*.service --unit ocserv.service</command> </leafNode> <leafNode name="ipsec"> <properties> <help>Monitor last lines of IPsec log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit strongswan.service</command> </leafNode> <leafNode name="l2tp"> <properties> <help>Monitor last lines of L2TP log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit accel-ppp@l2tp.service</command> </leafNode> <leafNode name="openconnect"> <properties> <help>Monitor last lines of OpenConnect log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit ocserv.service</command> </leafNode> <leafNode name="pptp"> <properties> <help>Monitor last lines of PPTP log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit accel-ppp@pptp.service</command> </leafNode> <leafNode name="sstp"> <properties> <help>Monitor last lines of Secure Socket Tunneling Protocol server</help> </properties> <command>journalctl --no-hostname --boot --follow --unit accel-ppp@sstp.service</command> </leafNode> <node name="sstpc"> <properties> <help>Monitor last lines of Secure Socket Tunneling Protocol client</help> </properties> <command>journalctl --no-hostname --boot --follow --unit "ppp@sstpc*.service"</command> <children> <tagNode name="interface"> <properties> <help>Monitor last lines of SSTP client log for specific interface</help> <completionHelp> <path>interfaces sstpc</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot 
--follow --unit "ppp@$5.service"</command> </tagNode> </children> </node> <leafNode name="vrrp"> <properties> <help>Monitor last lines of Virtual Router Redundancy Protocol log</help> </properties> <command>journalctl --no-hostname --boot --follow --unit keepalived.service</command> </leafNode> + <leafNode name="vyos-configd"> + <properties> + <help>Monitor last lines of VyOS configuration daemon log</help> + </properties> + <command>journalctl --no-hostname --boot --follow --unit vyos-configd.service</command> + </leafNode> <node name="wireless"> <properties> <help>Monitor last lines of Wireless interface log</help> </properties> <children> <node name="wpa-supplicant"> <properties> <help>Monitor last lines of WPA supplicant</help> </properties> <command>if cli-shell-api existsActive interfaces wireless; then journalctl --no-hostname --boot --follow --unit "wpa_supplicant@*.service"; else echo "No wireless interface configured!"; fi</command> <children> <tagNode name="interface"> <properties> <help>Monitor last lines of specific wireless interface supplicant</help> <completionHelp> <path>interfaces wireless</path> </completionHelp> </properties> <command>if [[ $(cli-shell-api returnActiveValue interfaces wireless $6 type) == "station" ]]; then journalctl --no-hostname --boot --follow --unit "wpa_supplicant@$6.service"; else echo "Wireless interface $6 not configured as station!"; fi</command> </tagNode> </children> </node> <node name="hostapd"> <properties> <help>Monitor last lines of host access point daemon</help> </properties> <command>if cli-shell-api existsActive interfaces wireless; then journalctl --no-hostname --boot --follow --unit "hostapd@*.service"; else echo "No wireless interface configured!"; fi</command> <children> <tagNode name="interface"> <properties> <help>Monitor last lines of specific host access point interface</help> <completionHelp> <path>interfaces wireless</path> </completionHelp> </properties> <command>if [[ $(cli-shell-api returnActiveValue interfaces wireless $6 type) == "access-point" ]]; then journalctl --no-hostname --boot --follow --unit "hostapd@$6.service"; else echo "Wireless interface $6 not configured as access-point!"; fi</command> </tagNode> </children> </node> </children> </node> </children> </node> </children> </node> </interfaceDefinition> diff --git a/op-mode-definitions/show-log.xml.in b/op-mode-definitions/show-log.xml.in index c2504686d..b032f5e38 100755 --- a/op-mode-definitions/show-log.xml.in +++ b/op-mode-definitions/show-log.xml.in @@ -1,947 +1,953 @@ <?xml version="1.0"?> <interfaceDefinition> <node name="show"> <properties> <help>Show system information</help> </properties> <children> <tagNode name="log"> <properties> <help>Show last number of messages in master logging buffer</help> <completionHelp> <list><1-9999></list> </completionHelp> </properties> <command>if ${vyos_validators_dir}/numeric --range 1-9999 "$3"; then journalctl --no-hostname --boot --lines "$3"; fi</command> </tagNode> <node name="log"> <properties> <help>Show contents of current master logging buffer</help> </properties> <command>journalctl --no-hostname --boot</command> <children> <leafNode name="audit"> <properties> <help>Show audit logs</help> </properties> <command>cat /var/log/audit/audit.log</command> </leafNode> <leafNode name="all"> <properties> <help>Show contents of all master log files</help> </properties> <command>sudo bash -c 'eval $(lesspipe); less $_vyatta_less_options --prompt=".logm, file %i of %m., page %dt of %D" -- `printf "%s\n" 
/var/log/messages* | sort -nr`'</command> </leafNode> <leafNode name="authorization"> <properties> <help>Show listing of authorization attempts</help> </properties> <command>journalctl --no-hostname --boot --quiet SYSLOG_FACILITY=10 SYSLOG_FACILITY=4</command> </leafNode> <leafNode name="certbot"> <properties> <help>Show log for certbot</help> </properties> <command>if sudo test -f /var/log/letsencrypt/letsencrypt.log; then sudo cat /var/log/letsencrypt/letsencrypt.log; else echo "Certbot log does not exist"; fi</command> </leafNode> <leafNode name="cluster"> <properties> <help>Show log for Cluster</help> </properties> <command>cat $(printf "%s\n" /var/log/messages* | sort -nr) | grep -e heartbeat -e cl_status -e mach_down -e ha_log</command> </leafNode> <leafNode name="conntrack-sync"> <properties> <help>Show log for Conntrack-sync</help> </properties> <command>journalctl --no-hostname --boot --unit conntrackd.service</command> </leafNode> <leafNode name="console-server"> <properties> <help>Show log for console server</help> </properties> <command>journalctl --no-hostname --boot --unit conserver-server.service</command> </leafNode> <node name="ids"> <properties> <help>Show log for Intrusion Detection System</help> </properties> <children> <leafNode name="ddos-protection"> <properties> <help>Show log for DDoS protection</help> </properties> <command>journalctl --no-hostname --boot --unit fastnetmon.service</command> </leafNode> </children> </node> <node name="dhcp"> <properties> <help>Show log for Dynamic Host Configuration Protocol (DHCP)</help> </properties> <children> <node name="server"> <properties> <help>Show log for DHCP server</help> </properties> <command>journalctl --no-hostname --boot --unit kea-dhcp4-server.service</command> </node> <node name="client"> <properties> <help>Show DHCP client logs</help> </properties> <command>journalctl --no-hostname --boot --unit "dhclient@*.service"</command> <children> <tagNode name="interface"> <properties> <help>Show DHCP client log on specific interface</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces --broadcast</script> </completionHelp> </properties> <command>journalctl --no-hostname --boot --unit "dhclient@$6.service"</command> </tagNode> </children> </node> </children> </node> <node name="dhcpv6"> <properties> <help>Show log for Dynamic Host Configuration Protocol IPv6 (DHCPv6)</help> </properties> <children> <node name="server"> <properties> <help>Show log for DHCPv6 server</help> </properties> <command>journalctl --no-hostname --boot --unit kea-dhcp6-server.service</command> </node> <node name="client"> <properties> <help>Show DHCPv6 client logs</help> </properties> <command>journalctl --no-hostname --boot --unit "dhcp6c@*.service"</command> <children> <tagNode name="interface"> <properties> <help>Show DHCPv6 client log on specific interface</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces</script> </completionHelp> </properties> <command>journalctl --no-hostname --boot --unit "dhcp6c@$6.service"</command> </tagNode> </children> </node> </children> </node> <node name="firewall"> <properties> <help>Show log for Firewall</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "(ipv[46]|bri)-(FWD|INP|OUT|NAM)"</command> <children> <node name="bridge"> <properties> <help>Show firewall bridge log</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "bri-(FWD|INP|OUT|NAM)"</command> <children> <node name="forward"> <properties> <help>Show Bridge forward firewall
log</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-FWD</command> <children> <node name="filter"> <properties> <help>Show Bridge firewall forward filter</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-FWD-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall bridge forward filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[bri-FWD-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <node name="input"> <properties> <help>Show Bridge input firewall log</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-INP</command> <children> <node name="filter"> <properties> <help>Show Bridge firewall input filter</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-INP-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall bridge input filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[bri-INP-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <node name="output"> <properties> <help>Show Bridge output firewall log</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-OUT</command> <children> <node name="filter"> <properties> <help>Show Bridge firewall output filter</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-OUT-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall bridge output filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[bri-OUT-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <node name="prerouting"> <properties> <help>Show Bridge prerouting firewall log</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-PRE</command> <children> <node name="filter"> <properties> <help>Show Bridge firewall prerouting filter</help> </properties> <command>journalctl --no-hostname --boot -k | grep bri-PRE-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall bridge prerouting filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[bri-PRE-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <tagNode name="name"> <properties> <help>Show custom Bridge firewall log</help> <completionHelp> <path>firewall bridge name</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | grep bri-NAM-$6</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall bridge name ${COMP_WORDS[5]} rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[bri-NAM-$6-$8-[ADRJC]\]"</command> </tagNode> </children> </tagNode> </children> </node> <node name="ipv4"> <properties> <help>Show firewall IPv4 log</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "ipv4-(FWD|INP|OUT|NAM)"</command> <children> <node name="forward"> <properties> <help>Show firewall IPv4 forward 
log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-FWD</command> <children> <node name="filter"> <properties> <help>Show firewall IPv4 forward filter log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-FWD-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv4 forward filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv4-FWD-filter-$8-[ADRJCO]\]"</command> </tagNode> </children> </node> </children> </node> <node name="input"> <properties> <help>Show firewall IPv4 input log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-INP</command> <children> <node name="filter"> <properties> <help>Show firewall IPv4 input filter log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-INP-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv4 input filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv4-INP-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <tagNode name="name"> <properties> <help>Show custom IPv4 firewall log</help> <completionHelp> <path>firewall ipv4 name</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-NAM-$6</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv4 name ${COMP_WORDS[5]} rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv4-NAM-$6-$8-[ADRJC]\]"</command> </tagNode> </children> </tagNode> <node name="output"> <properties> <help>Show firewall IPv4 output log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-OUT</command> <children> <node name="filter"> <properties> <help>Show firewall IPv4 output filter log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-OUT-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv4 output filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv4-OUT-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <node name="prerouting"> <properties> <help>Show firewall IPv4 prerouting log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-PRE</command> <children> <node name="raw"> <properties> <help>Show firewall IPv4 prerouting raw log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv4-PRE-raw</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv4 prerouting raw rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv4-PRE-raw-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> </children> </node> <node name="ipv6"> <properties> <help>Show firewall IPv6 log</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "ipv6-(FWD|INP|OUT|NAM)"</command> <children> <node name="forward"> <properties> <help>Show firewall IPv6 forward log</help> 
</properties> <command>journalctl --no-hostname --boot -k | grep ipv6-FWD</command> <children> <node name="filter"> <properties> <help>Show firewall IPv6 forward filter log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-FWD-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv6 forward filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv6-FWD-filter-$8-[ADRJCO]\]"</command> </tagNode> </children> </node> </children> </node> <node name="input"> <properties> <help>Show firewall IPv6 input log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-INP</command> <children> <node name="filter"> <properties> <help>Show firewall IPv6 input filter log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-INP-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv6 input filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv6-INP-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <tagNode name="name"> <properties> <help>Show custom IPv6 firewall log</help> <completionHelp> <path>firewall ipv6 name</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-NAM-$6</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv6 name ${COMP_WORDS[5]} rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv6-NAM-$6-$8-[ADRJC]\]"</command> </tagNode> </children> </tagNode> <node name="output"> <properties> <help>Show firewall IPv6 output log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-OUT</command> <children> <node name="filter"> <properties> <help>Show firewall IPv6 output filter log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-OUT-filter</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv6 output filter rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv6-OUT-filter-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> <node name="prerouting"> <properties> <help>Show firewall IPv6 prerouting log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-PRE</command> <children> <node name="raw"> <properties> <help>Show firewall IPv6 prerouting raw log</help> </properties> <command>journalctl --no-hostname --boot -k | grep ipv6-PRE-raw</command> <children> <tagNode name="rule"> <properties> <help>Show log for a rule in the specified firewall</help> <completionHelp> <path>firewall ipv6 prerouting raw rule</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[ipv6-PRE-raw-$8-[ADRJC]\]"</command> </tagNode> </children> </node> </children> </node> </children> </node> </children> </node> <leafNode name="flow-accounting"> <properties> <help>Show log for flow-accounting</help> </properties> <command>journalctl --no-hostname --boot --unit uacctd.service</command> </leafNode> <leafNode name="https"> <properties> <help>Show log for 
HTTPs</help> </properties> <command>journalctl --no-hostname --boot --unit nginx.service</command> </leafNode> <tagNode name="image"> <properties> <help>Show contents of master log file for image</help> <completionHelp> <script>compgen -f /lib/live/mount/persistence/boot/ | grep -v grub | sed -e s@/lib/live/mount/persistence/boot/@@</script> </completionHelp> </properties> <command>less $_vyatta_less_options --prompt=".log, page %dt of %D" -- /lib/live/mount/persistence/boot/$4/rw/var/log/messages</command> <children> <leafNode name="all"> <properties> <help>Show contents of all master log files for image</help> </properties> <command>eval $(lesspipe); less $_vyatta_less_options --prompt=".log?m, file %i of %m., page %dt of %D" -- `printf "%s\n" /lib/live/mount/persistence/boot/$4/rw/var/log/messages* | sort -nr`</command> </leafNode> <leafNode name="authorization"> <properties> <help>Show listing of authorization attempts for image</help> </properties> <command>less $_vyatta_less_options --prompt=".log, page %dt of %D" -- /lib/live/mount/persistence/boot/$4/rw/var/log/auth.log</command> </leafNode> <tagNode name="tail"> <properties> <help>Show last changes to messages</help> <completionHelp> <list><NUMBER></list> </completionHelp> </properties> <command>tail -n "$6" /lib/live/mount/persistence/boot/$4/rw/var/log/messages | ${VYATTA_PAGER:-cat}</command> </tagNode> </children> </tagNode> <leafNode name="ipoe-server"> <properties> <help>Show log for IPoE server</help> </properties> <command>journalctl --no-hostname --boot --unit accel-ppp@ipoe.service</command> </leafNode> <leafNode name="kernel"> <properties> <help>Show log for Linux Kernel</help> </properties> <command>journalctl --no-hostname --boot --dmesg</command> </leafNode> <leafNode name="lldp"> <properties> <help>Show log for Link Layer Discovery Protocol (LLDP)</help> </properties> <command>journalctl --no-hostname --boot --unit lldpd.service</command> </leafNode> <node name="nat"> <properties> <help>Show log for Network Address Translation (NAT)</help> </properties> <children> <node name="destination"> <properties> <help>Show NAT destination log</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[DST-NAT-[0-9]+\]"</command> <children> <tagNode name="rule"> <properties> <help>Show NAT destination log for specified rule</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[DST-NAT-$6\]"</command> </tagNode> </children> </node> <node name="source"> <properties> <help>Show NAT source log</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[SRC-NAT-[0-9]+(-MASQ)?\]"""</command> <children> <tagNode name="rule"> <properties> <help>Show NAT source log for specified rule</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[SRC-NAT-$6(-MASQ)?\]"</command> </tagNode> </children> </node> <node name="static"> <properties> <help>Show NAT static log</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[STATIC-(SRC|DST)-NAT-[0-9]+\]"</command> <children> <tagNode name="rule"> <properties> <help>Show NAT static log for specified rule</help> </properties> <command>journalctl --no-hostname --boot -k | egrep "\[STATIC-(SRC|DST)-NAT-$6\]"</command> </tagNode> </children> </node> </children> <command>journalctl --no-hostname --boot -k | egrep "\[(STATIC-)?(DST|SRC)-NAT-[0-9]+(-MASQ)?\]"</command> </node> <leafNode name="ndp-proxy"> <properties> <help>Show log for Neighbor Discovery Protocol (NDP) Proxy</help> </properties> 
<command>journalctl --no-hostname --boot --unit ndppd.service</command> </leafNode> <leafNode name="nhrp"> <properties> <help>Show log for Next Hop Resolution Protocol (NHRP)</help> </properties> <command>journalctl --no-hostname --boot --unit opennhrp.service</command> </leafNode> <leafNode name="ntp"> <properties> <help>Show log for Network Time Protocol (NTP)</help> </properties> <command>journalctl --no-hostname --boot --unit chrony.service</command> </leafNode> <node name="macsec"> <properties> <help>Show log for MACsec</help> </properties> <command>journalctl --no-hostname --boot --unit "wpa_supplicant-macsec@*.service"</command> <children> <tagNode name="interface"> <properties> <help>Show MACsec log on specific interface</help> <completionHelp> <path>interfaces macsec</path> </completionHelp> </properties> <command>SRC=$(cli-shell-api returnValue interfaces macsec "$5" source-interface); journalctl --no-hostname --boot --unit "wpa_supplicant-macsec@$SRC.service"</command> </tagNode> </children> </node> <node name="openvpn"> <properties> <help>Show log for OpenVPN</help> </properties> <command>journalctl --no-hostname --boot --unit openvpn@*.service</command> <children> <tagNode name="interface"> <properties> <help>Show OpenVPN log on specific interface</help> <completionHelp> <path>interfaces openvpn</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot --unit openvpn@$5.service</command> </tagNode> </children> </node> <node name="pppoe"> <properties> <help>Show log for PPPoE interface</help> </properties> <command>journalctl --no-hostname --boot --unit "ppp@pppoe*.service"</command> <children> <tagNode name="interface"> <properties> <help>Show PPPoE log on specific interface</help> <completionHelp> <path>interfaces pppoe</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot --unit "ppp@$5.service"</command> </tagNode> </children> </node> <leafNode name="pppoe-server"> <properties> <help>Show log for PPPoE server</help> </properties> <command>journalctl --no-hostname --boot --unit accel-ppp@pppoe.service</command> </leafNode> <node name="protocol"> <properties> <help>Show log for Routing Protocol</help> </properties> <children> <leafNode name="ospf"> <properties> <help>Show log for OSPF</help> </properties> <command>journalctl --boot /usr/lib/frr/ospfd</command> </leafNode> <leafNode name="ospfv3"> <properties> <help>Show log for OSPF for IPv6</help> </properties> <command>journalctl --boot /usr/lib/frr/ospf6d</command> </leafNode> <leafNode name="bgp"> <properties> <help>Show log for BGP</help> </properties> <command>journalctl --boot /usr/lib/frr/bgpd</command> </leafNode> <leafNode name="rip"> <properties> <help>Show log for RIP</help> </properties> <command>journalctl --boot /usr/lib/frr/ripd</command> </leafNode> <leafNode name="ripng"> <properties> <help>Show log for RIPng</help> </properties> <command>journalctl --boot /usr/lib/frr/ripngd</command> </leafNode> <leafNode name="static"> <properties> <help>Show log for static route</help> </properties> <command>journalctl --boot /usr/lib/frr/staticd</command> </leafNode> <leafNode name="multicast"> <properties> <help>Show log for Multicast protocol</help> </properties> <command>journalctl --boot /usr/lib/frr/pimd</command> </leafNode> <leafNode name="isis"> <properties> <help>Show log for ISIS</help> </properties> <command>journalctl --boot /usr/lib/frr/isisd</command> </leafNode> <leafNode name="openfabric"> <properties> <help>Show log for OpenFabric</help> </properties> 
<command>journalctl --boot /usr/lib/frr/fabricd</command> </leafNode> <leafNode name="nhrp"> <properties> <help>Show log for NHRP</help> </properties> <command>journalctl --boot /usr/lib/frr/nhrpd</command> </leafNode> <leafNode name="bfd"> <properties> <help>Show log for BFD</help> </properties> <command>journalctl --boot /usr/lib/frr/bfdd</command> </leafNode> <leafNode name="mpls"> <properties> <help>Show log for MPLS</help> </properties> <command>journalctl --boot /usr/lib/frr/ldpd</command> </leafNode> </children> </node> <leafNode name="router-advert"> <properties> <help>Show log for Router Advertisement Daemon (radvd)</help> </properties> <command>journalctl --no-hostname --boot --unit radvd.service</command> </leafNode> <leafNode name="snmp"> <properties> <help>Show log for Simple Network Monitoring Protocol (SNMP)</help> </properties> <command>journalctl --no-hostname --boot --unit snmpd.service</command> </leafNode> <node name="ssh"> <properties> <help>Show log for Secure Shell (SSH)</help> </properties> <command>journalctl --no-hostname --boot --unit ssh.service</command> <children> <node name="dynamic-protection"> <properties> <help>Show SSH guard log</help> </properties> <command>journalctl --no-hostname --boot --unit sshguard.service</command> </node> </children> </node> <tagNode name="tail"> <properties> <help>Show last n changes to messages</help> <completionHelp> <list><NUMBER></list> </completionHelp> </properties> <command>tail -n "$4" /var/log/messages | ${VYATTA_PAGER:-cat}</command> </tagNode> <node name="tail"> <properties> <help>Show last 10 lines of /var/log/messages file</help> </properties> <command>tail -n 10 /var/log/messages</command> </node> <leafNode name="vpn"> <properties> - <help>Monitor last lines of ALL Virtual Private Network services</help> + <help>Show log for ALL Virtual Private Network services</help> </properties> <command>journalctl --no-hostname --boot --unit strongswan.service --unit accel-ppp@*.service --unit ocserv.service</command> </leafNode> <leafNode name="ipsec"> <properties> <help>Show log for IPsec</help> </properties> <command>journalctl --no-hostname --boot --unit strongswan.service</command> </leafNode> <leafNode name="l2tp"> <properties> <help>Show log for L2TP</help> </properties> <command>journalctl --no-hostname --boot --unit accel-ppp@l2tp.service</command> </leafNode> <leafNode name="openconnect"> <properties> <help>Show log for OpenConnect</help> </properties> <command>journalctl --no-hostname --boot --unit ocserv.service</command> </leafNode> <leafNode name="pptp"> <properties> <help>Show log for PPTP</help> </properties> <command>journalctl --no-hostname --boot --unit accel-ppp@pptp.service</command> </leafNode> <leafNode name="sstp"> <properties> <help>Show log for Secure Socket Tunneling Protocol (SSTP) server</help> </properties> <command>journalctl --no-hostname --boot --unit accel-ppp@sstp.service</command> </leafNode> <node name="sstpc"> <properties> <help>Show log for Secure Socket Tunneling Protocol (SSTP) client</help> </properties> <command>journalctl --no-hostname --boot --unit "ppp@sstpc*.service"</command> <children> <tagNode name="interface"> <properties> <help>Show SSTP client log on specific interface</help> <completionHelp> <path>interfaces sstpc</path> </completionHelp> </properties> <command>journalctl --no-hostname --boot --unit "ppp@$5.service"</command> </tagNode> </children> </node> <leafNode name="vrrp"> <properties> <help>Show log for Virtual Router Redundancy Protocol (VRRP)</help> </properties> 
<command>journalctl --no-hostname --boot --unit keepalived.service</command> </leafNode> + <leafNode name="vyos-configd"> + <properties> + <help>Show log for VyOS configuration daemon</help> + </properties> + <command>journalctl --no-hostname --boot --unit vyos-configd.service</command> + </leafNode> <node name="wireless"> <properties> <help>Show log for Wireless interface</help> </properties> <children> <node name="wpa-supplicant"> <properties> <help>Show log for WPA supplicant</help> </properties> <command>if cli-shell-api existsActive interfaces wireless; then journalctl --no-hostname --boot --unit "wpa_supplicant@*.service"; else echo "No wireless interface configured!"; fi</command> <children> <tagNode name="interface"> <properties> <help>Show log for specific wireless interface supplicant</help> <completionHelp> <path>interfaces wireless</path> </completionHelp> </properties> <command>if [[ $(cli-shell-api returnActiveValue interfaces wireless $6 type) == "station" ]]; then journalctl --no-hostname --boot --unit "wpa_supplicant@$6.service"; else echo "Wireless interface $6 not configured as station!"; fi</command> </tagNode> </children> </node> <node name="hostapd"> <properties> <help>Show log for host access point daemon</help> </properties> <command>if cli-shell-api existsActive interfaces wireless; then journalctl --no-hostname --boot --unit "hostapd@*.service"; else echo "No wireless interface configured!"; fi</command> <children> <tagNode name="interface"> <properties> <help>Show log for specific host access point daemon interface</help> <completionHelp> <path>interfaces wireless</path> </completionHelp> </properties> <command>if [[ $(cli-shell-api returnActiveValue interfaces wireless $6 type) == "access-point" ]]; then journalctl --no-hostname --boot --unit "hostapd@$6.service"; else echo "Wireless interface $6 not configured as access-point!"; fi</command> </tagNode> </children> </node> </children> </node> <leafNode name="webproxy"> <properties> <help>Show log for Webproxy</help> </properties> <command>journalctl --no-hostname --boot --unit squid.service</command> </leafNode> </children> </node> </children> </node> </interfaceDefinition> diff --git a/python/vyos/configdict.py b/python/vyos/configdict.py index 5a353b110..cbcbf9f72 100644 --- a/python/vyos/configdict.py +++ b/python/vyos/configdict.py @@ -1,666 +1,1161 @@ # Copyright 2019-2024 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>. """ A library for retrieving value dicts from VyOS configs in a declarative fashion. 
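Illustrative example (hypothetical interface and value, not taken from a real config): a description such as {'mtu': (['mtu'], str)} together with the base path ['interfaces', 'ethernet', 'eth0'] makes retrieve_config() look up 'interfaces ethernet eth0 mtu' via config.return_value() and yields a dict of the form {'mtu': '1500'}.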
""" import os import json +from vyos.defaults import frr_debug_enable from vyos.utils.dict import dict_search from vyos.utils.process import cmd def retrieve_config(path_hash, base_path, config): """ Retrieves a VyOS config as a dict according to a declarative description The description dict, passed in the first argument, must follow this format: ``field_name : <path, type, [inner_options_dict]>``. Supported types are: ``str`` (for normal nodes), ``list`` (returns a list of strings, for multi nodes), ``bool`` (returns True if valueless node exists), ``dict`` (for tag nodes, returns a dict indexed by node names, according to description in the third item of the tuple). Args: path_hash (dict): Declarative description of the config to retrieve base_path (list): A base path to prepend to all option paths config (vyos.config.Config): A VyOS config object Returns: dict: config dict """ config_hash = {} for k in path_hash: if type(path_hash[k]) != tuple: raise ValueError("In field {0}: expected a tuple, got a value {1}".format(k, str(path_hash[k]))) if len(path_hash[k]) < 2: raise ValueError("In field {0}: field description must be a tuple of at least two items, path (list) and type".format(k)) path = path_hash[k][0] if type(path) != list: raise ValueError("In field {0}: path must be a list, not a {1}".format(k, type(path))) typ = path_hash[k][1] if type(typ) != type: raise ValueError("In field {0}: type must be a type, not a {1}".format(k, type(typ))) path = base_path + path path_str = " ".join(path) if typ == str: config_hash[k] = config.return_value(path_str) elif typ == list: config_hash[k] = config.return_values(path_str) elif typ == bool: config_hash[k] = config.exists(path_str) elif typ == dict: try: inner_hash = path_hash[k][2] except IndexError: raise ValueError("The type of the \'{0}\' field is dict, but inner options hash is missing from the tuple".format(k)) config_hash[k] = {} nodes = config.list_nodes(path_str) for node in nodes: config_hash[k][node] = retrieve_config(inner_hash, path + [node], config) return config_hash def dict_merge(source, destination): """ Merge two dictionaries. Only keys which are not present in destination will be copied from source, anything else will be kept untouched. Function will return a new dict which has the merged key/value pairs. """ from copy import deepcopy tmp = deepcopy(destination) for key, value in source.items(): if key not in tmp: tmp[key] = value elif isinstance(source[key], dict): tmp[key] = dict_merge(source[key], tmp[key]) return tmp def list_diff(first, second): """ Diff two dictionaries and return only unique items """ second = set(second) return [item for item in first if item not in second] def is_node_changed(conf, path): """ Check if any key under path has been changed and return True. If nothing changed, return false """ from vyos.configdiff import get_config_diff D = get_config_diff(conf, key_mangling=('-', '_')) return D.is_node_changed(path) def leaf_node_changed(conf, path): """ Check if a leaf node was altered. If it has been altered - values has been changed, or it was added/removed, we will return a list containing the old value(s). If nothing has been changed, None is returned. NOTE: path must use the real CLI node name (e.g. with a hyphen!) 
""" from vyos.configdiff import get_config_diff D = get_config_diff(conf, key_mangling=('-', '_')) (new, old) = D.get_value_diff(path) if new != old: if isinstance(old, dict): # valueLess nodes return {} if node is deleted return True if old is None and isinstance(new, dict): # valueLess nodes return {} if node was added return True if old is None: return [] if isinstance(old, str): return [old] if isinstance(old, list): if isinstance(new, str): new = [new] elif isinstance(new, type(None)): new = [] return list_diff(old, new) return None def node_changed(conf, path, key_mangling=None, recursive=False, expand_nodes=None) -> list: """ Check if node under path (or anything under path if recursive=True) was changed. By default we only check if a node or subnode (recursive) was deleted from path. If expand_nodes is set to Diff.ADD we can also check if something was added to the path. If nothing changed, an empty list is returned. """ from vyos.configdiff import get_config_diff from vyos.configdiff import Diff # to prevent circular dependencies we assign the default here if not expand_nodes: expand_nodes = Diff.DELETE D = get_config_diff(conf, key_mangling) # get_child_nodes_diff() will return dict_keys() tmp = D.get_child_nodes_diff(path, expand_nodes=expand_nodes, recursive=recursive) output = [] if expand_nodes & Diff.DELETE: output.extend(list(tmp['delete'].keys())) if expand_nodes & Diff.ADD: output.extend(list(tmp['add'].keys())) # remove duplicate keys from list, this happens when a node (e.g. description) is altered output = list(dict.fromkeys(output)) return output def get_removed_vlans(conf, path, dict): """ Common function to parse a dictionary retrieved via get_config_dict() and determine any added/removed VLAN interfaces - be it 802.1q or Q-in-Q. """ from vyos.configdiff import get_config_diff, Diff # Check vif, vif-s/vif-c VLAN interfaces for removal D = get_config_diff(conf, key_mangling=('-', '_')) D.set_level(conf.get_level()) # get_child_nodes() will return dict_keys(), mangle this into a list with PEP448 keys = D.get_child_nodes_diff(path + ['vif'], expand_nodes=Diff.DELETE)['delete'].keys() if keys: dict['vif_remove'] = [*keys] # get_child_nodes() will return dict_keys(), mangle this into a list with PEP448 keys = D.get_child_nodes_diff(path + ['vif-s'], expand_nodes=Diff.DELETE)['delete'].keys() if keys: dict['vif_s_remove'] = [*keys] for vif in dict.get('vif_s', {}).keys(): keys = D.get_child_nodes_diff(path + ['vif-s', vif, 'vif-c'], expand_nodes=Diff.DELETE)['delete'].keys() if keys: dict['vif_s'][vif]['vif_c_remove'] = [*keys] return dict def is_member(conf, interface, intftype=None): """ Checks if passed interface is member of other interface of specified type. 
intftype is optional, if not passed it will search all known types (currently bridge and bonding) Returns: dict empty -> Interface is not a member key -> Interface is a member of this interface """ ret_val = {} intftypes = ['bonding', 'bridge'] if intftype not in intftypes + [None]: raise ValueError(( f'unknown interface type "{intftype}" or it cannot ' f'have member interfaces')) intftype = intftypes if intftype == None else [intftype] for iftype in intftype: base = ['interfaces', iftype] for intf in conf.list_nodes(base): member = base + [intf, 'member', 'interface', interface] if conf.exists(member): tmp = conf.get_config_dict(member, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) ret_val.update({intf : tmp}) return ret_val def is_mirror_intf(conf, interface, direction=None): """ Check whether the passed interface is used for port mirroring. Direction is optional, if not passed it will search all known direction (currently ingress and egress) Returns: None -> Interface is not a monitor interface Array() -> This interface is a monitor interface of interfaces """ from vyos.ifconfig import Section directions = ['ingress', 'egress'] if direction not in directions + [None]: raise ValueError(f'Unknown interface mirror direction "{direction}"') direction = directions if direction == None else [direction] ret_val = None base = ['interfaces'] for dir in direction: for iftype in conf.list_nodes(base): iftype_base = base + [iftype] for intf in conf.list_nodes(iftype_base): mirror = iftype_base + [intf, 'mirror', dir, interface] if conf.exists(mirror): path = ['interfaces', Section.section(intf), intf] tmp = conf.get_config_dict(path, key_mangling=('-', '_'), get_first_key=True) ret_val = {intf : tmp} return ret_val def has_address_configured(conf, intf): """ Checks if interface has an address configured. Checks the following config nodes: 'address', 'ipv6 address eui64', 'ipv6 address autoconf' Returns True if interface has address configured, False if it doesn't. """ from vyos.ifconfig import Section ret = False old_level = conf.get_level() conf.set_level([]) intfpath = ['interfaces', Section.get_config_path(intf)] if (conf.exists([intfpath, 'address']) or conf.exists([intfpath, 'ipv6', 'address', 'autoconf']) or conf.exists([intfpath, 'ipv6', 'address', 'eui64'])): ret = True conf.set_level(old_level) return ret def has_vrf_configured(conf, intf): """ Checks if interface has a VRF configured. Returns True if interface has VRF configured, False if it doesn't. """ from vyos.ifconfig import Section ret = False old_level = conf.get_level() conf.set_level([]) if conf.exists(['interfaces', Section.get_config_path(intf), 'vrf']): ret = True conf.set_level(old_level) return ret def has_vlan_subinterface_configured(conf, intf): """ Checks if interface has an VLAN subinterface configured. Checks the following config nodes: 'vif', 'vif-s' Return True if interface has VLAN subinterface configured. """ from vyos.ifconfig import Section ret = False intfpath = ['interfaces', Section.section(intf), intf] if (conf.exists(intfpath + ['vif']) or conf.exists(intfpath + ['vif-s'])): ret = True return ret def is_source_interface(conf, interface, intftype=None): """ Checks if passed interface is configured as source-interface of other interfaces of specified type. 
intftype is optional, if not passed it will search all known types (currently pppoe, macsec, pseudo-ethernet, tunnel and vxlan) Returns: None -> Interface is not a member interface name -> Interface is a member of this interface False -> interface type cannot have members """ ret_val = None intftypes = ['macsec', 'pppoe', 'pseudo-ethernet', 'tunnel', 'vxlan'] if not intftype: intftype = intftypes if isinstance(intftype, str): intftype = [intftype] elif not isinstance(intftype, list): raise ValueError(f'Interface type "{type(intftype)}" must be either str or list!') if not all(x in intftypes for x in intftype): raise ValueError(f'unknown interface type "{intftype}" or it can not ' 'have a source-interface') for it in intftype: base = ['interfaces', it] for intf in conf.list_nodes(base): src_intf = base + [intf, 'source-interface'] if conf.exists(src_intf) and interface in conf.return_values(src_intf): ret_val = intf break return ret_val def get_dhcp_interfaces(conf, vrf=None): """ Common helper function to retrieve all interfaces from the current CLI session that have DHCP configured. """ dhcp_interfaces = {} dict = conf.get_config_dict(['interfaces'], get_first_key=True) if not dict: return dhcp_interfaces def check_dhcp(config): ifname = config['ifname'] tmp = {} if 'address' in config and 'dhcp' in config['address']: options = {} if dict_search('dhcp_options.default_route_distance', config) != None: options.update({'dhcp_options' : config['dhcp_options']}) if 'vrf' in config: if vrf == config['vrf']: tmp.update({ifname : options}) else: if vrf is None: tmp.update({ifname : options}) return tmp for section, interface in dict.items(): for ifname in interface: # always reset config level, as get_interface_dict() will alter it conf.set_level([]) # we already have a dict representation of the config from get_config_dict(), # but with the extended information from get_interface_dict() we also # get the DHCP client default-route-distance default option if not specified. _, ifconfig = get_interface_dict(conf, ['interfaces', section], ifname) tmp = check_dhcp(ifconfig) dhcp_interfaces.update(tmp) # check per VLAN interfaces for vif, vif_config in ifconfig.get('vif', {}).items(): tmp = check_dhcp(vif_config) dhcp_interfaces.update(tmp) # check QinQ VLAN interfaces for vif_s, vif_s_config in ifconfig.get('vif_s', {}).items(): tmp = check_dhcp(vif_s_config) dhcp_interfaces.update(tmp) for vif_c, vif_c_config in vif_s_config.get('vif_c', {}).items(): tmp = check_dhcp(vif_c_config) dhcp_interfaces.update(tmp) return dhcp_interfaces def get_pppoe_interfaces(conf, vrf=None): """ Common helper function to retrieve all interfaces from the current CLI session that have PPPoE configured. """ pppoe_interfaces = {} conf.set_level([]) for ifname in conf.list_nodes(['interfaces', 'pppoe']): # always reset config level, as get_interface_dict() will alter it conf.set_level([]) # we already have a dict representation of the config from get_config_dict(), # but with the extended information from get_interface_dict() we also # get the DHCP client default-route-distance default option if not specified.
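        # Illustrative only (shape assumed from the option handling below, values
        # made up): the helper ultimately returns a mapping such as
        #   {'pppoe0': {'default_route_distance': '210'},
        #    'pppoe1': {'no_default_route': {}}}
        # keyed by interface name, optionally filtered by the requested VRF.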
_, ifconfig = get_interface_dict(conf, ['interfaces', 'pppoe'], ifname) options = {} if 'default_route_distance' in ifconfig: options.update({'default_route_distance' : ifconfig['default_route_distance']}) if 'no_default_route' in ifconfig: options.update({'no_default_route' : {}}) if 'vrf' in ifconfig: if vrf == ifconfig['vrf']: pppoe_interfaces.update({ifname : options}) else: if vrf is None: pppoe_interfaces.update({ifname : options}) return pppoe_interfaces def get_interface_dict(config, base, ifname='', recursive_defaults=True, with_pki=False): """ Common utility function to retrieve and mangle the interfaces configuration from the CLI input nodes. All interfaces have a common base where value retrieval is identical. This function must be used whenever possible when working on the interfaces node! Return a dictionary with the necessary interface config keys. """ if not ifname: from vyos import ConfigError # determine tagNode instance if 'VYOS_TAGNODE_VALUE' not in os.environ: raise ConfigError('Interface (VYOS_TAGNODE_VALUE) not specified') ifname = os.environ['VYOS_TAGNODE_VALUE'] # Check if interface has been removed. We must use exists() as # get_config_dict() will always return {} - even when an empty interface # node like the following exists. # +macsec macsec1 { # +} if not config.exists(base + [ifname]): dict = config.get_config_dict(base + [ifname], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) dict.update({'deleted' : {}}) else: # Get config_dict with default values dict = config.get_config_dict(base + [ifname], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=recursive_defaults, with_pki=with_pki) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' not in dict or 'dhcp' not in dict['address']: if 'dhcp_options' in dict: del dict['dhcp_options'] # Add interface instance name into dictionary dict.update({'ifname': ifname}) # Check if QoS policy is applied on this interface - See ifconfig.interface.set_mirror_redirect() if config.exists(['qos', 'interface', ifname]): dict.update({'traffic_policy': {}}) address = leaf_node_changed(config, base + [ifname, 'address']) if address: dict.update({'address_old' : address}) # Check if we are a member of a bridge device bridge = is_member(config, ifname, 'bridge') if bridge: dict.update({'is_bridge_member' : bridge}) # Check if it is a monitor interface mirror = is_mirror_intf(config, ifname) if mirror: dict.update({'is_mirror_intf' : mirror}) # Check if we are a member of a bond device bond = is_member(config, ifname, 'bonding') if bond: dict.update({'is_bond_member' : bond}) # Check if any DHCP options changed which require a client restart dhcp = is_node_changed(config, base + [ifname, 'dhcp-options']) if dhcp: dict.update({'dhcp_options_changed' : {}}) # Changing interface VRF assignments requires a DHCP restart, too dhcp = is_node_changed(config, base + [ifname, 'vrf']) if dhcp: dict.update({'dhcp_options_changed' : {}}) # Some interfaces come with a source_interface which must also not be part # of any other bond or bridge interface as it is exclusively assigned as the # Kernel's "lower" interface to this new "virtual/upper" interface.
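    # Illustrative only (interface names assumed): if e.g. 'eth1' is the configured
    # source-interface and is already a member of bridge 'br0', the checks below add
    #   {'source_interface_is_bridge_member': {'br0': {}}}
    # so the per-interface script can reject such a configuration later on.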
if 'source_interface' in dict: # Check if source interface is member of another bridge tmp = is_member(config, dict['source_interface'], 'bridge') if tmp: dict.update({'source_interface_is_bridge_member' : tmp}) # Check if source interface is member of another bridge tmp = is_member(config, dict['source_interface'], 'bonding') if tmp: dict.update({'source_interface_is_bond_member' : tmp}) mac = leaf_node_changed(config, base + [ifname, 'mac']) if mac: dict.update({'mac_old' : mac}) eui64 = leaf_node_changed(config, base + [ifname, 'ipv6', 'address', 'eui64']) if eui64: tmp = dict_search('ipv6.address', dict) if not tmp: dict.update({'ipv6': {'address': {'eui64_old': eui64}}}) else: dict['ipv6']['address'].update({'eui64_old': eui64}) for vif, vif_config in dict.get('vif', {}).items(): # Add subinterface name to dictionary dict['vif'][vif].update({'ifname' : f'{ifname}.{vif}'}) if config.exists(['qos', 'interface', f'{ifname}.{vif}']): dict['vif'][vif].update({'traffic_policy': {}}) if 'deleted' not in dict: address = leaf_node_changed(config, base + [ifname, 'vif', vif, 'address']) if address: dict['vif'][vif].update({'address_old' : address}) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' not in dict['vif'][vif] or 'dhcp' not in dict['vif'][vif]['address']: if 'dhcp_options' in dict['vif'][vif]: del dict['vif'][vif]['dhcp_options'] # Check if we are a member of a bridge device bridge = is_member(config, f'{ifname}.{vif}', 'bridge') if bridge: dict['vif'][vif].update({'is_bridge_member' : bridge}) # Check if any DHCP options changed which require a client restat dhcp = is_node_changed(config, base + [ifname, 'vif', vif, 'dhcp-options']) if dhcp: dict['vif'][vif].update({'dhcp_options_changed' : {}}) for vif_s, vif_s_config in dict.get('vif_s', {}).items(): # Add subinterface name to dictionary dict['vif_s'][vif_s].update({'ifname' : f'{ifname}.{vif_s}'}) if config.exists(['qos', 'interface', f'{ifname}.{vif_s}']): dict['vif_s'][vif_s].update({'traffic_policy': {}}) if 'deleted' not in dict: address = leaf_node_changed(config, base + [ifname, 'vif-s', vif_s, 'address']) if address: dict['vif_s'][vif_s].update({'address_old' : address}) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' not in dict['vif_s'][vif_s] or 'dhcp' not in \ dict['vif_s'][vif_s]['address']: if 'dhcp_options' in dict['vif_s'][vif_s]: del dict['vif_s'][vif_s]['dhcp_options'] # Check if we are a member of a bridge device bridge = is_member(config, f'{ifname}.{vif_s}', 'bridge') if bridge: dict['vif_s'][vif_s].update({'is_bridge_member' : bridge}) # Check if any DHCP options changed which require a client restat dhcp = is_node_changed(config, base + [ifname, 'vif-s', vif_s, 'dhcp-options']) if dhcp: dict['vif_s'][vif_s].update({'dhcp_options_changed' : {}}) for vif_c, vif_c_config in vif_s_config.get('vif_c', {}).items(): # Add subinterface name to dictionary dict['vif_s'][vif_s]['vif_c'][vif_c].update({'ifname' : f'{ifname}.{vif_s}.{vif_c}'}) if config.exists(['qos', 'interface', f'{ifname}.{vif_s}.{vif_c}']): dict['vif_s'][vif_s]['vif_c'][vif_c].update({'traffic_policy': {}}) if 'deleted' not in dict: address = leaf_node_changed(config, base + [ifname, 'vif-s', vif_s, 'vif-c', vif_c, 'address']) if address: dict['vif_s'][vif_s]['vif_c'][vif_c].update( {'address_old' : address}) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' 
not in dict['vif_s'][vif_s]['vif_c'][vif_c] or 'dhcp' \ not in dict['vif_s'][vif_s]['vif_c'][vif_c]['address']: if 'dhcp_options' in dict['vif_s'][vif_s]['vif_c'][vif_c]: del dict['vif_s'][vif_s]['vif_c'][vif_c]['dhcp_options'] # Check if we are a member of a bridge device bridge = is_member(config, f'{ifname}.{vif_s}.{vif_c}', 'bridge') if bridge: dict['vif_s'][vif_s]['vif_c'][vif_c].update( {'is_bridge_member' : bridge}) # Check if any DHCP options changed which require a client restat dhcp = is_node_changed(config, base + [ifname, 'vif-s', vif_s, 'vif-c', vif_c, 'dhcp-options']) if dhcp: dict['vif_s'][vif_s]['vif_c'][vif_c].update({'dhcp_options_changed' : {}}) # Check vif, vif-s/vif-c VLAN interfaces for removal dict = get_removed_vlans(config, base + [ifname], dict) return ifname, dict def get_vlan_ids(interface): """ Get the VLAN ID of the interface bound to the bridge """ vlan_ids = set() bridge_status = cmd('bridge -j vlan show', shell=True) vlan_filter_status = json.loads(bridge_status) if vlan_filter_status is not None: for interface_status in vlan_filter_status: ifname = interface_status['ifname'] if interface == ifname: vlans_status = interface_status['vlans'] for vlan_status in vlans_status: vlan_id = vlan_status['vlan'] vlan_ids.add(vlan_id) return vlan_ids def get_accel_dict(config, base, chap_secrets, with_pki=False): """ Common utility function to retrieve and mangle the Accel-PPP configuration from different CLI input nodes. All Accel-PPP services have a common base where value retrival is identical. This function must be used whenever possible when working with Accel-PPP services! Return a dictionary with the necessary interface config keys. """ from vyos.utils.cpu import get_core_count from vyos.template import is_ipv4 dict = config.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_recursive_defaults=True, with_pki=with_pki) # set CPUs cores to process requests dict.update({'thread_count' : get_core_count()}) # we need to store the path to the secrets file dict.update({'chap_secrets_file' : chap_secrets}) # We can only have two IPv4 and three IPv6 nameservers - also they are # configured in a different way in the configuration, this is why we split # the configuration if 'name_server' in dict: ns_v4 = [] ns_v6 = [] for ns in dict['name_server']: if is_ipv4(ns): ns_v4.append(ns) else: ns_v6.append(ns) dict.update({'name_server_ipv4' : ns_v4, 'name_server_ipv6' : ns_v6}) del dict['name_server'] # Check option "disable-accounting" per server and replace default value from '1813' to '0' for server in (dict_search('authentication.radius.server', dict) or []): if 'disable_accounting' in dict['authentication']['radius']['server'][server]: dict['authentication']['radius']['server'][server]['acct_port'] = '0' return dict + +def get_frrender_dict(conf, argv=None) -> dict: + from copy import deepcopy + from vyos.config import config_dict_merge + from vyos.frrender import frr_protocols + + # Create an empty dictionary which will be filled down the code path and + # returned to the caller + dict = {} + + if argv and len(argv) > 1: + dict['vrf_context'] = argv[1] + + def dict_helper_ospf_defaults(ospf, path): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. 
+ default_values = conf.get_config_defaults(path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # We have to cleanup the default dict, as default values could enable features + # which are not explicitly enabled on the CLI. Example: default-information + # originate comes with a default metric-type of 2, which will enable the + # entire default-information originate tree, even when not set via CLI so we + # need to check this first and probably drop that key. + if dict_search('default_information.originate', ospf) is None: + del default_values['default_information'] + if 'mpls_te' not in ospf: + del default_values['mpls_te'] + if 'graceful_restart' not in ospf: + del default_values['graceful_restart'] + for area_num in default_values.get('area', []): + if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None: + del default_values['area'][area_num]['area_type']['nssa'] + + for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']: + if dict_search(f'redistribute.{protocol}', ospf) is None: + del default_values['redistribute'][protocol] + if not bool(default_values['redistribute']): + del default_values['redistribute'] + + for interface in ospf.get('interface', []): + # We need to reload the defaults on every pass b/c of + # hello-multiplier dependency on dead-interval + # If hello-multiplier is set, we need to remove the default from + # dead-interval. + if 'hello_multiplier' in ospf['interface'][interface]: + del default_values['interface'][interface]['dead_interval'] + + ospf = config_dict_merge(default_values, ospf) + return ospf + + def dict_helper_ospfv3_defaults(ospfv3, path): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = conf.get_config_defaults(path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # We have to cleanup the default dict, as default values could enable features + # which are not explicitly enabled on the CLI. Example: default-information + # originate comes with a default metric-type of 2, which will enable the + # entire default-information originate tree, even when not set via CLI so we + # need to check this first and probably drop that key. + if dict_search('default_information.originate', ospfv3) is None: + del default_values['default_information'] + if 'graceful_restart' not in ospfv3: + del default_values['graceful_restart'] + + for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']: + if dict_search(f'redistribute.{protocol}', ospfv3) is None: + del default_values['redistribute'][protocol] + if not bool(default_values['redistribute']): + del default_values['redistribute'] + + default_values.pop('interface', {}) + + # merge in remaining default values + ospfv3 = config_dict_merge(default_values, ospfv3) + return ospfv3 + + def dict_helper_pim_defaults(pim, path): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = conf.get_config_defaults(path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # We have to cleanup the default dict, as default values could enable features + # which are not explicitly enabled on the CLI. 
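+        # Assumption for illustration: the recursive XML defaults carry an 'igmp'
+        # subtree for every configured interface, so it is dropped below for any
+        # interface where IGMP was not explicitly enabled on the CLI.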
+ for interface in pim.get('interface', []): + if 'igmp' not in pim['interface'][interface]: + del default_values['interface'][interface]['igmp'] + + pim = config_dict_merge(default_values, pim) + return pim + + # Ethernet and bonding interfaces can participate in EVPN which is configured via FRR + tmp = {} + for if_type in ['ethernet', 'bonding']: + interface_path = ['interfaces', if_type] + if not conf.exists(interface_path): + continue + for interface in conf.list_nodes(interface_path): + evpn_path = interface_path + [interface, 'evpn'] + if not conf.exists(evpn_path): + continue + + evpn = conf.get_config_dict(evpn_path, key_mangling=('-', '_')) + tmp.update({interface : evpn}) + # At least one participating EVPN interface found, add to result dict + if tmp: dict['interfaces'] = tmp + + # Zebra prefix exchange for Kernel IP/IPv6 and routing protocols + for ip_version in ['ip', 'ipv6']: + ip_cli_path = ['system', ip_version] + ip_dict = conf.get_config_dict(ip_cli_path, key_mangling=('-', '_'), + get_first_key=True, with_recursive_defaults=True) + if ip_dict: + ip_dict['afi'] = ip_version + dict.update({ip_version : ip_dict}) + + # Enable SNMP agentx support + # SNMP AgentX support cannot be disabled once enabled + if conf.exists(['service', 'snmp']): + dict['snmp'] = {} + + # We will always need the policy key + dict['policy'] = conf.get_config_dict(['policy'], key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + + # We need to check the CLI if the BABEL node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + babel_cli_path = ['protocols', 'babel'] + if conf.exists(babel_cli_path): + babel = conf.get_config_dict(babel_cli_path, key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'babel' : babel}) + + # We need to check the CLI if the BFD node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + bfd_cli_path = ['protocols', 'bfd'] + if conf.exists(bfd_cli_path): + bfd = conf.get_config_dict(bfd_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'bfd' : bfd}) + + # We need to check the CLI if the BGP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + bgp_cli_path = ['protocols', 'bgp'] + if conf.exists(bgp_cli_path): + bgp = conf.get_config_dict(bgp_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + bgp['dependent_vrfs'] = {} + dict.update({'bgp' : bgp}) + elif conf.exists_effective(bgp_cli_path): + dict.update({'bgp' : {'deleted' : '', 'dependent_vrfs' : {}}}) + + # We need to check the CLI if the EIGRP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + eigrp_cli_path = ['protocols', 'eigrp'] + if conf.exists(eigrp_cli_path): + isis = conf.get_config_dict(eigrp_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'eigrp' : isis}) + elif conf.exists_effective(eigrp_cli_path): + dict.update({'eigrp' : {'deleted' : ''}}) + + # We need to check the CLI if the ISIS node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + isis_cli_path = ['protocols', 'isis'] + if 
conf.exists(isis_cli_path): + isis = conf.get_config_dict(isis_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'isis' : isis}) + elif conf.exists_effective(isis_cli_path): + dict.update({'isis' : {'deleted' : ''}}) + + # We need to check the CLI if the MPLS node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + mpls_cli_path = ['protocols', 'mpls'] + if conf.exists(mpls_cli_path): + mpls = conf.get_config_dict(mpls_cli_path, key_mangling=('-', '_'), + get_first_key=True) + dict.update({'mpls' : mpls}) + elif conf.exists_effective(mpls_cli_path): + dict.update({'mpls' : {'deleted' : ''}}) + + # We need to check the CLI if the OPENFABRIC node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + openfabric_cli_path = ['protocols', 'openfabric'] + if conf.exists(openfabric_cli_path): + openfabric = conf.get_config_dict(openfabric_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + dict.update({'openfabric' : openfabric}) + elif conf.exists_effective(openfabric_cli_path): + dict.update({'openfabric' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPF node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + ospf_cli_path = ['protocols', 'ospf'] + if conf.exists(ospf_cli_path): + ospf = conf.get_config_dict(ospf_cli_path, key_mangling=('-', '_'), + get_first_key=True) + ospf = dict_helper_ospf_defaults(ospf, ospf_cli_path) + dict.update({'ospf' : ospf}) + elif conf.exists_effective(ospf_cli_path): + dict.update({'ospf' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPFv3 node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + ospfv3_cli_path = ['protocols', 'ospfv3'] + if conf.exists(ospfv3_cli_path): + ospfv3 = conf.get_config_dict(ospfv3_cli_path, key_mangling=('-', '_'), + get_first_key=True) + ospfv3 = dict_helper_ospfv3_defaults(ospfv3, ospfv3_cli_path) + dict.update({'ospfv3' : ospfv3}) + elif conf.exists_effective(ospfv3_cli_path): + dict.update({'ospfv3' : {'deleted' : ''}}) + + # We need to check the CLI if the PIM node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + pim_cli_path = ['protocols', 'pim'] + if conf.exists(pim_cli_path): + pim = conf.get_config_dict(pim_cli_path, key_mangling=('-', '_'), + get_first_key=True) + pim = dict_helper_pim_defaults(pim, pim_cli_path) + dict.update({'pim' : pim}) + elif conf.exists_effective(pim_cli_path): + dict.update({'pim' : {'deleted' : ''}}) + + # We need to check the CLI if the PIM6 node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + pim6_cli_path = ['protocols', 'pim6'] + if conf.exists(pim6_cli_path): + pim6 = conf.get_config_dict(pim6_cli_path, key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'pim6' : pim6}) + elif conf.exists_effective(pim6_cli_path): + dict.update({'pim6' : {'deleted' : ''}}) + + # We need to check the CLI if the RIP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + rip_cli_path = ['protocols', 'rip'] + if conf.exists(rip_cli_path): + rip = conf.get_config_dict(rip_cli_path, 
key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'rip' : rip}) + elif conf.exists_effective(rip_cli_path): + dict.update({'rip' : {'deleted' : ''}}) + + # We need to check the CLI if the RIPng node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + ripng_cli_path = ['protocols', 'ripng'] + if conf.exists(ripng_cli_path): + ripng = conf.get_config_dict(ripng_cli_path, key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'ripng' : ripng}) + elif conf.exists_effective(ripng_cli_path): + dict.update({'ripng' : {'deleted' : ''}}) + + # We need to check the CLI if the RPKI node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + rpki_cli_path = ['protocols', 'rpki'] + if conf.exists(rpki_cli_path): + rpki = conf.get_config_dict(rpki_cli_path, key_mangling=('-', '_'), + get_first_key=True, with_pki=True, + with_recursive_defaults=True) + rpki_ssh_key_base = '/run/frr/id_rpki' + for cache, cache_config in rpki.get('cache',{}).items(): + if 'ssh' in cache_config: + cache_config['ssh']['public_key_file'] = f'{rpki_ssh_key_base}_{cache}.pub' + cache_config['ssh']['private_key_file'] = f'{rpki_ssh_key_base}_{cache}' + dict.update({'rpki' : rpki}) + elif conf.exists_effective(rpki_cli_path): + dict.update({'rpki' : {'deleted' : ''}}) + + # We need to check the CLI if the Segment Routing node is present and thus load in + # all the default values present on the CLI - that's why we have if conf.exists() + sr_cli_path = ['protocols', 'segment-routing'] + if conf.exists(sr_cli_path): + sr = conf.get_config_dict(sr_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'segment_routing' : sr}) + elif conf.exists_effective(sr_cli_path): + dict.update({'segment_routing' : {'deleted' : ''}}) + + # We need to check the CLI if the static node is present and thus load in + # all the default values present on the CLI - that's why we have if conf.exists() + static_cli_path = ['protocols', 'static'] + if conf.exists(static_cli_path): + static = conf.get_config_dict(static_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + + # T3680 - get a list of all interfaces currently configured to use DHCP + tmp = get_dhcp_interfaces(conf) + if tmp: static.update({'dhcp' : tmp}) + tmp = get_pppoe_interfaces(conf) + if tmp: static.update({'pppoe' : tmp}) + + dict.update({'static' : static}) + elif conf.exists_effective(static_cli_path): + dict.update({'static' : {'deleted' : ''}}) + + # keep a re-usable list of dependent VRFs + dependent_vrfs_default = {} + if 'bgp' in dict: + dependent_vrfs_default = deepcopy(dict['bgp']) + # we do not need to nest the 'dependent_vrfs' key - simply remove it + if 'dependent_vrfs' in dependent_vrfs_default: + del dependent_vrfs_default['dependent_vrfs'] + + vrf_cli_path = ['vrf', 'name'] + if conf.exists(vrf_cli_path): + vrf = conf.get_config_dict(vrf_cli_path, key_mangling=('-', '_'), + get_first_key=False, + no_tag_node_value_mangle=True) + # We do not have any VRF related default values on the CLI. 
The defaults will only + # come into place under the protocols tree, thus we can safely merge them with the + # appropriate routing protocols + for vrf_name, vrf_config in vrf['name'].items(): + bgp_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'bgp'] + if 'bgp' in vrf_config.get('protocols', []): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = conf.get_config_defaults(bgp_vrf_path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # merge in remaining default values + vrf_config['protocols']['bgp'] = config_dict_merge(default_values, + vrf_config['protocols']['bgp']) + + # Add this BGP VRF instance as dependency into the default VRF + if 'bgp' in dict: + dict['bgp']['dependent_vrfs'].update({vrf_name : deepcopy(vrf_config)}) + + vrf_config['protocols']['bgp']['dependent_vrfs'] = conf.get_config_dict( + vrf_cli_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + # We can safely delete ourself from the dependent VRF list + if vrf_name in vrf_config['protocols']['bgp']['dependent_vrfs']: + del vrf_config['protocols']['bgp']['dependent_vrfs'][vrf_name] + + # Add dependency on possible existing default VRF to this VRF + if 'bgp' in dict: + vrf_config['protocols']['bgp']['dependent_vrfs'].update({'default': {'protocols': { + 'bgp': dependent_vrfs_default}}}) + elif conf.exists_effective(bgp_vrf_path): + # Add this BGP VRF instance as dependency into the default VRF + tmp = {'deleted' : '', 'dependent_vrfs': deepcopy(vrf['name'])} + # We can safely delete ourself from the dependent VRF list + if vrf_name in tmp['dependent_vrfs']: + del tmp['dependent_vrfs'][vrf_name] + + # Add dependency on possible existing default VRF to this VRF + if 'bgp' in dict: + tmp['dependent_vrfs'].update({'default': {'protocols': { + 'bgp': dependent_vrfs_default}}}) + + if 'bgp' in dict: + dict['bgp']['dependent_vrfs'].update({vrf_name : {'protocols': tmp} }) + + if 'protocols' not in vrf['name'][vrf_name]: + vrf['name'][vrf_name].update({'protocols': {'bgp' : tmp}}) + else: + vrf['name'][vrf_name]['protocols'].update({'bgp' : tmp}) + + # We need to check the CLI if the EIGRP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + eigrp_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'eigrp'] + if 'eigrp' in vrf_config.get('protocols', []): + eigrp = conf.get_config_dict(eigrp_vrf_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + vrf['name'][vrf_name]['protocols'].update({'eigrp' : isis}) + elif conf.exists_effective(eigrp_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'eigrp' : {'deleted' : ''}}) + + # We need to check the CLI if the ISIS node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + isis_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'isis'] + if 'isis' in vrf_config.get('protocols', []): + isis = conf.get_config_dict(isis_vrf_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True, with_recursive_defaults=True) + vrf['name'][vrf_name]['protocols'].update({'isis' : isis}) + elif conf.exists_effective(isis_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'isis' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPF node is present and thus load in all the default + # values present on the CLI - that's why we have if 
conf.exists() + ospf_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'ospf'] + if 'ospf' in vrf_config.get('protocols', []): + ospf = conf.get_config_dict(ospf_vrf_path, key_mangling=('-', '_'), get_first_key=True) + ospf = dict_helper_ospf_defaults(vrf_config['protocols']['ospf'], ospf_vrf_path) + vrf['name'][vrf_name]['protocols'].update({'ospf' : ospf}) + elif conf.exists_effective(ospf_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'ospf' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPFv3 node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + ospfv3_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'ospfv3'] + if 'ospfv3' in vrf_config.get('protocols', []): + ospfv3 = conf.get_config_dict(ospfv3_vrf_path, key_mangling=('-', '_'), get_first_key=True) + ospfv3 = dict_helper_ospfv3_defaults(vrf_config['protocols']['ospfv3'], ospfv3_vrf_path) + vrf['name'][vrf_name]['protocols'].update({'ospfv3' : ospfv3}) + elif conf.exists_effective(ospfv3_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'ospfv3' : {'deleted' : ''}}) + + # We need to check the CLI if the static node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + static_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'static'] + if 'static' in vrf_config.get('protocols', []): + static = conf.get_config_dict(static_vrf_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + # T3680 - get a list of all interfaces currently configured to use DHCP + tmp = get_dhcp_interfaces(conf, vrf_name) + if tmp: static.update({'dhcp' : tmp}) + tmp = get_pppoe_interfaces(conf, vrf_name) + if tmp: static.update({'pppoe' : tmp}) + + vrf['name'][vrf_name]['protocols'].update({'static': static}) + elif conf.exists_effective(static_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'static': {'deleted' : ''}}) + + vrf_vni_path = ['vrf', 'name', vrf_name, 'vni'] + if conf.exists(vrf_vni_path): + vrf_config.update({'vni': conf.return_value(vrf_vni_path)}) + + dict.update({'vrf' : vrf}) + elif conf.exists_effective(vrf_cli_path): + effective_vrf = conf.get_config_dict(vrf_cli_path, key_mangling=('-', '_'), + get_first_key=False, + no_tag_node_value_mangle=True, + effective=True) + vrf = {'name' : {}} + for vrf_name, vrf_config in effective_vrf.get('name', {}).items(): + vrf['name'].update({vrf_name : {}}) + for protocol in frr_protocols: + if protocol in vrf_config.get('protocols', []): + # Create initial protocols key if not present + if 'protocols' not in vrf['name'][vrf_name]: + vrf['name'][vrf_name].update({'protocols' : {}}) + # All routing protocols are deleted when we pass this point + tmp = {'deleted' : ''} + + # Special treatment for BGP routing protocol + if protocol == 'bgp': + tmp['dependent_vrfs'] = {} + if 'name' in vrf: + tmp['dependent_vrfs'] = conf.get_config_dict( + vrf_cli_path, key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True, + effective=True) + # Add dependency on possible existing default VRF to this VRF + if 'bgp' in dict: + tmp['dependent_vrfs'].update({'default': {'protocols': { + 'bgp': dependent_vrfs_default}}}) + # We can safely delete ourself from the dependent VRF list + if vrf_name in tmp['dependent_vrfs']: + del tmp['dependent_vrfs'][vrf_name] + + # Update VRF related dict + vrf['name'][vrf_name]['protocols'].update({protocol : tmp}) + + dict.update({'vrf' : vrf}) + + if os.path.exists(frr_debug_enable): 
+ print('======== < BEGIN > ==========') + import pprint + pprint.pprint(dict) + print('========= < END > ===========') + + # Use singleton instance of the FRR render class + if hasattr(conf, 'frrender_cls'): + frrender = getattr(conf, 'frrender_cls') + dict.update({'frrender_cls' : frrender}) + frrender.generate(dict) + + return dict diff --git a/python/vyos/configverify.py b/python/vyos/configverify.py index 92996f2ee..4084425b1 100644 --- a/python/vyos/configverify.py +++ b/python/vyos/configverify.py @@ -1,539 +1,549 @@ # Copyright 2020-2024 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>. # The sole purpose of this module is to hold common functions used in # all kinds of implementations to verify the CLI configuration. # It is started by migrating the interfaces to the new get_config_dict() # approach which will lead to a lot of code that can be reused. # NOTE: imports should be as local as possible to the function which # makes use of it! from vyos import ConfigError from vyos.utils.dict import dict_search # pattern re-used in ipsec migration script dynamic_interface_pattern = r'(ppp|pppoe|sstpc|l2tp|ipoe)[0-9]+' def verify_mtu(config): """ Common helper function used by interface implementations to perform recurring validation if the specified MTU can be used by the underlaying hardware. """ from vyos.ifconfig import Interface if 'mtu' in config: mtu = int(config['mtu']) tmp = Interface(config['ifname']) # Not all interfaces support min/max MTU # https://vyos.dev/T5011 try: min_mtu = tmp.get_min_mtu() max_mtu = tmp.get_max_mtu() except: # Fallback to defaults min_mtu = 68 max_mtu = 9000 if mtu < min_mtu: raise ConfigError(f'Interface MTU too low, ' \ f'minimum supported MTU is {min_mtu}!') if mtu > max_mtu: raise ConfigError(f'Interface MTU too high, ' \ f'maximum supported MTU is {max_mtu}!') def verify_mtu_parent(config, parent): if 'mtu' not in config or 'mtu' not in parent: return mtu = int(config['mtu']) parent_mtu = int(parent['mtu']) if mtu > parent_mtu: raise ConfigError(f'Interface MTU "{mtu}" too high, ' \ f'parent interface MTU is "{parent_mtu}"!') def verify_mtu_ipv6(config): """ Common helper function used by interface implementations to perform recurring validation if the specified MTU can be used when IPv6 is configured on the interface. IPv6 requires a 1280 bytes MTU. """ from vyos.template import is_ipv6 if 'mtu' in config: # IPv6 minimum required link mtu min_mtu = 1280 if int(config['mtu']) < min_mtu: interface = config['ifname'] error_msg = f'IPv6 address will be configured on interface "{interface}",\n' \ f'the required minimum MTU is "{min_mtu}"!' 
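        # The checks below only raise if IPv6 would actually be used on the interface:
        # a static IPv6 or DHCPv6 address, SLAAC (autoconf), an EUI-64 address, or the
        # implicit link-local address (unless no-default-link-local is set).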
if 'address' in config: for address in config['address']: if address in ['dhcpv6'] or is_ipv6(address): raise ConfigError(error_msg) tmp = dict_search('ipv6.address.no_default_link_local', config) if tmp == None: raise ConfigError('link-local ' + error_msg) tmp = dict_search('ipv6.address.autoconf', config) if tmp != None: raise ConfigError(error_msg) tmp = dict_search('ipv6.address.eui64', config) if tmp != None: raise ConfigError(error_msg) def verify_vrf(config): """ Common helper function used by interface implementations to perform recurring validation of VRF configuration. """ from vyos.utils.network import interface_exists if 'vrf' in config: vrfs = config['vrf'] if isinstance(vrfs, str): vrfs = [vrfs] for vrf in vrfs: if vrf == 'default': continue if not interface_exists(vrf): raise ConfigError(f'VRF "{vrf}" does not exist!') if 'is_bridge_member' in config: raise ConfigError( 'Interface "{ifname}" cannot be both a member of VRF "{vrf}" ' 'and bridge "{is_bridge_member}"!'.format(**config)) def verify_bond_bridge_member(config): """ Checks if interface has a VRF configured and is also part of a bond or bridge, which is not allowed! """ if 'vrf' in config: ifname = config['ifname'] if 'is_bond_member' in config: raise ConfigError(f'Can not add interface "{ifname}" to bond, it has a VRF assigned!') if 'is_bridge_member' in config: raise ConfigError(f'Can not add interface "{ifname}" to bridge, it has a VRF assigned!') def verify_tunnel(config): """ This helper is used to verify the common part of the tunnel """ from vyos.template import is_ipv4 from vyos.template import is_ipv6 if 'encapsulation' not in config: raise ConfigError('Must configure the tunnel encapsulation for '\ '{ifname}!'.format(**config)) if 'source_address' not in config and 'source_interface' not in config: raise ConfigError('source-address or source-interface required for tunnel!') if 'remote' not in config and config['encapsulation'] != 'gre': raise ConfigError('remote ip address is mandatory for tunnel') if config['encapsulation'] in ['ipip6', 'ip6ip6', 'ip6gre', 'ip6gretap', 'ip6erspan']: error_ipv6 = 'Encapsulation mode requires IPv6' if 'source_address' in config and not is_ipv6(config['source_address']): raise ConfigError(f'{error_ipv6} source-address') if 'remote' in config and not is_ipv6(config['remote']): raise ConfigError(f'{error_ipv6} remote') else: error_ipv4 = 'Encapsulation mode requires IPv4' if 'source_address' in config and not is_ipv4(config['source_address']): raise ConfigError(f'{error_ipv4} source-address') if 'remote' in config and not is_ipv4(config['remote']): raise ConfigError(f'{error_ipv4} remote address') if config['encapsulation'] in ['sit', 'gretap', 'ip6gretap']: if 'source_interface' in config: encapsulation = config['encapsulation'] raise ConfigError(f'Option source-interface can not be used with ' \ f'encapsulation "{encapsulation}"!') elif config['encapsulation'] == 'gre': if 'source_address' in config and is_ipv6(config['source_address']): raise ConfigError('Can not use local IPv6 address is for mGRE tunnels') def verify_mirror_redirect(config): """ Common helper function used by interface implementations to perform recurring validation of mirror and redirect interface configuration via tc(8) It makes no sense to mirror traffic back at yourself! 
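A hedged sketch of input that would be rejected (the keys follow the get_interface_dict() mangling, the values are made up and eth0 is assumed to exist):

```
config = {'ifname': 'eth0', 'mirror': {'ingress': 'eth0'}}
verify_mirror_redirect(config)   # raises ConfigError - eth0 cannot mirror to itself
```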
""" from vyos.utils.network import interface_exists if {'mirror', 'redirect'} <= set(config): raise ConfigError('Mirror and redirect can not be enabled at the same time!') if 'mirror' in config: for direction, mirror_interface in config['mirror'].items(): if not interface_exists(mirror_interface): raise ConfigError(f'Requested mirror interface "{mirror_interface}" '\ 'does not exist!') if mirror_interface == config['ifname']: raise ConfigError(f'Can not mirror "{direction}" traffic back '\ 'the originating interface!') if 'redirect' in config: redirect_ifname = config['redirect'] if not interface_exists(redirect_ifname): raise ConfigError(f'Requested redirect interface "{redirect_ifname}" '\ 'does not exist!') if ('mirror' in config or 'redirect' in config) and dict_search('traffic_policy.in', config) is not None: # XXX: support combination of limiting and redirect/mirror - this is an # artificial limitation raise ConfigError('Can not use ingress policy together with mirror or redirect!') def verify_authentication(config): """ Common helper function used by interface implementations to perform recurring validation of authentication for either PPPoE or WWAN interfaces. If authentication CLI option is defined, both username and password must be set! """ if 'authentication' not in config: return if not {'username', 'password'} <= set(config['authentication']): raise ConfigError('Authentication requires both username and ' \ 'password to be set!') def verify_address(config): """ Common helper function used by interface implementations to perform recurring validation of IP address assignment when interface is part of a bridge or bond. """ if {'is_bridge_member', 'address'} <= set(config): interface = config['ifname'] bridge_name = next(iter(config['is_bridge_member'])) raise ConfigError(f'Cannot assign address to interface "{interface}" ' f'as it is a member of bridge "{bridge_name}"!') def verify_bridge_delete(config): """ Common helper function used by interface implementations to perform recurring validation of IP address assignmenr when interface also is part of a bridge. """ if 'is_bridge_member' in config: interface = config['ifname'] bridge_name = next(iter(config['is_bridge_member'])) raise ConfigError(f'Interface "{interface}" cannot be deleted as it ' f'is a member of bridge "{bridge_name}"!') def verify_interface_exists(config, ifname, state_required=False, warning_only=False): """ Common helper function used by interface implementations to perform recurring validation if an interface actually exists. We first probe if the interface is defined on the CLI, if it's not found we try if it exists at the OS level. """ from vyos.base import Warning from vyos.utils.dict import dict_search_recursive from vyos.utils.network import interface_exists if not state_required: # Check if interface is present in CLI config tmp = getattr(config, 'interfaces_root', {}) if bool(list(dict_search_recursive(tmp, ifname))): return True # Interface not found on CLI, try Linux Kernel if interface_exists(ifname): return True message = f'Interface "{ifname}" does not exist!' if warning_only: Warning(message) return False raise ConfigError(message) def verify_source_interface(config): """ Common helper function used by interface implementations to perform recurring validation of the existence of a source-interface required by e.g. peth/MACvlan, MACsec ... 
""" import re from vyos.utils.network import interface_exists ifname = config['ifname'] if 'source_interface' not in config: raise ConfigError(f'Physical source-interface required for "{ifname}"!') src_ifname = config['source_interface'] # We do not allow sourcing other interfaces (e.g. tunnel) from dynamic interfaces tmp = re.compile(dynamic_interface_pattern) if tmp.match(src_ifname): raise ConfigError(f'Can not source "{ifname}" from dynamic interface "{src_ifname}"!') if not interface_exists(src_ifname): raise ConfigError(f'Specified source-interface {src_ifname} does not exist') if 'source_interface_is_bridge_member' in config: bridge_name = next(iter(config['source_interface_is_bridge_member'])) raise ConfigError(f'Invalid source-interface "{src_ifname}". Interface ' f'is already a member of bridge "{bridge_name}"!') if 'source_interface_is_bond_member' in config: bond_name = next(iter(config['source_interface_is_bond_member'])) raise ConfigError(f'Invalid source-interface "{src_ifname}". Interface ' f'is already a member of bond "{bond_name}"!') if 'is_source_interface' in config: tmp = config['is_source_interface'] raise ConfigError(f'Can not use source-interface "{src_ifname}", it already ' \ f'belongs to interface "{tmp}"!') def verify_dhcpv6(config): """ Common helper function used by interface implementations to perform recurring validation of DHCPv6 options which are mutually exclusive. """ if 'dhcpv6_options' in config: if {'parameters_only', 'temporary'} <= set(config['dhcpv6_options']): raise ConfigError('DHCPv6 temporary and parameters-only options ' 'are mutually exclusive!') # It is not allowed to have duplicate SLA-IDs as those identify an # assigned IPv6 subnet from a delegated prefix for pd in (dict_search('dhcpv6_options.pd', config) or []): sla_ids = [] interfaces = dict_search(f'dhcpv6_options.pd.{pd}.interface', config) if not interfaces: raise ConfigError('DHCPv6-PD requires an interface where to assign ' 'the delegated prefix!') for count, interface in enumerate(interfaces): if 'sla_id' in interfaces[interface]: sla_ids.append(interfaces[interface]['sla_id']) else: sla_ids.append(str(count)) # Check for duplicates duplicates = [x for n, x in enumerate(sla_ids) if x in sla_ids[:n]] if duplicates: raise ConfigError('Site-Level Aggregation Identifier (SLA-ID) ' 'must be unique per prefix-delegation!') def verify_vlan_config(config): """ Common helper function used by interface implementations to perform recurring validation of interface VLANs """ # VLAN and Q-in-Q IDs are not allowed to overlap if 'vif' in config and 'vif_s' in config: duplicate = list(set(config['vif']) & set(config['vif_s'])) if duplicate: raise ConfigError(f'Duplicate VLAN id "{duplicate[0]}" used for vif and vif-s interfaces!') parent_ifname = config['ifname'] # 802.1q VLANs for vlan_id in config.get('vif', {}): vlan = config['vif'][vlan_id] vlan['ifname'] = f'{parent_ifname}.{vlan_id}' verify_dhcpv6(vlan) verify_address(vlan) verify_vrf(vlan) verify_mirror_redirect(vlan) verify_mtu_parent(vlan, config) # 802.1ad (Q-in-Q) VLANs for s_vlan_id in config.get('vif_s', {}): s_vlan = config['vif_s'][s_vlan_id] s_vlan['ifname'] = f'{parent_ifname}.{s_vlan_id}' verify_dhcpv6(s_vlan) verify_address(s_vlan) verify_vrf(s_vlan) verify_mirror_redirect(s_vlan) verify_mtu_parent(s_vlan, config) for c_vlan_id in s_vlan.get('vif_c', {}): c_vlan = s_vlan['vif_c'][c_vlan_id] c_vlan['ifname'] = f'{parent_ifname}.{s_vlan_id}.{c_vlan_id}' verify_dhcpv6(c_vlan) verify_address(c_vlan) verify_vrf(c_vlan) 
verify_mirror_redirect(c_vlan) verify_mtu_parent(c_vlan, config) verify_mtu_parent(c_vlan, s_vlan) def verify_diffie_hellman_length(file, min_keysize): """ Verify Diffie-Hellman keypair length given via file. It must be greater than or equal to min_keysize """ import os import re from vyos.utils.process import cmd try: keysize = str(min_keysize) except: return False if os.path.exists(file): out = cmd(f'openssl dhparam -inform PEM -in {file} -text') prog = re.compile('\d+\s+bit') if prog.search(out): bits = prog.search(out)[0].split()[0] if int(bits) >= int(min_keysize): return True return False def verify_common_route_maps(config): """ Common helper function used by routing protocol implementations to perform recurring validation that the route-map specified for zebra to kernel installation exists (this is the top-level route_map key) and that any route-map used when redistributing routes exists! """ # XXX: This function is called in combination with a previous call to: # tmp = conf.get_config_dict(['policy']) - see protocols_ospf.py as an example. # We should NOT call this with the key_mangling option as this would rename # route-map hyphens '-' to underscores '_' and one could no longer distinguish # what should have been the "proper" route-map name, as foo-bar and foo_bar # are two entirely different route-map instances! for route_map in ['route-map', 'route_map']: if route_map not in config: continue tmp = config[route_map] # Check if the specified route-map exists, if not error out - if dict_search(f'policy.route-map.{tmp}', config) == None: + if dict_search(f'policy.route_map.{tmp}', config) == None: raise ConfigError(f'Specified route-map "{tmp}" does not exist!') if 'redistribute' in config: for protocol, protocol_config in config['redistribute'].items(): if 'route_map' in protocol_config: verify_route_map(protocol_config['route_map'], config) def verify_route_map(route_map_name, config): """ Common helper function used by routing protocol implementations to perform recurring validation if a specified route-map exists! """ # Check if the specified route-map exists, if not error out - if dict_search(f'policy.route-map.{route_map_name}', config) == None: + if dict_search(f'policy.route_map.{route_map_name}', config) == None: raise ConfigError(f'Specified route-map "{route_map_name}" does not exist!') def verify_prefix_list(prefix_list, config, version=''): """ Common helper function used by routing protocol implementations to perform recurring validation if a specified prefix-list exists! """ # Check if the specified prefix-list exists, if not error out - if dict_search(f'policy.prefix-list{version}.{prefix_list}', config) == None: + if dict_search(f'policy.prefix_list{version}.{prefix_list}', config) == None: raise ConfigError(f'Specified prefix-list{version} "{prefix_list}" does not exist!') def verify_access_list(access_list, config, version=''): """ Common helper function used by routing protocol implementations to perform recurring validation if a specified access-list exists!
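A minimal sketch (names and numbers invented; 'config' is assumed to already carry the key-mangled policy tree):

```
config = {'policy': {'access_list': {'100': {'rule': {'10': {'action': 'permit'}}}}}}
verify_access_list('100', config)   # passes
verify_access_list('999', config)   # raises ConfigError - access-list 999 not defined
```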
""" # Check if the specified ACL exists, if not error out - if dict_search(f'policy.access-list{version}.{access_list}', config) == None: + if dict_search(f'policy.access_list{version}.{access_list}', config) == None: raise ConfigError(f'Specified access-list{version} "{access_list}" does not exist!') def verify_pki_certificate(config: dict, cert_name: str, no_password_protected: bool=False): """ Common helper function user by PKI consumers to perform recurring validation functions for PEM based certificates """ if 'pki' not in config: raise ConfigError('PKI is not configured!') if 'certificate' not in config['pki']: raise ConfigError('PKI does not contain any certificates!') if cert_name not in config['pki']['certificate']: raise ConfigError(f'Certificate "{cert_name}" not found in configuration!') pki_cert = config['pki']['certificate'][cert_name] if 'certificate' not in pki_cert: raise ConfigError(f'PEM certificate for "{cert_name}" missing in configuration!') if 'private' not in pki_cert or 'key' not in pki_cert['private']: raise ConfigError(f'PEM private key for "{cert_name}" missing in configuration!') if no_password_protected and 'password_protected' in pki_cert['private']: raise ConfigError('Password protected PEM private key is not supported!') def verify_pki_ca_certificate(config: dict, ca_name: str): """ Common helper function user by PKI consumers to perform recurring validation functions for PEM based CA certificates """ if 'pki' not in config: raise ConfigError('PKI is not configured!') if 'ca' not in config['pki']: raise ConfigError('PKI does not contain any CA certificates!') if ca_name not in config['pki']['ca']: raise ConfigError(f'CA Certificate "{ca_name}" not found in configuration!') pki_cert = config['pki']['ca'][ca_name] if 'certificate' not in pki_cert: raise ConfigError(f'PEM CA certificate for "{cert_name}" missing in configuration!') def verify_pki_dh_parameters(config: dict, dh_name: str, min_key_size: int=0): """ Common helper function user by PKI consumers to perform recurring validation functions on DH parameters """ from vyos.pki import load_dh_parameters if 'pki' not in config: raise ConfigError('PKI is not configured!') if 'dh' not in config['pki']: raise ConfigError('PKI does not contain any DH parameters!') if dh_name not in config['pki']['dh']: raise ConfigError(f'DH parameter "{dh_name}" not found in configuration!') if min_key_size: pki_dh = config['pki']['dh'][dh_name] dh_params = load_dh_parameters(pki_dh['parameters']) dh_numbers = dh_params.parameter_numbers() dh_bits = dh_numbers.p.bit_length() if dh_bits < min_key_size: raise ConfigError(f'Minimum DH key-size is {min_key_size} bits!') def verify_eapol(config: dict): """ Common helper function used by interface implementations to perform recurring validation of EAPoL configuration. 
""" if 'eapol' not in config: return if 'certificate' not in config['eapol']: raise ConfigError('Certificate must be specified when using EAPoL!') verify_pki_certificate(config, config['eapol']['certificate'], no_password_protected=True) if 'ca_certificate' in config['eapol']: for ca_cert in config['eapol']['ca_certificate']: verify_pki_ca_certificate(config, ca_cert) + +def has_frr_protocol_in_dict(config_dict: dict, protocol: str) -> bool: + vrf = None + if config_dict and 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] + if vrf and protocol in (dict_search(f'vrf.name.{vrf}.protocols', config_dict) or []): + return True + if config_dict and protocol in config_dict: + return True + return False diff --git a/python/vyos/defaults.py b/python/vyos/defaults.py index 425990967..9757a34df 100644 --- a/python/vyos/defaults.py +++ b/python/vyos/defaults.py @@ -1,64 +1,65 @@ # Copyright 2018-2024 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>. import os base_dir = '/usr/libexec/vyos/' directories = { 'base' : base_dir, 'data' : '/usr/share/vyos/', 'conf_mode' : f'{base_dir}/conf_mode', 'op_mode' : f'{base_dir}/op_mode', 'services' : f'{base_dir}/services', 'config' : '/opt/vyatta/etc/config', 'migrate' : '/opt/vyatta/etc/config-migrate/migrate', 'activate' : f'{base_dir}/activate', 'log' : '/var/log/vyatta', 'templates' : '/usr/share/vyos/templates/', 'certbot' : '/config/auth/letsencrypt', 'api_schema': f'{base_dir}/services/api/graphql/graphql/schema/', 'api_client_op': f'{base_dir}/services/api/graphql/graphql/client_op/', 'api_templates': f'{base_dir}/services/api/graphql/session/templates/', 'vyos_udev_dir' : '/run/udev/vyos', 'isc_dhclient_dir' : '/run/dhclient', 'dhcp6_client_dir' : '/run/dhcp6c', 'vyos_configdir' : '/opt/vyatta/config', 'completion_dir' : f'{base_dir}/completion', 'ca_certificates' : '/usr/local/share/ca-certificates/vyos' } config_status = '/tmp/vyos-config-status' api_config_state = '/run/http-api-state' +frr_debug_enable = '/tmp/vyos.frr.debug' cfg_group = 'vyattacfg' cfg_vintage = 'vyos' commit_lock = os.path.join(directories['vyos_configdir'], '.lock') component_version_json = os.path.join(directories['data'], 'component-versions.json') config_default = os.path.join(directories['data'], 'config.boot.default') rt_symbolic_names = { # Standard routing tables for Linux & reserved IDs for VyOS 'default': 253, # Confusingly, a final fallthru, not the default. 'main': 254, # The actual global table used by iproute2 unless told otherwise. 'local': 255, # Special kernel loopback table. 
} rt_global_vrf = rt_symbolic_names['main'] rt_global_table = rt_symbolic_names['main'] diff --git a/python/vyos/frr.py b/python/vyos/frr.py deleted file mode 100644 index 6fb81803f..000000000 --- a/python/vyos/frr.py +++ /dev/null @@ -1,551 +0,0 @@ -# Copyright 2020-2024 VyOS maintainers and contributors <maintainers@vyos.io> -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this library. If not, see <http://www.gnu.org/licenses/>. - -r""" -A Library for interracting with the FRR daemon suite. -It supports simple configuration manipulation and loading using the official tools -supplied with FRR (vtysh and frr-reload) - -All configuration management and manipulation is done using strings and regex. - - -Example Usage -##### - -# Reading configuration from frr: -``` ->>> original_config = get_configuration() ->>> repr(original_config) -'!\nfrr version 7.3.1\nfrr defaults traditional\nhostname debian\n...... -``` - - -# Modify a configuration section: -``` ->>> new_bgp_section = 'router bgp 65000\n neighbor 192.0.2.1 remote-as 65000\n' ->>> modified_config = replace_section(original_config, new_bgp_section, replace_re=r'router bgp \d+') ->>> repr(modified_config) -'............router bgp 65000\n neighbor 192.0.2.1 remote-as 65000\n...........' 
-``` - -Remove a configuration section: -``` ->>> modified_config = remove_section(original_config, r'router ospf') -``` - -Test the new configuration: -``` ->>> try: ->>> mark_configuration(modified configuration) ->>> except ConfigurationNotValid as e: ->>> print('resulting configuration is not valid') ->>> sys.exit(1) -``` - -Apply the new configuration: -``` ->>> try: ->>> replace_configuration(modified_config) ->>> except CommitError as e: ->>> print('Exception while commiting the supplied configuration') ->>> print(e) ->>> exit(1) -``` -""" - -import tempfile -import re - -from vyos import ConfigError -from vyos.utils.process import cmd -from vyos.utils.process import popen -from vyos.utils.process import STDOUT - -import logging -from logging.handlers import SysLogHandler -import os -import sys - -LOG = logging.getLogger(__name__) -DEBUG = False - -ch = SysLogHandler(address='/dev/log') -ch2 = logging.StreamHandler(stream=sys.stdout) -LOG.addHandler(ch) -LOG.addHandler(ch2) - -_frr_daemons = ['zebra', 'staticd', 'bgpd', 'ospfd', 'ospf6d', 'ripd', 'ripngd', - 'isisd', 'pimd', 'pim6d', 'ldpd', 'eigrpd', 'babeld', 'bfdd', 'fabricd'] - -path_vtysh = '/usr/bin/vtysh' -path_frr_reload = '/usr/lib/frr/frr-reload.py' -path_config = '/run/frr' - -default_add_before = r'(ip prefix-list .*|route-map .*|line vty|end)' - - -class FrrError(Exception): - pass - - -class ConfigurationNotValid(FrrError): - """ - The configuratioin supplied to vtysh is not valid - """ - pass - - -class CommitError(FrrError): - """ - Commiting the supplied configuration failed to commit by a unknown reason - see commit error and/or run mark_configuration on the specified configuration - to se error generated - - used by: reload_configuration() - """ - pass - - -class ConfigSectionNotFound(FrrError): - """ - Removal of configuration failed because it is not existing in the supplied configuration - """ - pass - -def init_debugging(): - global DEBUG - - DEBUG = os.path.exists('/tmp/vyos.frr.debug') - if DEBUG: - LOG.setLevel(logging.DEBUG) - -def get_configuration(daemon=None, marked=False): - """ Get current running FRR configuration - daemon: Collect only configuration for the specified FRR daemon, - supplying daemon=None retrieves the complete configuration - marked: Mark the configuration with "end" tags - - return: string containing the running configuration from frr - - """ - if daemon and daemon not in _frr_daemons: - raise ValueError(f'The specified daemon type is not supported {repr(daemon)}') - - cmd = f"{path_vtysh} -c 'show run'" - if daemon: - cmd += f' -d {daemon}' - - output, code = popen(cmd, stderr=STDOUT) - if code: - raise OSError(code, output) - - config = output.replace('\r', '') - # Remove first header lines from FRR config - config = config.split("\n", 3)[-1] - # Mark the configuration with end tags - if marked: - config = mark_configuration(config) - - return config - - -def mark_configuration(config): - """ Add end marks and Test the configuration for syntax faults - If the configuration is valid a marked version of the configuration is returned, - or else it failes with a ConfigurationNotValid Exception - - config: The configuration string to mark/test - return: The marked configuration from FRR - """ - output, code = popen(f"{path_vtysh} -m -f -", stderr=STDOUT, input=config) - - if code == 2: - raise ConfigurationNotValid(str(output)) - elif code: - raise OSError(code, output) - - config = output.replace('\r', '') - return config - - -def reload_configuration(config, daemon=None): - """ 
Execute frr-reload with the new configuration - This will try to reapply the supplied configuration inside FRR. - The configuration needs to be a complete configuration from the integrated config or - from a daemon. - - config: The configuration to apply - daemon: Apply the conigutaion to the specified FRR daemon, - supplying daemon=None applies to the integrated configuration - return: None - """ - if daemon and daemon not in _frr_daemons: - raise ValueError(f'The specified daemon type is not supported {repr(daemon)}') - - f = tempfile.NamedTemporaryFile('w') - f.write(config) - f.flush() - - LOG.debug(f'reload_configuration: Reloading config using temporary file: {f.name}') - cmd = f'{path_frr_reload} --reload' - if daemon: - cmd += f' --daemon {daemon}' - - if DEBUG: - cmd += f' --debug --stdout' - - cmd += f' {f.name}' - - LOG.debug(f'reload_configuration: Executing command against frr-reload: "{cmd}"') - output, code = popen(cmd, stderr=STDOUT) - f.close() - - for i, e in enumerate(output.split('\n')): - LOG.debug(f'frr-reload output: {i:3} {e}') - - if code == 1: - raise ConfigError(output) - elif code: - raise OSError(code, output) - - return output - - -def save_configuration(): - """ T3217: Save FRR configuration to /run/frr/config/frr.conf """ - return cmd(f'{path_vtysh} -n -w') - - -def execute(command): - """ Run commands inside vtysh - command: str containing commands to execute inside a vtysh session - """ - if not isinstance(command, str): - raise ValueError(f'command needs to be a string: {repr(command)}') - - cmd = f"{path_vtysh} -c '{command}'" - - output, code = popen(cmd, stderr=STDOUT) - if code: - raise OSError(code, output) - - config = output.replace('\r', '') - return config - - -def configure(lines, daemon=False): - """ run commands inside config mode vtysh - lines: list or str conaining commands to execute inside a configure session - only one command executed on each configure() - Executing commands inside a subcontext uses the list to describe the context - ex: ['router bgp 6500', 'neighbor 192.0.2.1 remote-as 65000'] - return: None - """ - if isinstance(lines, str): - lines = [lines] - elif not isinstance(lines, list): - raise ValueError('lines needs to be string or list of commands') - - if daemon and daemon not in _frr_daemons: - raise ValueError(f'The specified daemon type is not supported {repr(daemon)}') - - cmd = f'{path_vtysh}' - if daemon: - cmd += f' -d {daemon}' - - cmd += " -c 'configure terminal'" - for x in lines: - cmd += f" -c '{x}'" - - output, code = popen(cmd, stderr=STDOUT) - if code == 1: - raise ConfigurationNotValid(f'Configuration FRR failed: {repr(output)}') - elif code: - raise OSError(code, output) - - config = output.replace('\r', '') - return config - - -def _replace_section(config, replacement, replace_re, before_re): - r"""Replace a section of FRR config - config: full original configuration - replacement: replacement configuration section - replace_re: The regex to replace - example: ^router bgp \d+$.?*^!$ - this will replace everything between ^router bgp X$ and ^!$ - before_re: When replace_re is not existant, the config will be added before this tag - example: ^line vty$ - - return: modified configuration as a text file - """ - # DEPRECATED, this is replaced by a new implementation - # Check if block is configured, remove the existing instance else add a new one - if re.findall(replace_re, config, flags=re.MULTILINE | re.DOTALL): - # Section is in the configration, replace it - return re.sub(replace_re, replacement, config, 
count=1, - flags=re.MULTILINE | re.DOTALL) - if before_re: - if not re.findall(before_re, config, flags=re.MULTILINE | re.DOTALL): - raise ConfigSectionNotFound(f"Config section {before_re} not found in config") - - # If no section is in the configuration, add it before the line vty line - return re.sub(before_re, rf'{replacement}\n\g<1>', config, count=1, - flags=re.MULTILINE | re.DOTALL) - - raise ConfigSectionNotFound(f"Config section {replacement} not found in config") - - -def replace_section(config, replacement, from_re, to_re=r'!', before_re=r'line vty'): - r"""Replace a section of FRR config - config: full original configuration - replacement: replacement configuration section - from_re: Regex for the start of section matching - example: 'router bgp \d+' - to_re: Regex for stop of section matching - default: '!' - example: '!' or 'end' - before_re: When from_re/to_re does not return a match, the config will - be added before this tag - default: ^line vty$ - - startline and endline tags will be automatically added to the resulting from_re/to_re and before_re regex'es - """ - # DEPRECATED, this is replaced by a new implementation - return _replace_section(config, replacement, replace_re=rf'^{from_re}$.*?^{to_re}$', before_re=rf'^({before_re})$') - - -def remove_section(config, from_re, to_re='!'): - # DEPRECATED, this is replaced by a new implementation - return _replace_section(config, '', replace_re=rf'^{from_re}$.*?^{to_re}$', before_re=None) - - -def _find_first_block(config, start_pattern, stop_pattern, start_at=0): - '''Find start and stop line numbers for a config block - config: (list) A list conaining the configuration that is searched - start_pattern: (raw-str) The pattern searched for a a start of block tag - stop_pattern: (raw-str) The pattern searched for to signify the end of the block - start_at: (int) The index to start searching at in the <config> - - Returns: - None: No complete block could be found - set(int, int): A complete block found between the line numbers returned in the set - - The object <config> is searched from the start for the regex <start_pattern> until the first match is found. - On a successful match it continues the search for the regex <stop_pattern> until it is found. - After a successful run a set is returned containing the start and stop line numbers. 
- ''' - LOG.debug(f'_find_first_block: find start={repr(start_pattern)} stop={repr(stop_pattern)} start_at={start_at}') - _start = None - for i, element in enumerate(config[start_at:], start=start_at): - # LOG.debug(f'_find_first_block: running line {i:3} "{element}"') - if not _start: - if not re.match(start_pattern, element): - LOG.debug(f'_find_first_block: no match {i:3} "{element}"') - continue - _start = i - LOG.debug(f'_find_first_block: Found start {i:3} "{element}"') - continue - - if not re.match(stop_pattern, element): - LOG.debug(f'_find_first_block: no match {i:3} "{element}"') - continue - - LOG.debug(f'_find_first_block: Found stop {i:3} "{element}"') - return (_start, i) - - LOG.debug('_find_first_block: exit start={repr(start_pattern)} stop={repr(stop_pattern)} start_at={start_at}') - return None - - -def _find_first_element(config, pattern, start_at=0): - '''Find the first element that matches the current pattern in config - config: (list) A list containing the configuration that is searched - start_pattern: (raw-str) The pattern searched for - start_at: (int) The index to start searching at in the <config> - - return: Line index of the line containing the searched pattern - - TODO: for now it returns -1 on a no-match because 0 also returns as False - TODO: that means that we can not use False matching to tell if its - ''' - LOG.debug(f'_find_first_element: find start="{pattern}" start_at={start_at}') - for i, element in enumerate(config[start_at:], start=0): - if re.match(pattern + '$', element): - LOG.debug(f'_find_first_element: Found stop {i:3} "{element}"') - return i - LOG.debug(f'_find_first_element: no match {i:3} "{element}"') - LOG.debug(f'_find_first_element: Did not find any match, exiting') - return -1 - - -def _find_elements(config, pattern, start_at=0): - '''Find all instances of pattern and return a list containing all element indexes - config: (list) A list containing the configuration that is searched - start_pattern: (raw-str) The pattern searched for - start_at: (int) The index to start searching at in the <config> - - return: A list of line indexes containing the searched pattern - TODO: refactor this to return a generator instead - ''' - return [i for i, element in enumerate(config[start_at:], start=0) if re.match(pattern + '$', element)] - - -class FRRConfig: - '''Main FRR Configuration manipulation object - Using this object the user could load, manipulate and commit the configuration to FRR - ''' - def __init__(self, config=[]): - self.imported_config = '' - - if isinstance(config, list): - self.config = config.copy() - self.original_config = config.copy() - elif isinstance(config, str): - self.config = config.split('\n') - self.original_config = self.config.copy() - else: - raise ValueError( - 'The config element needs to be a string or list type object') - - if config: - LOG.debug(f'__init__: frr library initiated with initial config') - for i, e in enumerate(self.config): - LOG.debug(f'__init__: initial {i:3} {e}') - - def load_configuration(self, daemon=None): - '''Load the running configuration from FRR into the config object - daemon: str with name of the FRR Daemon to load configuration from or - None to load the consolidated config - - Using this overwrites the current loaded config objects and replaces the original loaded config - ''' - init_debugging() - - self.imported_config = get_configuration(daemon=daemon) - if daemon: - LOG.debug(f'load_configuration: Configuration loaded from FRR daemon {daemon}') - else: - 
LOG.debug(f'load_configuration: Configuration loaded from FRR integrated config') - - self.original_config = self.imported_config.split('\n') - self.config = self.original_config.copy() - - for i, e in enumerate(self.imported_config.split('\n')): - LOG.debug(f'load_configuration: loaded {i:3} {e}') - return - - def test_configuration(self): - '''Test the current configuration against FRR - This will exception if FRR failes to load the current configuration object - ''' - LOG.debug('test_configation: Testing configuration') - mark_configuration('\n'.join(self.config)) - - def commit_configuration(self, daemon=None): - ''' - Commit the current configuration to FRR daemon: str with name of the - FRR daemon to commit to or None to use the consolidated config. - - Configuration is automatically saved after apply - ''' - LOG.debug('commit_configuration: Commiting configuration') - for i, e in enumerate(self.config): - LOG.debug(f'commit_configuration: new_config {i:3} {e}') - - # https://github.com/FRRouting/frr/issues/10132 - # https://github.com/FRRouting/frr/issues/10133 - count = 0 - count_max = 5 - emsg = '' - while count < count_max: - count += 1 - try: - reload_configuration('\n'.join(self.config), daemon=daemon) - break - except ConfigError as e: - emsg = str(e) - except: - # we just need to re-try the commit of the configuration - # for the listed FRR issues above - pass - if count >= count_max: - if emsg: - raise ConfigError(emsg) - raise ConfigurationNotValid(f'Config commit retry counter ({count_max}) exceeded for {daemon} daemon!') - - # Save configuration to /run/frr/config/frr.conf - save_configuration() - - - def modify_section(self, start_pattern, replacement='!', stop_pattern=r'\S+', remove_stop_mark=False, count=0): - if isinstance(replacement, str): - replacement = replacement.split('\n') - elif not isinstance(replacement, list): - return ValueError("The replacement element needs to be a string or list type object") - LOG.debug(f'modify_section: starting search for {repr(start_pattern)} until {repr(stop_pattern)}') - - _count = 0 - _next_start = 0 - while True: - if count and count <= _count: - # Break out of the loop after specified amount of matches - LOG.debug(f'modify_section: reached limit ({_count}), exiting loop at line {_next_start}') - break - # While searching, always assume that the user wants to search for the exact pattern he entered - # To be more specific the user needs a override, eg. 
a "pattern.*" - _w = _find_first_block( - self.config, start_pattern+'$', stop_pattern, start_at=_next_start) - if not _w: - # Reached the end, no more elements to remove - LOG.debug(f'modify_section: No more config sections found, exiting') - break - start_element, end_element = _w - LOG.debug(f'modify_section: found match between {start_element} and {end_element}') - for i, e in enumerate(self.config[start_element:end_element+1 if remove_stop_mark else end_element], - start=start_element): - LOG.debug(f'modify_section: remove {i:3} {e}') - del self.config[start_element:end_element + - 1 if remove_stop_mark else end_element] - if replacement: - # Append the replacement config at the current position - for i, e in enumerate(replacement, start=start_element): - LOG.debug(f'modify_section: add {i:3} {e}') - self.config[start_element:start_element] = replacement - _count += 1 - _next_start = start_element + len(replacement) - - return _count - - def add_before(self, before_pattern, addition): - '''Add config block before this element in the configuration''' - if isinstance(addition, str): - addition = addition.split('\n') - elif not isinstance(addition, list): - return ValueError("The replacement element needs to be a string or list type object") - - start = _find_first_element(self.config, before_pattern) - if start < 0: - return False - for i, e in enumerate(addition, start=start): - LOG.debug(f'add_before: add {i:3} {e}') - self.config[start:start] = addition - return True - - def __str__(self): - return '\n'.join(self.config) - - def __repr__(self): - return f'frr({repr(str(self))})' diff --git a/python/vyos/frrender.py b/python/vyos/frrender.py new file mode 100644 index 000000000..95d6c7243 --- /dev/null +++ b/python/vyos/frrender.py @@ -0,0 +1,176 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
+ +""" +Library used to interface with FRRs mgmtd introduced in version 10.0 +""" + +import os + +from vyos.defaults import frr_debug_enable +from vyos.utils.file import write_file +from vyos.utils.process import rc_cmd +from vyos.template import render_to_string +from vyos import ConfigError + +DEBUG_ON = os.path.exists(frr_debug_enable) + +def debug(message): + if not DEBUG_ON: + return + print(message) + +frr_protocols = ['babel', 'bfd', 'bgp', 'eigrp', 'isis', 'mpls', 'nhrp', + 'openfabric', 'ospf', 'ospfv3', 'pim', 'pim6', 'rip', + 'ripng', 'rpki', 'segment_routing', 'static'] + +babel_daemon = 'babeld' +bfd_daemon = 'bfdd' +bgp_daemon = 'bgpd' +isis_daemon = 'isisd' +ldpd_daemon = 'ldpd' +mgmt_daemon = 'mgmtd' +openfabric_daemon = 'fabricd' +ospf_daemon = 'ospfd' +ospf6_daemon = 'ospf6d' +pim_daemon = 'pimd' +pim6_daemon = 'pim6d' +rip_daemon = 'ripd' +ripng_daemon = 'ripngd' +zebra_daemon = 'zebra' + +class FRRender: + def __init__(self): + self._frr_conf = '/run/frr/config/frr.conf' + + def generate(self, config): + if not isinstance(config, dict): + tmp = type(config) + raise ValueError(f'Config must be of type "dict" and not "{tmp}"!') + + def inline_helper(config_dict) -> str: + output = '!\n' + if 'babel' in config_dict and 'deleted' not in config_dict['babel']: + output += render_to_string('frr/babeld.frr.j2', config_dict['babel']) + output += '\n' + if 'bfd' in config_dict and 'deleted' not in config_dict['bfd']: + output += render_to_string('frr/bfdd.frr.j2', config_dict['bfd']) + output += '\n' + if 'bgp' in config_dict and 'deleted' not in config_dict['bgp']: + output += render_to_string('frr/bgpd.frr.j2', config_dict['bgp']) + output += '\n' + if 'eigrp' in config_dict and 'deleted' not in config_dict['eigrp']: + output += render_to_string('frr/eigrpd.frr.j2', config_dict['eigrp']) + output += '\n' + if 'isis' in config_dict and 'deleted' not in config_dict['isis']: + output += render_to_string('frr/isisd.frr.j2', config_dict['isis']) + output += '\n' + if 'mpls' in config_dict and 'deleted' not in config_dict['mpls']: + output += render_to_string('frr/ldpd.frr.j2', config_dict['mpls']) + output += '\n' + if 'openfabric' in config_dict and 'deleted' not in config_dict['openfabric']: + output += render_to_string('frr/fabricd.frr.j2', config_dict['openfabric']) + output += '\n' + if 'ospf' in config_dict and 'deleted' not in config_dict['ospf']: + output += render_to_string('frr/ospfd.frr.j2', config_dict['ospf']) + output += '\n' + if 'ospfv3' in config_dict and 'deleted' not in config_dict['ospfv3']: + output += render_to_string('frr/ospf6d.frr.j2', config_dict['ospfv3']) + output += '\n' + if 'pim' in config_dict and 'deleted' not in config_dict['pim']: + output += render_to_string('frr/pimd.frr.j2', config_dict['pim']) + output += '\n' + if 'pim6' in config_dict and 'deleted' not in config_dict['pim6']: + output += render_to_string('frr/pim6d.frr.j2', config_dict['pim6']) + output += '\n' + if 'policy' in config_dict and len(config_dict['policy']) > 0: + output += render_to_string('frr/policy.frr.j2', config_dict['policy']) + output += '\n' + if 'rip' in config_dict and 'deleted' not in config_dict['rip']: + output += render_to_string('frr/ripd.frr.j2', config_dict['rip']) + output += '\n' + if 'ripng' in config_dict and 'deleted' not in config_dict['ripng']: + output += render_to_string('frr/ripngd.frr.j2', config_dict['ripng']) + output += '\n' + if 'rpki' in config_dict and 'deleted' not in config_dict['rpki']: + output += render_to_string('frr/rpki.frr.j2', 
config_dict['rpki']) + output += '\n' + if 'segment_routing' in config_dict and 'deleted' not in config_dict['segment_routing']: + output += render_to_string('frr/zebra.segment_routing.frr.j2', config_dict['segment_routing']) + output += '\n' + if 'static' in config_dict and 'deleted' not in config_dict['static']: + output += render_to_string('frr/staticd.frr.j2', config_dict['static']) + output += '\n' + if 'ip' in config_dict and 'deleted' not in config_dict['ip']: + output += render_to_string('frr/zebra.route-map.frr.j2', config_dict['ip']) + output += '\n' + if 'ipv6' in config_dict and 'deleted' not in config_dict['ipv6']: + output += render_to_string('frr/zebra.route-map.frr.j2', config_dict['ipv6']) + output += '\n' + return output + + debug('======< RENDERING CONFIG >======') + # we can not reload an empty file, thus we always embed the marker + output = '!\n' + # Enable SNMP agentx support + # SNMP AgentX support cannot be disabled once enabled + if 'snmp' in config: + output += 'agentx\n' + # Add routing protocols in global VRF + output += inline_helper(config) + # Interface configuration for EVPN is not VRF related + if 'interfaces' in config: + output += render_to_string('frr/evpn.mh.frr.j2', {'interfaces' : config['interfaces']}) + output += '\n' + + if 'vrf' in config and 'name' in config['vrf']: + output += render_to_string('frr/zebra.vrf.route-map.frr.j2', config['vrf']) + '\n' + for vrf, vrf_config in config['vrf']['name'].items(): + if 'protocols' not in vrf_config: + continue + for protocol in vrf_config['protocols']: + vrf_config['protocols'][protocol]['vrf'] = vrf + + output += inline_helper(vrf_config['protocols']) + + debug(output) + debug('======< RENDERING CONFIG COMPLETE >======') + write_file(self._frr_conf, output) + if DEBUG_ON: write_file('/tmp/frr.conf.debug', output) + + def apply(self): + count = 0 + count_max = 5 + emsg = '' + while count < count_max: + count += 1 + debug(f'FRR: Reloading configuration - tries: {count} | Python class ID: {id(self)}') + + cmdline = '/usr/lib/frr/frr-reload.py --reload' + if DEBUG_ON: + cmdline += ' --debug' + rc, emsg = rc_cmd(f'{cmdline} {self._frr_conf}') + if rc != 0: + debug('FRR configuration reload failed, retrying') + continue + debug(emsg) + debug('======< DONE APPLYING CONFIG >======') + break + if count >= count_max: + raise ConfigError(emsg) + + def save_configuration(self): + """ T3217: Save FRR configuration to /run/frr/config/frr.conf """ + from vyos.utils.process import cmd + return cmd('/usr/bin/vtysh -n --writeconfig') diff --git a/smoketest/config-tests/static-route-basic b/smoketest/config-tests/static-route-basic new file mode 100644 index 000000000..d2d33d043 --- /dev/null +++ b/smoketest/config-tests/static-route-basic @@ -0,0 +1,37 @@ +set interfaces ethernet eth0 duplex 'auto' +set interfaces ethernet eth0 speed 'auto' +set interfaces ethernet eth0 vif 203 address '172.18.203.10/24' +set interfaces ethernet eth1 duplex 'auto' +set interfaces ethernet eth1 speed 'auto' +set protocols static mroute 224.1.0.0/24 interface eth0.203 distance '10' +set protocols static mroute 224.2.0.0/24 next-hop 172.18.203.254 distance '20' +set protocols static route 10.0.0.0/8 blackhole distance '200' +set protocols static route 10.0.0.0/8 blackhole tag '333' +set protocols static route 10.0.0.0/8 next-hop 192.0.2.140 bfd multi-hop source-address '192.0.2.10' +set protocols static route 10.0.0.0/8 next-hop 192.0.2.140 bfd profile 'vyos-test' +set protocols static route 10.0.0.0/8 next-hop 192.0.2.140 distance '123' +set protocols static route 10.0.0.0/8 
next-hop 192.0.2.140 interface 'eth0' +set protocols static route 172.16.0.0/16 next-hop 172.18.203.254 bfd multi-hop source-address '172.18.203.254' +set protocols static route 172.16.0.0/16 next-hop 172.18.203.254 bfd profile 'foo' +set protocols static route6 2001:db8:1::/48 next-hop fe80::1 bfd multi-hop source-address 'fe80::1' +set protocols static route6 2001:db8:1::/48 next-hop fe80::1 bfd profile 'bar' +set protocols static route6 2001:db8:1::/48 next-hop fe80::1 interface 'eth0.203' +set protocols static route6 2001:db8:2::/48 next-hop fe80::1 bfd multi-hop source-address 'fe80::1' +set protocols static route6 2001:db8:2::/48 next-hop fe80::1 bfd profile 'bar' +set protocols static route6 2001:db8:2::/48 next-hop fe80::1 interface 'eth0.203' +set protocols static route6 2001:db8:3::/48 next-hop fe80::1 bfd +set protocols static route6 2001:db8:3::/48 next-hop fe80::1 interface 'eth0.203' +set service lldp interface all +set service ntp allow-client address '0.0.0.0/0' +set service ntp allow-client address '::/0' +set service ntp server 172.16.100.10 +set service ntp server 172.16.100.20 +set service ntp server 172.16.110.30 +set system config-management commit-revisions '100' +set system console device ttyS0 speed '115200' +set system host-name 'vyos' +set system login user vyos authentication encrypted-password '$6$O5gJRlDYQpj$MtrCV9lxMnZPMbcxlU7.FI793MImNHznxGoMFgm3Q6QP3vfKJyOSRCt3Ka/GzFQyW1yZS4NS616NLHaIPPFHc0' +set system login user vyos authentication plaintext-password '' +set system syslog global facility all level 'info' +set system syslog global facility local7 level 'debug' +set system time-zone 'Asia/Macau' diff --git a/smoketest/configs/static-route-basic b/smoketest/configs/static-route-basic new file mode 100644 index 000000000..648e19676 --- /dev/null +++ b/smoketest/configs/static-route-basic @@ -0,0 +1,148 @@ +interfaces { + ethernet eth0 { + duplex "auto" + speed "auto" + vif 203 { + address "172.18.203.10/24" + } + } + ethernet eth1 { + duplex "auto" + speed "auto" + } +} +protocols { + static { + multicast { + interface-route 224.1.0.0/24 { + next-hop-interface eth0.203 { + distance "10" + } + } + route 224.2.0.0/24 { + next-hop 172.18.203.254 { + distance "20" + } + } + } + route 10.0.0.0/8 { + blackhole { + distance "200" + tag "333" + } + next-hop 192.0.2.140 { + bfd { + multi-hop { + source 192.0.2.10 { + profile "vyos-test" + } + } + } + distance "123" + interface "eth0" + } + } + route 172.16.0.0/16 { + next-hop 172.18.203.254 { + bfd { + multi-hop { + source 172.18.203.254 { + profile "foo" + } + } + } + } + } + route6 2001:db8:1::/48 { + next-hop fe80::1 { + bfd { + multi-hop { + source fe80::1 { + profile "bar" + } + } + } + interface eth0.203 + } + } + route6 2001:db8:2::/48 { + next-hop fe80::1 { + bfd { + multi-hop { + source fe80::1 { + profile "bar" + } + } + } + interface eth0.203 + } + } + route6 2001:db8:3::/48 { + next-hop fe80::1 { + bfd { + } + interface eth0.203 + } + } + } +} +service { + lldp { + interface all { + } + } + ntp { + allow-client { + address "0.0.0.0/0" + address "::/0" + } + server 172.16.100.10 { + } + server 172.16.100.20 { + } + server 172.16.110.30 { + } + } +} +system { + config-management { + commit-revisions 100 + } + console { + device ttyS0 { + speed 115200 + } + } + host-name vyos + login { + user vyos { + authentication { + encrypted-password $6$O5gJRlDYQpj$MtrCV9lxMnZPMbcxlU7.FI793MImNHznxGoMFgm3Q6QP3vfKJyOSRCt3Ka/GzFQyW1yZS4NS616NLHaIPPFHc0 + plaintext-password "" + } + } + } + ntp { + server 0.pool.ntp.org { + 
} + server 1.pool.ntp.org { + } + server 2.pool.ntp.org { + } + } + syslog { + global { + facility all { + level info + } + facility local7 { + level debug + } + } + } + time-zone "Asia/Macau" +} + +// Warning: Do not remove the following line. +// vyos-config-version: "bgp@5:broadcast-relay@1:cluster@2:config-management@1:conntrack@5:conntrack-sync@2:container@2:dhcp-relay@2:dhcp-server@8:dhcpv6-server@1:dns-dynamic@4:dns-forwarding@4:firewall@15:flow-accounting@1:https@6:ids@1:interfaces@32:ipoe-server@3:ipsec@13:isis@3:l2tp@9:lldp@2:mdns@1:monitoring@1:nat@8:nat66@3:ntp@3:openconnect@3:ospf@2:pim@1:policy@8:pppoe-server@10:pptp@5:qos@2:quagga@11:reverse-proxy@1:rip@1:rpki@2:salt@1:snmp@3:ssh@2:sstp@6:system@27:vrf@3:vrrp@4:vyos-accel-ppp@2:wanloadbalance@3:webproxy@2" +// Release version: 1.4.0 diff --git a/smoketest/scripts/cli/base_vyostest_shim.py b/smoketest/scripts/cli/base_vyostest_shim.py index a383e596c..d95071d1a 100644 --- a/smoketest/scripts/cli/base_vyostest_shim.py +++ b/smoketest/scripts/cli/base_vyostest_shim.py @@ -1,174 +1,192 @@ # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import unittest import paramiko import pprint from time import sleep +from time import time from typing import Type from vyos.configsession import ConfigSession from vyos.configsession import ConfigSessionError from vyos import ConfigError from vyos.defaults import commit_lock from vyos.utils.process import cmd from vyos.utils.process import run save_config = '/tmp/vyos-smoketest-save' # This class acts as shim between individual Smoketests developed for VyOS and # the Python UnitTest framework. Before every test is loaded, we dump the current # system configuration and reload it after the test - despite the test results. # # Using this approach we can not render a live system useless while running any # kind of smoketest. In addition it adds debug capabilities like printing the # command used to execute the test. class VyOSUnitTestSHIM: class TestCase(unittest.TestCase): # if enabled in derived class, print out each and every set/del command # on the CLI. This is usefull to grap all the commands required to # trigger the certain failure condition. # Use "self.debug = True" in derived classes setUp() method debug = False - + commit_guard = time() @classmethod def setUpClass(cls): cls._session = ConfigSession(os.getpid()) cls._session.save_config(save_config) if os.path.exists('/tmp/vyos.smoketest.debug'): cls.debug = True pass @classmethod def tearDownClass(cls): # discard any pending changes which might caused a messed up config cls._session.discard() # ... 
and restore the initial state cls._session.migrate_and_load_config(save_config) try: cls._session.commit() except (ConfigError, ConfigSessionError): cls._session.discard() cls.fail(cls) def cli_set(self, config): if self.debug: print('set ' + ' '.join(config)) self._session.set(config) def cli_delete(self, config): if self.debug: print('del ' + ' '.join(config)) self._session.delete(config) def cli_discard(self): if self.debug: print('DISCARD') self._session.discard() def cli_commit(self): if self.debug: print('commit') self._session.commit() # during a commit there is a process opening commit_lock, and run() returns 0 while run(f'sudo lsof -nP {commit_lock}') == 0: sleep(0.250) + # reset getFRRconfig() guard timer + self.commit_guard = time() def op_mode(self, path : list) -> None: """ Execute OP-mode command and return stdout """ if self.debug: print('commit') path = ' '.join(path) out = cmd(f'/opt/vyatta/bin/vyatta-op-cmd-wrapper {path}') if self.debug: print(f'\n\ncommand "{path}" returned:\n') pprint.pprint(out) return out - def getFRRconfig(self, string=None, end='$', endsection='^!', daemon=''): + def getFRRconfig(self, string=None, end='$', endsection='^!', daemon='', guard_time=10, empty_retry=0): """ Retrieve current "running configuration" from FRR """ + # Sometimes FRR needs some time after reloading the configuration to + # appear in vtysh. This is a workaround adding a 10 second guard timer + # between the last cli_commit() and the first read of FRR config via vtysh + while (time() - self.commit_guard) < guard_time: + sleep(0.250) # wait 250 milliseconds command = f'vtysh -c "show run {daemon} no-header"' if string: command += f' | sed -n "/^{string}{end}/,/{endsection}/p"' out = cmd(command) if self.debug: print(f'\n\ncommand "{command}" returned:\n') pprint.pprint(out) + if empty_retry: + retry_count = 0 + while not out and retry_count < empty_retry: + if self.debug and retry_count % 10 == 0: + print(f"Attempt {retry_count}: FRR config is still empty. 
Retrying...") + retry_count += 1 + sleep(1) + out = cmd(command) + if not out: + print(f'FRR configuration still empty after {empty_retry} retries!') return out @staticmethod def ssh_send_cmd(command, username, password, hostname='localhost'): """ SSH command execution helper """ # Try to login via SSH ssh_client = paramiko.SSHClient() ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh_client.connect(hostname=hostname, username=username, password=password) _, stdout, stderr = ssh_client.exec_command(command) output = stdout.read().decode().strip() error = stderr.read().decode().strip() ssh_client.close() return output, error # Verify nftables output def verify_nftables(self, nftables_search, table, inverse=False, args=''): nftables_output = cmd(f'sudo nft {args} list table {table}') for search in nftables_search: matched = False for line in nftables_output.split("\n"): if all(item in line for item in search): matched = True break self.assertTrue(not matched if inverse else matched, msg=search) def verify_nftables_chain(self, nftables_search, table, chain, inverse=False, args=''): nftables_output = cmd(f'sudo nft {args} list chain {table} {chain}') for search in nftables_search: matched = False for line in nftables_output.split("\n"): if all(item in line for item in search): matched = True break self.assertTrue(not matched if inverse else matched, msg=search) # Verify ip rule output def verify_rules(self, rules_search, inverse=False, addr_family='inet'): rule_output = cmd(f'ip -family {addr_family} rule show') for search in rules_search: matched = False for line in rule_output.split("\n"): if all(item in line for item in search): matched = True break self.assertTrue(not matched if inverse else matched, msg=search) # standard construction; typing suggestion: https://stackoverflow.com/a/70292317 def ignore_warning(warning: Type[Warning]): import warnings from functools import wraps def inner(f): @wraps(f) def wrapped(*args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=warning) return f(*args, **kwargs) return wrapped return inner diff --git a/smoketest/scripts/cli/test_interfaces_bonding.py b/smoketest/scripts/cli/test_interfaces_bonding.py index f436424b8..735e4f3c5 100755 --- a/smoketest/scripts/cli/test_interfaces_bonding.py +++ b/smoketest/scripts/cli/test_interfaces_bonding.py @@ -1,313 +1,314 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
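Editor's note (illustrative, not part of the change set): the guard_time/empty_retry parameters added to getFRRconfig() in base_vyostest_shim.py above are consumed from testcases such as the ones that follow. A hedged sketch, assuming a hypothetical testcase, interface eth1, and an arbitrary empty_retry budget of 30:
```
# Hypothetical smoketest sketch (editor's addition); method names, mgmt_daemon
# and the CLI path mirror this diff, the test class itself is made up.
from base_vyostest_shim import VyOSUnitTestSHIM
from vyos.frrender import mgmt_daemon

class TestFRRReadback(VyOSUnitTestSHIM.TestCase):
    def test_evpn_uplink(self):
        self.cli_set(['interfaces', 'ethernet', 'eth1', 'evpn', 'uplink'])
        self.cli_commit()  # also resets the commit_guard timestamp
        # getFRRconfig() now waits until guard_time (default 10s) has elapsed since
        # the last commit and, with empty_retry set, re-reads the vtysh output
        # until it is non-empty or the retry budget is exhausted
        frrconfig = self.getFRRconfig('interface eth1', daemon=mgmt_daemon,
                                      empty_retry=30)
        self.assertIn(' evpn mh uplink', frrconfig)
```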
import os import unittest from base_interfaces_test import BasicInterfaceTest from vyos.ifconfig import Section from vyos.ifconfig.interface import Interface from vyos.configsession import ConfigSessionError from vyos.utils.network import get_interface_config from vyos.utils.file import read_file +from vyos.frrender import mgmt_daemon class BondingInterfaceTest(BasicInterfaceTest.TestCase): @classmethod def setUpClass(cls): cls._base_path = ['interfaces', 'bonding'] cls._mirror_interfaces = ['dum21354'] cls._members = [] # we need to filter out VLAN interfaces identified by a dot (.) # in their name - just in case! if 'TEST_ETH' in os.environ: cls._members = os.environ['TEST_ETH'].split() else: for tmp in Section.interfaces('ethernet', vlan=False): cls._members.append(tmp) cls._options = {'bond0' : []} for member in cls._members: cls._options['bond0'].append(f'member interface {member}') cls._interfaces = list(cls._options) # call base-classes classmethod super(BondingInterfaceTest, cls).setUpClass() def test_add_single_ip_address(self): super().test_add_single_ip_address() for interface in self._interfaces: slaves = read_file(f'/sys/class/net/{interface}/bonding/slaves').split() self.assertListEqual(slaves, self._members) def test_vif_8021q_interfaces(self): super().test_vif_8021q_interfaces() for interface in self._interfaces: slaves = read_file(f'/sys/class/net/{interface}/bonding/slaves').split() self.assertListEqual(slaves, self._members) def test_bonding_remove_member(self): # T2515: when removing a bond member the previously enslaved/member # interface must be in its former admin-up/down state. Here we ensure # that it is admin-up as it was admin-up before. # configure member interfaces for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_commit() # remove single bond member port for interface in self._interfaces: remove_member = self._members[0] self.cli_delete(self._base_path + [interface, 'member', 'interface', remove_member]) self.cli_commit() # removed member port must be admin-up for interface in self._interfaces: remove_member = self._members[0] state = Interface(remove_member).get_admin_state() self.assertEqual('up', state) def test_bonding_min_links(self): # configure member interfaces min_links = len(self._interfaces) for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'min-links', str(min_links)]) self.cli_commit() # verify config for interface in self._interfaces: tmp = get_interface_config(interface) self.assertEqual(min_links, tmp['linkinfo']['info_data']['min_links']) # check LACP default rate self.assertEqual('slow', tmp['linkinfo']['info_data']['ad_lacp_rate']) def test_bonding_lacp_rate(self): # configure member interfaces lacp_rate = 'fast' for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'lacp-rate', lacp_rate]) self.cli_commit() # verify config for interface in self._interfaces: tmp = get_interface_config(interface) # check LACP minimum links (default value) self.assertEqual(0, tmp['linkinfo']['info_data']['min_links']) self.assertEqual(lacp_rate, tmp['linkinfo']['info_data']['ad_lacp_rate']) def test_bonding_hash_policy(self): # Define available bonding hash policies hash_policies = ['layer2', 
'layer2+3', 'encap2+3', 'encap3+4'] for hash_policy in hash_policies: for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'hash-policy', hash_policy]) self.cli_commit() # verify config for interface in self._interfaces: defined_policy = read_file(f'/sys/class/net/{interface}/bonding/xmit_hash_policy').split() self.assertEqual(defined_policy[0], hash_policy) def test_bonding_mii_monitoring_interval(self): for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_commit() # verify default for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/miimon').split() self.assertIn('100', tmp) mii_mon = '250' for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'mii-mon-interval', mii_mon]) self.cli_commit() # verify new CLI value for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/miimon').split() self.assertIn(mii_mon, tmp) def test_bonding_multi_use_member(self): # Define available bonding hash policies for interface in ['bond10', 'bond20']: for member in self._members: self.cli_set(self._base_path + [interface, 'member', 'interface', member]) # check validate() - can not use the same member interfaces multiple times with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(self._base_path + ['bond20']) self.cli_commit() def test_bonding_source_interface(self): # Re-use member interface that is already a source-interface bond = 'bond99' pppoe = 'pppoe98756' member = next(iter(self._members)) self.cli_set(self._base_path + [bond, 'member', 'interface', member]) self.cli_set(['interfaces', 'pppoe', pppoe, 'source-interface', member]) # check validate() - can not add interface to bond, it is the source-interface of ... with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(['interfaces', 'pppoe', pppoe]) self.cli_commit() # verify config slaves = read_file(f'/sys/class/net/{bond}/bonding/slaves').split() self.assertIn(member, slaves) def test_bonding_source_bridge_interface(self): # Re-use member interface that is already a source-interface bond = 'bond1097' bridge = 'br6327' member = next(iter(self._members)) self.cli_set(self._base_path + [bond, 'member', 'interface', member]) self.cli_set(['interfaces', 'bridge', bridge, 'member', 'interface', member]) # check validate() - can not add interface to bond, it is a member of bridge ... 
with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(['interfaces', 'bridge', bridge]) self.cli_commit() # verify config slaves = read_file(f'/sys/class/net/{bond}/bonding/slaves').split() self.assertIn(member, slaves) def test_bonding_uniq_member_description(self): ethernet_path = ['interfaces', 'ethernet'] for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_commit() # Add any changes on bonding members # For example add description on separate ethX interfaces for interface in self._interfaces: for member in self._members: self.cli_set(ethernet_path + [member, 'description', member + '_interface']) self.cli_commit() # verify config for interface in self._interfaces: slaves = read_file(f'/sys/class/net/{interface}/bonding/slaves').split() for member in self._members: self.assertIn(member, slaves) def test_bonding_system_mac(self): # configure member interfaces and system-mac default_system_mac = '00:00:00:00:00:00' # default MAC is all zeroes system_mac = '00:50:ab:cd:ef:11' for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'system-mac', system_mac]) self.cli_commit() # verify config for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/ad_actor_system') self.assertIn(tmp, system_mac) for interface in self._interfaces: self.cli_delete(self._base_path + [interface, 'system-mac']) self.cli_commit() # verify default value for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/ad_actor_system') self.assertIn(tmp, default_system_mac) def test_bonding_evpn_multihoming(self): id = '5' for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'evpn', 'es-id', id]) self.cli_set(self._base_path + [interface, 'evpn', 'es-df-pref', id]) self.cli_set(self._base_path + [interface, 'evpn', 'es-sys-mac', f'00:12:34:56:78:0{id}']) self.cli_set(self._base_path + [interface, 'evpn', 'uplink']) id = int(id) + 1 self.cli_commit() id = '5' for interface in self._interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon='zebra') + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=mgmt_daemon) self.assertIn(f' evpn mh es-id {id}', frrconfig) self.assertIn(f' evpn mh es-df-pref {id}', frrconfig) self.assertIn(f' evpn mh es-sys-mac 00:12:34:56:78:0{id}', frrconfig) self.assertIn(f' evpn mh uplink', frrconfig) id = int(id) + 1 for interface in self._interfaces: self.cli_delete(self._base_path + [interface, 'evpn', 'es-id']) self.cli_delete(self._base_path + [interface, 'evpn', 'es-df-pref']) self.cli_commit() id = '5' for interface in self._interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon='zebra') + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=mgmt_daemon) self.assertIn(f' evpn mh es-sys-mac 00:12:34:56:78:0{id}', frrconfig) self.assertIn(f' evpn mh uplink', frrconfig) id = int(id) + 1 if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_interfaces_ethernet.py b/smoketest/scripts/cli/test_interfaces_ethernet.py index 3d12364f7..c02ca613b 100755 --- a/smoketest/scripts/cli/test_interfaces_ethernet.py +++ 
b/smoketest/scripts/cli/test_interfaces_ethernet.py @@ -1,226 +1,227 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import unittest from glob import glob from json import loads from netifaces import AF_INET from netifaces import AF_INET6 from netifaces import ifaddresses from base_interfaces_test import BasicInterfaceTest from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frrender import mgmt_daemon from vyos.utils.process import cmd from vyos.utils.process import popen from vyos.utils.file import read_file from vyos.utils.network import is_ipv6_link_local class EthernetInterfaceTest(BasicInterfaceTest.TestCase): @classmethod def setUpClass(cls): cls._base_path = ['interfaces', 'ethernet'] cls._mirror_interfaces = ['dum21354'] # We only test on physical interfaces and not VLAN (sub-)interfaces if 'TEST_ETH' in os.environ: tmp = os.environ['TEST_ETH'].split() cls._interfaces = tmp else: for tmp in Section.interfaces('ethernet', vlan=False): cls._interfaces.append(tmp) cls._macs = {} for interface in cls._interfaces: cls._macs[interface] = read_file(f'/sys/class/net/{interface}/address') # call base-classes classmethod super(EthernetInterfaceTest, cls).setUpClass() def tearDown(self): for interface in self._interfaces: # when using a dedicated interface to test via TEST_ETH environment # variable only this one will be cleared in the end - usable to test # ethernet interfaces via SSH self.cli_delete(self._base_path + [interface]) self.cli_set(self._base_path + [interface, 'duplex', 'auto']) self.cli_set(self._base_path + [interface, 'speed', 'auto']) self.cli_set(self._base_path + [interface, 'hw-id', self._macs[interface]]) self.cli_commit() # Verify that no address remains on the system as this is an eternal # interface. for interface in self._interfaces: self.assertNotIn(AF_INET, ifaddresses(interface)) # required for IPv6 link-local address self.assertIn(AF_INET6, ifaddresses(interface)) for addr in ifaddresses(interface)[AF_INET6]: # checking link local addresses makes no sense if is_ipv6_link_local(addr['addr']): continue self.assertFalse(is_intf_addr_assigned(interface, addr['addr'])) # Ensure no VLAN interfaces are left behind tmp = [x for x in Section.interfaces('ethernet') if x.startswith(f'{interface}.')] self.assertListEqual(tmp, []) def test_offloading_rps(self): # enable RPS on all available CPUs, RPS works with a CPU bitmask, # where each bit represents a CPU (core/thread). The formula below # expands to rps_cpus = 255 for a 8 core system rps_cpus = (1 << os.cpu_count()) -1 # XXX: we should probably reserve one core when the system is under # high preasure so we can still have a core left for housekeeping. # This is done by masking out the lowst bit so CPU0 is spared from # receive packet steering. 
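        # Editor's note (illustrative): on an 8-core system the expression above
        # yields (1 << 8) - 1 = 0xff; the mask on the next line clears bit 0,
        # leaving 0xfe, so CPU1..CPU7 take part in RPS while CPU0 is kept free
        # for housekeeping.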
rps_cpus &= ~1 for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'offload', 'rps']) self.cli_commit() for interface in self._interfaces: cpus = read_file(f'/sys/class/net/{interface}/queues/rx-0/rps_cpus') # remove the nasty ',' separation on larger strings cpus = cpus.replace(',','') cpus = int(cpus, 16) self.assertEqual(f'{cpus:x}', f'{rps_cpus:x}') def test_offloading_rfs(self): global_rfs_flow = 32768 rfs_flow = global_rfs_flow for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'offload', 'rfs']) self.cli_commit() for interface in self._interfaces: queues = len(glob(f'/sys/class/net/{interface}/queues/rx-*')) rfs_flow = int(global_rfs_flow/queues) for i in range(0, queues): tmp = read_file(f'/sys/class/net/{interface}/queues/rx-{i}/rps_flow_cnt') self.assertEqual(int(tmp), rfs_flow) tmp = read_file(f'/proc/sys/net/core/rps_sock_flow_entries') self.assertEqual(int(tmp), global_rfs_flow) # delete configuration of RFS and check all values returned to default "0" for interface in self._interfaces: self.cli_delete(self._base_path + [interface, 'offload', 'rfs']) self.cli_commit() for interface in self._interfaces: queues = len(glob(f'/sys/class/net/{interface}/queues/rx-*')) rfs_flow = int(global_rfs_flow/queues) for i in range(0, queues): tmp = read_file(f'/sys/class/net/{interface}/queues/rx-{i}/rps_flow_cnt') self.assertEqual(int(tmp), 0) def test_non_existing_interface(self): unknonw_interface = self._base_path + ['eth667'] self.cli_set(unknonw_interface) # check validate() - interface does not exist with self.assertRaises(ConfigSessionError): self.cli_commit() # we need to remove this wrong interface from the configuration # manually, else tearDown() will have problem in commit() self.cli_delete(unknonw_interface) def test_speed_duplex_verify(self): for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'speed', '1000']) # check validate() - if either speed or duplex is not auto, the # other one must be manually configured, too with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(self._base_path + [interface, 'speed', 'auto']) self.cli_commit() def test_ethtool_ring_buffer(self): for interface in self._interfaces: # We do not use vyos.ethtool here to not have any chance # for invalid testcases. Re-gain data by hand tmp = cmd(f'sudo ethtool --json --show-ring {interface}') tmp = loads(tmp) max_rx = str(tmp[0]['rx-max']) max_tx = str(tmp[0]['tx-max']) self.cli_set(self._base_path + [interface, 'ring-buffer', 'rx', max_rx]) self.cli_set(self._base_path + [interface, 'ring-buffer', 'tx', max_tx]) self.cli_commit() for interface in self._interfaces: tmp = cmd(f'sudo ethtool --json --show-ring {interface}') tmp = loads(tmp) max_rx = str(tmp[0]['rx-max']) max_tx = str(tmp[0]['tx-max']) rx = str(tmp[0]['rx']) tx = str(tmp[0]['tx']) # validate if the above change was carried out properly and the # ring-buffer size got increased self.assertEqual(max_rx, rx) self.assertEqual(max_tx, tx) def test_ethtool_flow_control(self): for interface in self._interfaces: # Disable flow-control self.cli_set(self._base_path + [interface, 'disable-flow-control']) # Check current flow-control state on ethernet interface out, err = popen(f'sudo ethtool --json --show-pause {interface}') # Flow-control not supported - test if it bails out with a proper # this is a dynamic path where err = 1 on VMware, but err = 0 on # a physical box. 
if bool(err): with self.assertRaises(ConfigSessionError): self.cli_commit() else: out = loads(out) # Flow control is on self.assertTrue(out[0]['autonegotiate']) # commit change on CLI to disable-flow-control and re-test self.cli_commit() out, err = popen(f'sudo ethtool --json --show-pause {interface}') out = loads(out) self.assertFalse(out[0]['autonegotiate']) def test_ethtool_evpn_uplink_tarcking(self): for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'evpn', 'uplink']) self.cli_commit() for interface in self._interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon='zebra') + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=mgmt_daemon) self.assertIn(f' evpn mh uplink', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_policy.py b/smoketest/scripts/cli/test_policy.py index a0c6ab055..7ea1b610e 100755 --- a/smoketest/scripts/cli/test_policy.py +++ b/smoketest/scripts/cli/test_policy.py @@ -1,1994 +1,1994 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.utils.process import cmd base_path = ['policy'] class TestPolicy(VyOSUnitTestSHIM.TestCase): def tearDown(self): self.cli_delete(base_path) self.cli_commit() def test_access_list(self): acls = { '50' : { 'rule' : { '5' : { 'action' : 'permit', 'source' : { 'any' : '' }, }, '10' : { 'action' : 'deny', 'source' : { 'host' : '1.2.3.4' }, }, }, }, '150' : { 'rule' : { '5' : { 'action' : 'permit', 'source' : { 'any' : '' }, 'destination' : { 'host' : '2.2.2.2' }, }, '10' : { 'action' : 'deny', 'source' : { 'any' : '' }, 'destination' : { 'any' : '' }, }, }, }, '2000' : { 'rule' : { '5' : { 'action' : 'permit', 'destination' : { 'any' : '' }, 'source' : { 'network' : '10.0.0.0', 'inverse-mask' : '0.255.255.255' }, }, '10' : { 'action' : 'permit', 'destination' : { 'any' : '' }, 'source' : { 'network' : '172.16.0.0', 'inverse-mask' : '0.15.255.255' }, }, '15' : { 'action' : 'permit', 'destination' : { 'any' : '' }, 'source' : { 'network' : '192.168.0.0', 'inverse-mask' : '0.0.255.255' }, }, '20' : { 'action' : 'permit', 'destination' : { 'network' : '172.16.0.0', 'inverse-mask' : '0.15.255.255' }, 'source' : { 'network' : '10.0.0.0', 'inverse-mask' : '0.255.255.255' }, }, '25' : { 'action' : 'deny', 'destination' : { 'network' : '192.168.0.0', 'inverse-mask' : '0.0.255.255' }, 'source' : { 'network' : '172.16.0.0', 'inverse-mask' : '0.15.255.255' }, }, '30' : { 'action' : 'deny', 'destination' : { 'any' : '' }, 'source' : { 'any' : '' }, }, }, }, } for acl, acl_config in acls.items(): path = base_path + ['access-list', acl] self.cli_set(path + ['description', f'VyOS-ACL-{acl}']) if 'rule' not in acl_config: continue for rule, rule_config in acl_config['rule'].items(): self.cli_set(path + ['rule', rule, 
'action', rule_config['action']]) for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'any']) if 'host' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'host', rule_config[direction]['host']]) if 'network' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'network', rule_config[direction]['network']]) self.cli_set(path + ['rule', rule, direction, 'inverse-mask', rule_config[direction]['inverse-mask']]) self.cli_commit() config = self.getFRRconfig('access-list', end='') for acl, acl_config in acls.items(): for rule, rule_config in acl_config['rule'].items(): tmp = f'access-list {acl} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' if {'source', 'destination'} <= set(rule_config): tmp += ' ip' for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: tmp += ' any' if 'host' in rule_config[direction]: # XXX: Some weird side rule from the old vyatta days # possible to clean this up after the vyos-1x migration if int(acl) in range(100, 200) or int(acl) in range(2000, 2700): tmp += ' host' tmp += ' ' + rule_config[direction]['host'] if 'network' in rule_config[direction]: tmp += ' ' + rule_config[direction]['network'] + ' ' + rule_config[direction]['inverse-mask'] self.assertIn(tmp, config) def test_access_list6(self): acls = { '50' : { 'rule' : { '5' : { 'action' : 'permit', 'source' : { 'any' : '' }, }, '10' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:10::/48', 'exact-match' : '' }, }, '15' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:20::/48' }, }, }, }, '100' : { 'rule' : { '5' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:10::/64', 'exact-match' : '' }, }, '10' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:20::/64', }, }, '15' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:30::/64', 'exact-match' : '' }, }, '20' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:40::/64', 'exact-match' : '' }, }, '25' : { 'action' : 'deny', 'source' : { 'any' : '' }, }, }, }, } for acl, acl_config in acls.items(): path = base_path + ['access-list6', acl] self.cli_set(path + ['description', f'VyOS-ACL-{acl}']) if 'rule' not in acl_config: continue for rule, rule_config in acl_config['rule'].items(): self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'any']) if 'network' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'network', rule_config[direction]['network']]) if 'exact-match' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'exact-match']) self.cli_commit() config = self.getFRRconfig('ipv6 access-list', end='') for acl, acl_config in acls.items(): for rule, rule_config in acl_config['rule'].items(): tmp = f'ipv6 access-list {acl} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' if {'source', 'destination'} <= set(rule_config): tmp += ' ip' for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: tmp += ' any' if 'network' in rule_config[direction]: tmp += ' ' + rule_config[direction]['network'] if 'exact-match' in rule_config[direction]: tmp += ' exact-match' self.assertIn(tmp, config) def 
test_as_path_list(self): test_data = { 'VyOS' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '^44501 64502$', }, '10' : { 'action' : 'permit', 'regex' : '44501|44502|44503', }, '15' : { 'action' : 'permit', 'regex' : '^44501_([0-9]+_)+', }, }, }, 'Customers' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '_10_', }, '10' : { 'action' : 'permit', 'regex' : '_20_', }, '15' : { 'action' : 'permit', 'regex' : '_30_', }, '20' : { 'action' : 'deny', 'regex' : '_40_', }, }, }, 'bogons' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '_0_', }, '10' : { 'action' : 'permit', 'regex' : '_23456_', }, '15' : { 'action' : 'permit', 'regex' : '_6449[6-9]_|_65[0-4][0-9][0-9]_|_655[0-4][0-9]_|_6555[0-1]_', }, '20' : { 'action' : 'permit', 'regex' : '_6555[2-9]_|_655[6-9][0-9]_|_65[6-9][0-9][0-9]_|_6[6-9][0-9][0-9][0-]_|_[7-9][0-9][0-9][0-9][0-9]_|_1[0-2][0-9][0-9][0-9][0-9]_|_130[0-9][0-9][0-9]_|_1310[0-6][0-9]_|_13107[01]_', }, }, }, } for as_path, as_path_config in test_data.items(): path = base_path + ['as-path-list', as_path] self.cli_set(path + ['description', f'VyOS-ASPATH-{as_path}']) if 'rule' not in as_path_config: continue for rule, rule_config in as_path_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp as-path access-list', end='') for as_path, as_path_config in test_data.items(): if 'rule' not in as_path_config: continue for rule, rule_config in as_path_config['rule'].items(): tmp = f'bgp as-path access-list {as_path} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_community_list(self): test_data = { '100' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '.*', }, }, }, '200' : { 'rule' : { '5' : { 'action' : 'deny', 'regex' : '^1:201$', }, '10' : { 'action' : 'deny', 'regex' : '1:101$', }, '15' : { 'action' : 'deny', 'regex' : '^1:100$', }, }, }, } for comm_list, comm_list_config in test_data.items(): path = base_path + ['community-list', comm_list] self.cli_set(path + ['description', f'VyOS-COMM-{comm_list}']) if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp community-list', end='') for comm_list, comm_list_config in test_data.items(): if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): tmp = f'bgp community-list {comm_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_extended_community_list(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '.*', }, }, }, '200' : { 'rule' : { '5' : { 'action' : 'deny', 'regex' : '^1:201$', }, '10' : { 'action' : 'deny', 'regex' : '1:101$', }, '15' : { 'action' : 'deny', 'regex' : '^1:100$', }, }, }, } for comm_list, comm_list_config in test_data.items(): path = base_path + ['extcommunity-list', comm_list] self.cli_set(path + ['description', f'VyOS-EXTCOMM-{comm_list}']) if 'rule' not in comm_list_config: continue for rule, rule_config 
in comm_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp extcommunity-list', end='') for comm_list, comm_list_config in test_data.items(): if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): # if the community is not a number but a name, the expanded # keyword is used expanded = '' if not comm_list.isnumeric(): expanded = ' expanded' tmp = f'bgp extcommunity-list{expanded} {comm_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_large_community_list(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '667:123:100', }, }, }, 'bar' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '65000:120:10', }, '10' : { 'action' : 'permit', 'regex' : '65000:120:20', }, '15' : { 'action' : 'permit', 'regex' : '65000:120:30', }, }, }, } for comm_list, comm_list_config in test_data.items(): path = base_path + ['large-community-list', comm_list] self.cli_set(path + ['description', f'VyOS-LARGECOMM-{comm_list}']) if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp large-community-list', end='') for comm_list, comm_list_config in test_data.items(): if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): tmp = f'bgp large-community-list expanded {comm_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_prefix_list(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '10.0.0.0/8', 'ge' : '16', 'le' : '24', }, '10' : { 'action' : 'deny', 'prefix' : '172.16.0.0/12', 'ge' : '16', }, '15' : { 'action' : 'permit', 'prefix' : '192.168.0.0/16', }, }, }, 'bar' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '10.0.10.0/24', 'ge' : '25', 'le' : '26', }, '10' : { 'action' : 'deny', 'prefix' : '10.0.20.0/24', 'le' : '25', }, '15' : { 'action' : 'permit', 'prefix' : '10.0.25.0/24', }, }, }, } for prefix_list, prefix_list_config in test_data.items(): path = base_path + ['prefix-list', prefix_list] self.cli_set(path + ['description', f'VyOS-PFX-LIST-{prefix_list}']) if 'rule' not in prefix_list_config: continue for rule, rule_config in prefix_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'prefix' in rule_config: self.cli_set(path + ['rule', rule, 'prefix', rule_config['prefix']]) if 'ge' in rule_config: self.cli_set(path + ['rule', rule, 'ge', rule_config['ge']]) if 'le' in rule_config: self.cli_set(path + ['rule', rule, 'le', rule_config['le']]) self.cli_commit() config = self.getFRRconfig('ip prefix-list', end='') for prefix_list, prefix_list_config in test_data.items(): if 'rule' not in prefix_list_config: continue for rule, rule_config in prefix_list_config['rule'].items(): tmp = f'ip prefix-list {prefix_list} seq {rule}' if rule_config['action'] == 
'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['prefix'] if 'ge' in rule_config: tmp += ' ge ' + rule_config['ge'] if 'le' in rule_config: tmp += ' le ' + rule_config['le'] self.assertIn(tmp, config) def test_prefix_list6(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '2001:db8::/32', 'ge' : '40', 'le' : '48', }, '10' : { 'action' : 'deny', 'prefix' : '2001:db8::/32', 'ge' : '48', }, '15' : { 'action' : 'permit', 'prefix' : '2001:db8:1000::/64', }, }, }, 'bar' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '2001:db8:100::/40', 'ge' : '48', }, '10' : { 'action' : 'permit', 'prefix' : '2001:db8:200::/40', 'ge' : '48', }, '15' : { 'action' : 'deny', 'prefix' : '2001:db8:300::/40', 'le' : '64', }, }, }, } for prefix_list, prefix_list_config in test_data.items(): path = base_path + ['prefix-list6', prefix_list] self.cli_set(path + ['description', f'VyOS-PFX-LIST-{prefix_list}']) if 'rule' not in prefix_list_config: continue for rule, rule_config in prefix_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'prefix' in rule_config: self.cli_set(path + ['rule', rule, 'prefix', rule_config['prefix']]) if 'ge' in rule_config: self.cli_set(path + ['rule', rule, 'ge', rule_config['ge']]) if 'le' in rule_config: self.cli_set(path + ['rule', rule, 'le', rule_config['le']]) self.cli_commit() config = self.getFRRconfig('ipv6 prefix-list', end='') for prefix_list, prefix_list_config in test_data.items(): if 'rule' not in prefix_list_config: continue for rule, rule_config in prefix_list_config['rule'].items(): tmp = f'ipv6 prefix-list {prefix_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['prefix'] if 'ge' in rule_config: tmp += ' ge ' + rule_config['ge'] if 'le' in rule_config: tmp += ' le ' + rule_config['le'] self.assertIn(tmp, config) def test_prefix_list_duplicates(self): # FRR does not allow to specify the same profix list rule multiple times # # vyos(config)# ip prefix-list foo seq 10 permit 192.0.2.0/24 # vyos(config)# ip prefix-list foo seq 20 permit 192.0.2.0/24 # % Configuration failed. 
# Error type: validation # Error description: duplicated prefix list value: 192.0.2.0/24 # There is also a VyOS verify() function to test this prefix = '100.64.0.0/10' prefix_list = 'duplicates' test_range = range(20, 25) path = base_path + ['prefix-list', prefix_list] for rule in test_range: self.cli_set(path + ['rule', str(rule), 'action', 'permit']) self.cli_set(path + ['rule', str(rule), 'prefix', prefix]) # Duplicate prefixes with self.assertRaises(ConfigSessionError): self.cli_commit() for rule in test_range: self.cli_set(path + ['rule', str(rule), 'le', str(rule)]) self.cli_commit() config = self.getFRRconfig('ip prefix-list', end='') for rule in test_range: tmp = f'ip prefix-list {prefix_list} seq {rule} permit {prefix} le {rule}' self.assertIn(tmp, config) def test_route_map_community_set(self): test_data = { "community-configuration": { "rule": { "10": { "action": "permit", "set": { "community": { "replace": [ "65000:10", "65001:11" ] }, "extcommunity": { "bandwidth": "200", "rt": [ "65000:10", "192.168.0.1:11" ], "soo": [ "192.168.0.1:11", "65000:10" ] }, "large-community": { "replace": [ "65000:65000:10", "65000:65000:11" ] } } }, "20": { "action": "permit", "set": { "community": { "add": [ "65000:10", "65001:11" ] }, "extcommunity": { "bandwidth": "200", "bandwidth-non-transitive": {} }, "large-community": { "add": [ "65000:65000:10", "65000:65000:11" ] } } }, "30": { "action": "permit", "set": { "community": { "none": {} }, "extcommunity": { "none": {} }, "large-community": { "none": {} } } } } } } for route_map, route_map_config in test_data.items(): path = base_path + ['route-map', route_map] self.cli_set(path + ['description', f'VyOS ROUTE-MAP {route_map}']) if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'set' in rule_config: #Add community in configuration if 'community' in rule_config['set']: if 'none' in rule_config['set']['community']: self.cli_set(path + ['rule', rule, 'set', 'community', 'none']) else: community_path = path + ['rule', rule, 'set', 'community'] if 'add' in rule_config['set']['community']: for community_unit in rule_config['set']['community']['add']: self.cli_set(community_path + ['add', community_unit]) if 'replace' in rule_config['set']['community']: for community_unit in rule_config['set']['community']['replace']: self.cli_set(community_path + ['replace', community_unit]) #Add large-community in configuration if 'large-community' in rule_config['set']: if 'none' in rule_config['set']['large-community']: self.cli_set(path + ['rule', rule, 'set', 'large-community', 'none']) else: community_path = path + ['rule', rule, 'set', 'large-community'] if 'add' in rule_config['set']['large-community']: for community_unit in rule_config['set']['large-community']['add']: self.cli_set(community_path + ['add', community_unit]) if 'replace' in rule_config['set']['large-community']: for community_unit in rule_config['set']['large-community']['replace']: self.cli_set(community_path + ['replace', community_unit]) #Add extcommunity in configuration if 'extcommunity' in rule_config['set']: if 'none' in rule_config['set']['extcommunity']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity', 'none']) else: if 'bandwidth' in rule_config['set']['extcommunity']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity', 'bandwidth', rule_config['set']['extcommunity']['bandwidth']]) if 'bandwidth-non-transitive' in 
rule_config['set']['extcommunity']: self.cli_set(path + ['rule', rule, 'set','extcommunity', 'bandwidth-non-transitive']) if 'rt' in rule_config['set']['extcommunity']: for community_unit in rule_config['set']['extcommunity']['rt']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity','rt',community_unit]) if 'soo' in rule_config['set']['extcommunity']: for community_unit in rule_config['set']['extcommunity']['soo']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity','soo',community_unit]) self.cli_commit() for route_map, route_map_config in test_data.items(): if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): name = f'route-map {route_map} {rule_config["action"]} {rule}' config = self.getFRRconfig(name) self.assertIn(name, config) if 'set' in rule_config: #Check community if 'community' in rule_config['set']: if 'none' in rule_config['set']['community']: tmp = f'set community none' self.assertIn(tmp, config) if 'replace' in rule_config['set']['community']: values = ' '.join(rule_config['set']['community']['replace']) tmp = f'set community {values}' self.assertIn(tmp, config) if 'add' in rule_config['set']['community']: values = ' '.join(rule_config['set']['community']['add']) tmp = f'set community {values} additive' self.assertIn(tmp, config) #Check large-community if 'large-community' in rule_config['set']: if 'none' in rule_config['set']['large-community']: tmp = f'set large-community none' self.assertIn(tmp, config) if 'replace' in rule_config['set']['large-community']: values = ' '.join(rule_config['set']['large-community']['replace']) tmp = f'set large-community {values}' self.assertIn(tmp, config) if 'add' in rule_config['set']['large-community']: values = ' '.join(rule_config['set']['large-community']['add']) tmp = f'set large-community {values} additive' self.assertIn(tmp, config) #Check extcommunity if 'extcommunity' in rule_config['set']: if 'none' in rule_config['set']['extcommunity']: tmp = 'set extcommunity none' self.assertIn(tmp, config) if 'bandwidth' in rule_config['set']['extcommunity']: values = rule_config['set']['extcommunity']['bandwidth'] tmp = f'set extcommunity bandwidth {values}' if 'bandwidth-non-transitive' in rule_config['set']['extcommunity']: tmp = tmp + ' non-transitive' self.assertIn(tmp, config) if 'rt' in rule_config['set']['extcommunity']: values = ' '.join(rule_config['set']['extcommunity']['rt']) tmp = f'set extcommunity rt {values}' self.assertIn(tmp, config) if 'soo' in rule_config['set']['extcommunity']: values = ' '.join(rule_config['set']['extcommunity']['soo']) tmp = f'set extcommunity soo {values}' self.assertIn(tmp, config) def test_route_map(self): access_list = '50' as_path_list = '100' test_interface = 'eth0' community_list = 'BGP-comm-0815' # ext community name only allows alphanumeric characters and no hyphen :/ # maybe change this if possible in vyos-1x rewrite extcommunity_list = 'BGPextcomm123' large_community_list = 'bgp-large-community-123456' prefix_list = 'foo-pfx-list' ipv6_nexthop_address = 'fe80::1' local_pref = '300' metric = '50' peer = '2.3.4.5' peerv6 = '2001:db8::1' tag = '6542' goto = '25' ipv4_nexthop_address= '192.0.2.2' ipv4_prefix_len= '18' ipv6_prefix_len= '122' ipv4_nexthop_type= 'blackhole' ipv6_nexthop_type= 'blackhole' test_data = { 'foo-map-bar' : { 'rule' : { '5' : { 'action' : 'permit', 'continue' : '20', }, '10' : { 'action' : 'permit', 'call' : 'complicated-configuration', }, }, }, 'a-matching-rule-0815': { 'rule' : { '5' : { 'action' : 'deny', 
'match' : { 'as-path' : as_path_list, 'rpki-invalid': '', 'tag': tag, }, }, '10' : { 'action' : 'permit', 'match' : { 'community' : community_list, 'interface' : test_interface, 'rpki-not-found': '', }, }, '15' : { 'action' : 'permit', 'match' : { 'extcommunity' : extcommunity_list, 'rpki-valid': '', }, 'on-match' : { 'next' : '', }, }, '20' : { 'action' : 'permit', 'match' : { 'ip-address-acl': access_list, 'ip-nexthop-acl': access_list, 'ip-route-source-acl': access_list, 'ipv6-address-acl': access_list, 'origin-incomplete' : '', }, 'on-match' : { 'goto' : goto, }, }, '25' : { 'action' : 'permit', 'match' : { 'ip-address-pfx': prefix_list, 'ip-nexthop-pfx': prefix_list, 'ip-route-source-pfx': prefix_list, 'ipv6-address-pfx': prefix_list, 'origin-igp': '', }, }, '30' : { 'action' : 'permit', 'match' : { 'ipv6-nexthop-address' : ipv6_nexthop_address, 'ipv6-nexthop-access-list' : access_list, 'ipv6-nexthop-prefix-list' : prefix_list, 'ipv6-nexthop-type' : ipv6_nexthop_type, 'ipv6-address-pfx-len' : ipv6_prefix_len, 'large-community' : large_community_list, 'local-pref' : local_pref, 'metric': metric, 'origin-egp': '', 'peer' : peer, }, }, '31' : { 'action' : 'permit', 'match' : { 'peer' : peerv6, }, }, '40' : { 'action' : 'permit', 'match' : { 'ip-nexthop-addr' : ipv4_nexthop_address, 'ip-address-pfx-len' : ipv4_prefix_len, }, }, '42' : { 'action' : 'deny', 'match' : { 'ip-nexthop-plen' : ipv4_prefix_len, }, }, '44' : { 'action' : 'permit', 'match' : { 'ip-nexthop-type' : ipv4_nexthop_type, }, }, }, }, 'complicated-configuration' : { 'rule' : { '10' : { 'action' : 'deny', 'set' : { 'aggregator-as' : '1234567890', 'aggregator-ip' : '10.255.255.0', 'as-path-exclude' : '1234', 'as-path-prepend' : '1234567890 987654321', 'as-path-prepend-last-as' : '5', 'atomic-aggregate' : '', 'distance' : '110', 'ipv6-next-hop-global' : '2001::1', 'ipv6-next-hop-local' : 'fe80::1', 'ip-next-hop' : '192.168.1.1', 'local-preference' : '500', 'metric' : '150', 'metric-type' : 'type-1', 'origin' : 'incomplete', 'l3vpn' : '', 'originator-id' : '172.16.10.1', 'src' : '100.0.0.1', 'tag' : '65530', 'weight' : '2', }, }, }, }, 'bandwidth-configuration' : { 'rule' : { '10' : { 'action' : 'deny', 'set' : { 'as-path-prepend' : '100 100', 'distance' : '200', 'extcommunity-bw' : 'num-multipaths', }, }, }, }, 'evpn-configuration' : { 'rule' : { '10' : { 'action' : 'permit', 'match' : { 'evpn-default-route' : '', 'evpn-rd' : '100:300', 'evpn-route-type' : 'prefix', 'evpn-vni' : '1234', }, }, '20' : { 'action' : 'permit', 'set' : { 'as-path-exclude' : 'all', 'evpn-gateway-ipv4' : '192.0.2.99', 'evpn-gateway-ipv6' : '2001:db8:f00::1', }, }, }, }, 'match-protocol' : { 'rule' : { '10' : { 'action' : 'permit', 'match' : { 'protocol' : 'static', }, }, '20' : { 'action' : 'permit', 'match' : { 'protocol' : 'bgp', }, }, }, }, 'relative-metric' : { 'rule' : { '10' : { 'action' : 'permit', 'match' : { 'ip-nexthop-addr' : ipv4_nexthop_address, }, 'set' : { 'metric' : '+10', }, }, '20' : { 'action' : 'permit', 'match' : { 'ip-nexthop-addr' : ipv4_nexthop_address, }, 'set' : { 'metric' : '-20', }, }, '30': { 'action': 'permit', 'match': { 'ip-nexthop-addr': ipv4_nexthop_address, }, 'set': { 'metric': 'rtt', }, }, '40': { 'action': 'permit', 'match': { 'ip-nexthop-addr': ipv4_nexthop_address, }, 'set': { 'metric': '+rtt', }, }, '50': { 'action': 'permit', 'match': { 'ip-nexthop-addr': ipv4_nexthop_address, }, 'set': { 'metric': '-rtt', }, }, }, }, } self.cli_set(['policy', 'access-list', access_list, 'rule', '10', 'action', 'permit']) 
self.cli_set(['policy', 'access-list', access_list, 'rule', '10', 'source', 'host', '1.1.1.1']) self.cli_set(['policy', 'access-list6', access_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'access-list6', access_list, 'rule', '10', 'source', 'network', '2001:db8::/32']) self.cli_set(['policy', 'as-path-list', as_path_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'as-path-list', as_path_list, 'rule', '10', 'regex', '64501 64502']) self.cli_set(['policy', 'community-list', community_list, 'rule', '10', 'action', 'deny']) self.cli_set(['policy', 'community-list', community_list, 'rule', '10', 'regex', '65432']) self.cli_set(['policy', 'extcommunity-list', extcommunity_list, 'rule', '10', 'action', 'deny']) self.cli_set(['policy', 'extcommunity-list', extcommunity_list, 'rule', '10', 'regex', '65000']) self.cli_set(['policy', 'large-community-list', large_community_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'large-community-list', large_community_list, 'rule', '10', 'regex', '100:200:300']) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '10', 'prefix', '192.0.2.0/24']) self.cli_set(['policy', 'prefix-list6', prefix_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'prefix-list6', prefix_list, 'rule', '10', 'prefix', '2001:db8::/32']) for route_map, route_map_config in test_data.items(): path = base_path + ['route-map', route_map] self.cli_set(path + ['description', f'VyOS ROUTE-MAP {route_map}']) if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'call' in rule_config: self.cli_set(path + ['rule', rule, 'call', rule_config['call']]) if 'continue' in rule_config: self.cli_set(path + ['rule', rule, 'continue', rule_config['continue']]) if 'match' in rule_config: if 'as-path' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'as-path', rule_config['match']['as-path']]) if 'community' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'community', 'community-list', rule_config['match']['community']]) self.cli_set(path + ['rule', rule, 'match', 'community', 'exact-match']) if 'evpn-default-route' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'default-route']) if 'evpn-rd' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'rd', rule_config['match']['evpn-rd']]) if 'evpn-route-type' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'route-type', rule_config['match']['evpn-route-type']]) if 'evpn-vni' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'vni', rule_config['match']['evpn-vni']]) if 'extcommunity' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'extcommunity', rule_config['match']['extcommunity']]) if 'interface' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'interface', rule_config['match']['interface']]) if 'ip-address-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'address', 'access-list', rule_config['match']['ip-address-acl']]) if 'ip-address-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'address', 'prefix-list', rule_config['match']['ip-address-pfx']]) if 'ip-address-pfx-len' in rule_config['match']: 
self.cli_set(path + ['rule', rule, 'match', 'ip', 'address', 'prefix-len', rule_config['match']['ip-address-pfx-len']]) if 'ip-nexthop-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'access-list', rule_config['match']['ip-nexthop-acl']]) if 'ip-nexthop-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'prefix-list', rule_config['match']['ip-nexthop-pfx']]) if 'ip-nexthop-addr' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'address', rule_config['match']['ip-nexthop-addr']]) if 'ip-nexthop-plen' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'prefix-len', rule_config['match']['ip-nexthop-plen']]) if 'ip-nexthop-type' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'type', rule_config['match']['ip-nexthop-type']]) if 'ip-route-source-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'route-source', 'access-list', rule_config['match']['ip-route-source-acl']]) if 'ip-route-source-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'route-source', 'prefix-list', rule_config['match']['ip-route-source-pfx']]) if 'ipv6-address-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'address', 'access-list', rule_config['match']['ipv6-address-acl']]) if 'ipv6-address-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'address', 'prefix-list', rule_config['match']['ipv6-address-pfx']]) if 'ipv6-address-pfx-len' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'address', 'prefix-len', rule_config['match']['ipv6-address-pfx-len']]) if 'ipv6-nexthop-address' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'address', rule_config['match']['ipv6-nexthop-address']]) if 'ipv6-nexthop-access-list' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'access-list', rule_config['match']['ipv6-nexthop-access-list']]) if 'ipv6-nexthop-prefix-list' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'prefix-list', rule_config['match']['ipv6-nexthop-prefix-list']]) if 'ipv6-nexthop-type' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'type', rule_config['match']['ipv6-nexthop-type']]) if 'large-community' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'large-community', 'large-community-list', rule_config['match']['large-community']]) if 'local-pref' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'local-preference', rule_config['match']['local-pref']]) if 'metric' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'metric', rule_config['match']['metric']]) if 'origin-igp' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'origin', 'igp']) if 'origin-egp' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'origin', 'egp']) if 'origin-incomplete' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'origin', 'incomplete']) if 'peer' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'peer', rule_config['match']['peer']]) if 'rpki-invalid' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'rpki', 'invalid']) if 'rpki-not-found' in rule_config['match']: self.cli_set(path + ['rule', 
rule, 'match', 'rpki', 'notfound']) if 'rpki-valid' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'rpki', 'valid']) if 'protocol' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'protocol', rule_config['match']['protocol']]) if 'tag' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'tag', rule_config['match']['tag']]) if 'on-match' in rule_config: if 'goto' in rule_config['on-match']: self.cli_set(path + ['rule', rule, 'on-match', 'goto', rule_config['on-match']['goto']]) if 'next' in rule_config['on-match']: self.cli_set(path + ['rule', rule, 'on-match', 'next']) if 'set' in rule_config: if 'aggregator-as' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'aggregator', 'as', rule_config['set']['aggregator-as']]) if 'aggregator-ip' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'aggregator', 'ip', rule_config['set']['aggregator-ip']]) if 'as-path-exclude' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'as-path', 'exclude', rule_config['set']['as-path-exclude']]) if 'as-path-prepend' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'as-path', 'prepend', rule_config['set']['as-path-prepend']]) if 'atomic-aggregate' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'atomic-aggregate']) if 'distance' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'distance', rule_config['set']['distance']]) if 'ipv6-next-hop-global' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'ipv6-next-hop', 'global', rule_config['set']['ipv6-next-hop-global']]) if 'ipv6-next-hop-local' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'ipv6-next-hop', 'local', rule_config['set']['ipv6-next-hop-local']]) if 'ip-next-hop' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'ip-next-hop', rule_config['set']['ip-next-hop']]) if 'l3vpn' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'l3vpn-nexthop', 'encapsulation', 'gre']) if 'local-preference' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'local-preference', rule_config['set']['local-preference']]) if 'metric' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'metric', rule_config['set']['metric']]) if 'metric-type' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'metric-type', rule_config['set']['metric-type']]) if 'origin' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'origin', rule_config['set']['origin']]) if 'originator-id' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'originator-id', rule_config['set']['originator-id']]) if 'src' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'src', rule_config['set']['src']]) if 'tag' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'tag', rule_config['set']['tag']]) if 'weight' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'weight', rule_config['set']['weight']]) if 'evpn-gateway-ipv4' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'evpn', 'gateway', 'ipv4', rule_config['set']['evpn-gateway-ipv4']]) if 'evpn-gateway-ipv6' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'evpn', 'gateway', 'ipv6', rule_config['set']['evpn-gateway-ipv6']]) self.cli_commit() for route_map, route_map_config in test_data.items(): if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): name = f'route-map 
{route_map} {rule_config["action"]} {rule}' config = self.getFRRconfig(name) self.assertIn(name, config) if 'call' in rule_config: tmp = 'call ' + rule_config['call'] self.assertIn(tmp, config) if 'continue' in rule_config: tmp = 'on-match goto ' + rule_config['continue'] self.assertIn(tmp, config) if 'match' in rule_config: if 'as-path' in rule_config['match']: tmp = 'match as-path ' + rule_config['match']['as-path'] self.assertIn(tmp, config) if 'community' in rule_config['match']: tmp = f'match community {rule_config["match"]["community"]} exact-match' self.assertIn(tmp, config) if 'evpn-default-route' in rule_config['match']: tmp = f'match evpn default-route' self.assertIn(tmp, config) if 'evpn-rd' in rule_config['match']: tmp = f'match evpn rd {rule_config["match"]["evpn-rd"]}' self.assertIn(tmp, config) if 'evpn-route-type' in rule_config['match']: tmp = f'match evpn route-type {rule_config["match"]["evpn-route-type"]}' self.assertIn(tmp, config) if 'evpn-vni' in rule_config['match']: tmp = f'match evpn vni {rule_config["match"]["evpn-vni"]}' self.assertIn(tmp, config) if 'extcommunity' in rule_config['match']: tmp = f'match extcommunity {rule_config["match"]["extcommunity"]}' self.assertIn(tmp, config) if 'interface' in rule_config['match']: tmp = f'match interface {rule_config["match"]["interface"]}' self.assertIn(tmp, config) if 'ip-address-acl' in rule_config['match']: tmp = f'match ip address {rule_config["match"]["ip-address-acl"]}' self.assertIn(tmp, config) if 'ip-address-pfx' in rule_config['match']: tmp = f'match ip address prefix-list {rule_config["match"]["ip-address-pfx"]}' self.assertIn(tmp, config) if 'ip-address-pfx-len' in rule_config['match']: tmp = f'match ip address prefix-len {rule_config["match"]["ip-address-pfx-len"]}' self.assertIn(tmp, config) if 'ip-nexthop-acl' in rule_config['match']: tmp = f'match ip next-hop {rule_config["match"]["ip-nexthop-acl"]}' self.assertIn(tmp, config) if 'ip-nexthop-pfx' in rule_config['match']: tmp = f'match ip next-hop prefix-list {rule_config["match"]["ip-nexthop-pfx"]}' self.assertIn(tmp, config) if 'ip-nexthop-addr' in rule_config['match']: tmp = f'match ip next-hop address {rule_config["match"]["ip-nexthop-addr"]}' self.assertIn(tmp, config) if 'ip-nexthop-plen' in rule_config['match']: tmp = f'match ip next-hop prefix-len {rule_config["match"]["ip-nexthop-plen"]}' self.assertIn(tmp, config) if 'ip-nexthop-type' in rule_config['match']: tmp = f'match ip next-hop type {rule_config["match"]["ip-nexthop-type"]}' self.assertIn(tmp, config) if 'ip-route-source-acl' in rule_config['match']: tmp = f'match ip route-source {rule_config["match"]["ip-route-source-acl"]}' self.assertIn(tmp, config) if 'ip-route-source-pfx' in rule_config['match']: tmp = f'match ip route-source prefix-list {rule_config["match"]["ip-route-source-pfx"]}' self.assertIn(tmp, config) if 'ipv6-address-acl' in rule_config['match']: tmp = f'match ipv6 address {rule_config["match"]["ipv6-address-acl"]}' self.assertIn(tmp, config) if 'ipv6-address-pfx' in rule_config['match']: tmp = f'match ipv6 address prefix-list {rule_config["match"]["ipv6-address-pfx"]}' self.assertIn(tmp, config) if 'ipv6-address-pfx-len' in rule_config['match']: tmp = f'match ipv6 address prefix-len {rule_config["match"]["ipv6-address-pfx-len"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-address' in rule_config['match']: tmp = f'match ipv6 next-hop address {rule_config["match"]["ipv6-nexthop-address"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-access-list' in rule_config['match']: tmp 
= f'match ipv6 next-hop {rule_config["match"]["ipv6-nexthop-access-list"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-prefix-list' in rule_config['match']: tmp = f'match ipv6 next-hop prefix-list {rule_config["match"]["ipv6-nexthop-prefix-list"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-type' in rule_config['match']: tmp = f'match ipv6 next-hop type {rule_config["match"]["ipv6-nexthop-type"]}' self.assertIn(tmp, config) if 'large-community' in rule_config['match']: tmp = f'match large-community {rule_config["match"]["large-community"]}' self.assertIn(tmp, config) if 'local-pref' in rule_config['match']: tmp = f'match local-preference {rule_config["match"]["local-pref"]}' self.assertIn(tmp, config) if 'metric' in rule_config['match']: tmp = f'match metric {rule_config["match"]["metric"]}' self.assertIn(tmp, config) if 'origin-igp' in rule_config['match']: tmp = f'match origin igp' self.assertIn(tmp, config) if 'origin-egp' in rule_config['match']: tmp = f'match origin egp' self.assertIn(tmp, config) if 'origin-incomplete' in rule_config['match']: tmp = f'match origin incomplete' self.assertIn(tmp, config) if 'peer' in rule_config['match']: tmp = f'match peer {rule_config["match"]["peer"]}' self.assertIn(tmp, config) if 'protocol' in rule_config['match']: tmp = f'match source-protocol {rule_config["match"]["protocol"]}' self.assertIn(tmp, config) if 'rpki-invalid' in rule_config['match']: tmp = f'match rpki invalid' self.assertIn(tmp, config) if 'rpki-not-found' in rule_config['match']: tmp = f'match rpki notfound' self.assertIn(tmp, config) if 'rpki-valid' in rule_config['match']: tmp = f'match rpki valid' self.assertIn(tmp, config) if 'tag' in rule_config['match']: tmp = f'match tag {rule_config["match"]["tag"]}' self.assertIn(tmp, config) if 'on-match' in rule_config: if 'goto' in rule_config['on-match']: tmp = f'on-match goto {rule_config["on-match"]["goto"]}' self.assertIn(tmp, config) if 'next' in rule_config['on-match']: tmp = f'on-match next' self.assertIn(tmp, config) if 'set' in rule_config: tmp = ' set ' if 'aggregator-as' in rule_config['set']: tmp += 'aggregator as ' + rule_config['set']['aggregator-as'] elif 'aggregator-ip' in rule_config['set']: tmp += ' ' + rule_config['set']['aggregator-ip'] elif 'as-path-exclude' in rule_config['set']: tmp += 'as-path exclude ' + rule_config['set']['as-path-exclude'] elif 'as-path-prepend' in rule_config['set']: tmp += 'as-path prepend ' + rule_config['set']['as-path-prepend'] elif 'as-path-prepend-last-as' in rule_config['set']: tmp += 'as-path prepend last-as' + rule_config['set']['as-path-prepend-last-as'] elif 'atomic-aggregate' in rule_config['set']: tmp += 'atomic-aggregate' elif 'distance' in rule_config['set']: tmp += 'distance ' + rule_config['set']['distance'] elif 'ip-next-hop' in rule_config['set']: tmp += 'ip next-hop ' + rule_config['set']['ip-next-hop'] elif 'ipv6-next-hop-global' in rule_config['set']: tmp += 'ipv6 next-hop global ' + rule_config['set']['ipv6-next-hop-global'] elif 'ipv6-next-hop-local' in rule_config['set']: tmp += 'ipv6 next-hop local ' + rule_config['set']['ipv6-next-hop-local'] elif 'l3vpn' in rule_config['set']: tmp += 'l3vpn next-hop encapsulation gre' elif 'local-preference' in rule_config['set']: tmp += 'local-preference ' + rule_config['set']['local-preference'] elif 'metric' in rule_config['set']: tmp += 'metric ' + rule_config['set']['metric'] elif 'metric-type' in rule_config['set']: tmp += 'metric-type ' + rule_config['set']['metric-type'] elif 'origin' in rule_config['set']: tmp += 
'origin ' + rule_config['set']['origin'] elif 'originator-id' in rule_config['set']: tmp += 'originator-id ' + rule_config['set']['originator-id'] elif 'src' in rule_config['set']: tmp += 'src ' + rule_config['set']['src'] elif 'tag' in rule_config['set']: tmp += 'tag ' + rule_config['set']['tag'] elif 'weight' in rule_config['set']: tmp += 'weight ' + rule_config['set']['weight'] elif 'vpn-gateway-ipv4' in rule_config['set']: tmp += 'evpn gateway ipv4 ' + rule_config['set']['vpn-gateway-ipv4'] elif 'vpn-gateway-ipv6' in rule_config['set']: tmp += 'evpn gateway ipv6 ' + rule_config['set']['vpn-gateway-ipv6'] self.assertIn(tmp, config) # Test set table for some sources def test_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.1', '203.0.113.2'] rule = '50' table = '23' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original = """ 50: from 203.0.113.1 lookup 23 50: from 203.0.113.2 lookup 23 """ tmp = cmd('ip rule show prio 50') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for fwmark def test_fwmark_table_id(self): path = base_path + ['local-route'] fwmk = '24' rule = '101' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 101: from all fwmark 0x18 lookup 154 """ tmp = cmd('ip rule show prio 101') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination def test_destination_table_id(self): path = base_path + ['local-route'] dst = '203.0.113.1' rule = '102' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_commit() original = """ 102: from all to 203.0.113.1 lookup 154 """ tmp = cmd('ip rule show prio 102') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination and protocol def test_protocol_destination_table_id(self): path = base_path + ['local-route'] dst = '203.0.113.12' rule = '85' table = '104' proto = 'tcp' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'protocol', proto]) self.cli_commit() original = """ 85: from all to 203.0.113.12 ipproto tcp lookup 104 """ tmp = cmd('ip rule show prio 85') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination, source, protocol, fwmark and port def test_protocol_port_address_fwmark_table_id(self): path = base_path + ['local-route'] dst = '203.0.113.5' src_list = ['203.0.113.1', '203.0.113.2'] rule = '23' fwmark = '123456' table = '123' new_table = '111' proto = 'udp' new_proto = 'tcp' src_port = '5555' dst_port = '8888' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'source', 'port', src_port]) self.cli_set(path + ['rule', rule, 'protocol', proto]) self.cli_set(path + ['rule', rule, 'fwmark', fwmark]) self.cli_set(path + ['rule', rule, 'destination', 'port', dst_port]) for src in src_list: self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original = """ 23: from 203.0.113.1 to 203.0.113.5 fwmark 0x1e240 ipproto udp sport 5555 dport 8888 lookup 123 23: from 203.0.113.2 to 203.0.113.5 fwmark 0x1e240 ipproto udp sport 5555 dport 8888 lookup 123 """ tmp = 
cmd(f'ip rule show prio {rule}') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Change table and protocol, delete fwmark and source port self.cli_delete(path + ['rule', rule, 'fwmark']) self.cli_delete(path + ['rule', rule, 'source', 'port']) self.cli_set(path + ['rule', rule, 'set', 'table', new_table]) self.cli_set(path + ['rule', rule, 'protocol', new_proto]) self.cli_commit() original = """ 23: from 203.0.113.1 to 203.0.113.5 ipproto tcp dport 8888 lookup 111 23: from 203.0.113.2 to 203.0.113.5 ipproto tcp dport 8888 lookup 111 """ tmp = cmd(f'ip rule show prio {rule}') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with fwmark def test_fwmark_sources_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.11', '203.0.113.12'] fwmk = '23' rule = '100' table = '150' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 100: from 203.0.113.11 fwmark 0x17 lookup 150 100: from 203.0.113.12 fwmark 0x17 lookup 150 """ tmp = cmd('ip rule show prio 100') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with iif def test_iif_sources_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.11', '203.0.113.12'] iif = 'lo' rule = '100' table = '150' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'inbound-interface', iif]) for src in sources: self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() # Check generated configuration # Expected values original = """ 100: from 203.0.113.11 iif lo lookup 150 100: from 203.0.113.12 iif lo lookup 150 """ tmp = cmd('ip rule show prio 100') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources and destinations with fwmark def test_fwmark_sources_destination_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.11', '203.0.113.12'] destinations = ['203.0.113.13', '203.0.113.15'] fwmk = '23' rule = '103' table = '150' for src in sources: for dst in destinations: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 103: from 203.0.113.11 to 203.0.113.13 fwmark 0x17 lookup 150 103: from 203.0.113.11 to 203.0.113.15 fwmark 0x17 lookup 150 103: from 203.0.113.12 to 203.0.113.13 fwmark 0x17 lookup 150 103: from 203.0.113.12 to 203.0.113.15 fwmark 0x17 lookup 150 """ tmp = cmd('ip rule show prio 103') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table ipv6 for some sources ipv6 def test_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:123::/48', '2001:db8:126::/48'] rule = '50' table = '23' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original = """ 50: from 2001:db8:123::/48 lookup 23 50: from 2001:db8:126::/48 lookup 23 """ tmp = cmd('ip -6 rule show prio 50') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for fwmark ipv6 def test_fwmark_ipv6_table_id(self): path = base_path + ['local-route6'] fwmk = '24' rule = '100' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) 
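# Note for the fwmark tests: the mark is configured in decimal on the CLI but
# 'ip rule' prints it in hex, hence fwmark 24 shows up as 0x18 in the expected
# output below (and 23 as 0x17 elsewhere in this file).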
self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 100: from all fwmark 0x18 lookup 154 """ tmp = cmd('ip -6 rule show prio 100') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination ipv6 def test_destination_ipv6_table_id(self): path = base_path + ['local-route6'] dst = '2001:db8:1337::/126' rule = '101' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_commit() original = """ 101: from all to 2001:db8:1337::/126 lookup 154 """ tmp = cmd('ip -6 rule show prio 101') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with fwmark ipv6 def test_fwmark_sources_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:1338::/126', '2001:db8:1339::/126'] fwmk = '23' rule = '102' table = '150' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 102: from 2001:db8:1338::/126 fwmark 0x17 lookup 150 102: from 2001:db8:1339::/126 fwmark 0x17 lookup 150 """ tmp = cmd('ip -6 rule show prio 102') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with iif ipv6 def test_iif_sources_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:1338::/126', '2001:db8:1339::/126'] iif = 'lo' rule = '102' table = '150' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'inbound-interface', iif]) self.cli_commit() # Check generated configuration # Expected values original = """ 102: from 2001:db8:1338::/126 iif lo lookup 150 102: from 2001:db8:1339::/126 iif lo lookup 150 """ tmp = cmd('ip -6 rule show prio 102') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources and destinations with fwmark ipv6 def test_fwmark_sources_destination_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:1338::/126', '2001:db8:1339::/56'] destinations = ['2001:db8:13::/48', '2001:db8:16::/48'] fwmk = '23' rule = '103' table = '150' for src in sources: for dst in destinations: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 103: from 2001:db8:1338::/126 to 2001:db8:13::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1338::/126 to 2001:db8:16::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:13::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:16::/48 fwmark 0x17 lookup 150 """ tmp = cmd('ip -6 rule show prio 103') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test delete table for sources and destination with fwmark ipv4/ipv6 def test_delete_ipv4_ipv6_table_id(self): path = base_path + ['local-route'] path_v6 = base_path + ['local-route6'] sources = ['203.0.113.0/24', '203.0.114.5'] destinations = ['203.0.112.0/24', '203.0.116.5'] sources_v6 = ['2001:db8:1338::/126', '2001:db8:1339::/56'] destinations_v6 = ['2001:db8:13::/48', '2001:db8:16::/48'] fwmk = '23' rule = '103' table = '150' for src in sources: for dst in destinations: self.cli_set(path + ['rule', rule, 'set', 
'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) for src in sources_v6: for dst in destinations_v6: self.cli_set(path_v6 + ['rule', rule, 'set', 'table', table]) self.cli_set(path_v6 + ['rule', rule, 'source', 'address', src]) self.cli_set(path_v6 + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path_v6 + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 103: from 203.0.113.0/24 to 203.0.116.5 fwmark 0x17 lookup 150 103: from 203.0.114.5 to 203.0.112.0/24 fwmark 0x17 lookup 150 103: from 203.0.114.5 to 203.0.116.5 fwmark 0x17 lookup 150 103: from 203.0.113.0/24 to 203.0.112.0/24 fwmark 0x17 lookup 150 """ original_v6 = """ 103: from 2001:db8:1338::/126 to 2001:db8:16::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:13::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:16::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1338::/126 to 2001:db8:13::/48 fwmark 0x17 lookup 150 """ tmp = cmd('ip rule show prio 103') tmp_v6 = cmd('ip -6 rule show prio 103') self.assertEqual(sort_ip(tmp), sort_ip(original)) self.assertEqual(sort_ip(tmp_v6), sort_ip(original_v6)) self.cli_delete(path) self.cli_delete(path_v6) self.cli_commit() tmp = cmd('ip rule show prio 103') tmp_v6 = cmd('ip -6 rule show prio 103') self.assertEqual(sort_ip(tmp), []) self.assertEqual(sort_ip(tmp_v6), []) # Test multiple commits ipv4 def test_multiple_commit_ipv4_table_id(self): path = base_path + ['local-route'] sources = ['192.0.2.1', '192.0.2.2'] destination = '203.0.113.25' rule = '105' table = '151' self.cli_set(path + ['rule', rule, 'set', 'table', table]) for src in sources: self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original_first = """ 105: from 192.0.2.1 lookup 151 105: from 192.0.2.2 lookup 151 """ tmp = cmd('ip rule show prio 105') self.assertEqual(sort_ip(tmp), sort_ip(original_first)) # Create second commit with added destination self.cli_set(path + ['rule', rule, 'destination', 'address', destination]) self.cli_commit() original_second = """ 105: from 192.0.2.1 to 203.0.113.25 lookup 151 105: from 192.0.2.2 to 203.0.113.25 lookup 151 """ tmp = cmd('ip rule show prio 105') self.assertEqual(sort_ip(tmp), sort_ip(original_second)) def test_frr_individual_remove_T6283_T6250(self): path = base_path + ['route-map'] route_maps = ['RMAP-1', 'RMAP_2'] seq = '10' base_local_preference = 300 base_table = 50 # T6250 local_preference = base_local_preference table = base_table for route_map in route_maps: self.cli_set(path + [route_map, 'rule', seq, 'action', 'permit']) self.cli_set(path + [route_map, 'rule', seq, 'set', 'table', str(table)]) self.cli_set(path + [route_map, 'rule', seq, 'set', 'local-preference', str(local_preference)]) local_preference += 20 table += 5 self.cli_commit() local_preference = base_local_preference table = base_table for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertIn(f' set local-preference {local_preference}', config) self.assertIn(f' set table {table}', config) local_preference += 20 table += 5 for route_map in route_maps: self.cli_delete(path + [route_map, 'rule', '10', 'set', 'table']) # we explicitly commit multiple times to be as vandal as possible to the system self.cli_commit() 
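# endsection='^exit' is assumed to make getFRRconfig() stop collecting output
# at the stanza's closing 'exit' marker, so only this route-map block is
# inspected below instead of everything up to the end of the FRR configuration.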
local_preference = base_local_preference for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertIn(f' set local-preference {local_preference}', config) local_preference += 20 # T6283 seq = '20' prepend = '100 100 100' for route_map in route_maps: self.cli_set(path + [route_map, 'rule', seq, 'action', 'permit']) self.cli_set(path + [route_map, 'rule', seq, 'set', 'as-path', 'prepend', prepend]) self.cli_commit() for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertIn(f' set as-path prepend {prepend}', config) for route_map in route_maps: self.cli_delete(path + [route_map, 'rule', seq, 'set']) # we explicitly commit multiple times to be as vandal as possible to the system self.cli_commit() for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertNotIn(f' set', config) def sort_ip(output): o = '\n'.join([' '.join(line.strip().split()) for line in output.strip().splitlines()]) o = o.splitlines() o.sort() return o if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_babel.py b/smoketest/scripts/cli/test_protocols_babel.py index 606c1efd3..107e262cc 100755 --- a/smoketest/scripts/cli/test_protocols_babel.py +++ b/smoketest/scripts/cli/test_protocols_babel.py @@ -1,218 +1,218 @@ #!/usr/bin/env python3 # # Copyright (C) 2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
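The static routing checks that end just above compare ip rule output through sort_ip() because rules sharing one priority can be printed in any order and with varying whitespace. A small standalone usage sketch, reusing the helper's definition from that file with made-up rule lines:

def sort_ip(output):
    o = '\n'.join([' '.join(line.strip().split()) for line in output.strip().splitlines()])
    o = o.splitlines()
    o.sort()
    return o

got = '''
103: from 203.0.114.5   to 203.0.116.5    fwmark 0x17 lookup 150
103: from 203.0.113.0/24 to 203.0.112.0/24 fwmark 0x17 lookup 150
'''
want = '''
103: from 203.0.113.0/24 to 203.0.112.0/24 fwmark 0x17 lookup 150
103: from 203.0.114.5 to 203.0.116.5 fwmark 0x17 lookup 150
'''
# equal after whitespace normalisation and sorting, regardless of rule order
assert sort_ip(got) == sort_ip(want)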
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.ifconfig import Section +from vyos.frrender import babel_daemon from vyos.utils.process import process_named_running from vyos.xml_ref import default_value -PROCESS_NAME = 'babeld' base_path = ['protocols', 'babel'] class TestProtocolsBABEL(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): cls._interfaces = Section.interfaces('ethernet', vlan=False) # call base-classes classmethod super(TestProtocolsBABEL, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(babel_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) cls.cli_delete(cls, ['policy', 'prefix-list']) cls.cli_delete(cls, ['policy', 'prefix-list6']) def tearDown(self): # always destroy the entire babel configuration to make the processes # life as hard as possible self.cli_delete(base_path) self.cli_delete(['policy', 'prefix-list']) self.cli_delete(['policy', 'prefix-list6']) self.cli_commit() # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(babel_daemon)) def test_babel_interfaces(self): def_update_interval = default_value(base_path + ['interface', 'eth0', 'update-interval']) channel = '20' hello_interval = '1000' max_rtt_penalty = '100' rtt_decay = '23' rtt_max = '119' rtt_min = '11' rxcost = '40000' type = 'wired' for interface in self._interfaces: self.cli_set(base_path + ['interface', interface]) self.cli_set(base_path + ['interface', interface, 'channel', channel]) self.cli_set(base_path + ['interface', interface, 'enable-timestamps']) self.cli_set(base_path + ['interface', interface, 'hello-interval', hello_interval]) self.cli_set(base_path + ['interface', interface, 'max-rtt-penalty', max_rtt_penalty]) self.cli_set(base_path + ['interface', interface, 'rtt-decay', rtt_decay]) self.cli_set(base_path + ['interface', interface, 'rtt-max', rtt_max]) self.cli_set(base_path + ['interface', interface, 'rtt-min', rtt_min]) self.cli_set(base_path + ['interface', interface, 'enable-timestamps']) self.cli_set(base_path + ['interface', interface, 'rxcost', rxcost]) self.cli_set(base_path + ['interface', interface, 'split-horizon', 'disable']) self.cli_set(base_path + ['interface', interface, 'type', type]) self.cli_commit() - frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router babel', endsection='^exit', daemon=babel_daemon) for interface in self._interfaces: self.assertIn(f' network {interface}', frrconfig) - iface_config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + iface_config = self.getFRRconfig(f'interface {interface}', endsection='^exit', daemon=babel_daemon) self.assertIn(f' babel channel {channel}', iface_config) self.assertIn(f' babel enable-timestamps', iface_config) self.assertIn(f' babel update-interval {def_update_interval}', iface_config) self.assertIn(f' babel hello-interval {hello_interval}', iface_config) self.assertIn(f' babel rtt-decay {rtt_decay}', iface_config) self.assertIn(f' babel rtt-max {rtt_max}', iface_config) self.assertIn(f' babel rtt-min {rtt_min}', iface_config) self.assertIn(f' babel rxcost {rxcost}', iface_config) self.assertIn(f' babel max-rtt-penalty {max_rtt_penalty}', iface_config) 
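The assertions above read a bounded slice of the daemon configuration: getFRRconfig() is given an endsection regular expression ('^exit' here) that marks where the requested block stops. A rough sketch of such a section extractor, a hypothetical stand-in for the shim helper rather than its actual implementation:

import re

def get_section(config, start, endsection=r'^exit'):
    """Collect lines from the first line beginning with `start` until the
    first later line that matches the `endsection` regular expression."""
    section, inside = [], False
    for line in config.splitlines():
        if not inside and line.startswith(start):
            inside = True
        elif inside and re.match(endsection, line):
            break
        if inside:
            section.append(line)
    return '\n'.join(section)

frr_running_config = '''router babel
 network eth0
 babel resend-delay 100
exit
!
line vty
exit'''
# prints only the "router babel" block, stopping at the first line matching ^exit
print(get_section(frr_running_config, 'router babel'))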
self.assertIn(f' no babel split-horizon', iface_config) self.assertIn(f' babel {type}', iface_config) def test_babel_redistribute(self): ipv4_protos = ['bgp', 'connected', 'isis', 'kernel', 'ospf', 'rip', 'static'] ipv6_protos = ['bgp', 'connected', 'isis', 'kernel', 'ospfv3', 'ripng', 'static'] for protocol in ipv4_protos: self.cli_set(base_path + ['redistribute', 'ipv4', protocol]) for protocol in ipv6_protos: self.cli_set(base_path + ['redistribute', 'ipv6', protocol]) self.cli_commit() - frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router babel', endsection='^exit', daemon=babel_daemon, empty_retry=5) for protocol in ipv4_protos: self.assertIn(f' redistribute ipv4 {protocol}', frrconfig) for protocol in ipv6_protos: if protocol == 'ospfv3': protocol = 'ospf6' self.assertIn(f' redistribute ipv6 {protocol}', frrconfig) def test_babel_basic(self): diversity_factor = '64' resend_delay = '100' smoothing_half_life = '400' self.cli_set(base_path + ['parameters', 'diversity']) self.cli_set(base_path + ['parameters', 'diversity-factor', diversity_factor]) self.cli_set(base_path + ['parameters', 'resend-delay', resend_delay]) self.cli_set(base_path + ['parameters', 'smoothing-half-life', smoothing_half_life]) self.cli_commit() - frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router babel', endsection='^exit', daemon=babel_daemon) self.assertIn(f' babel diversity', frrconfig) self.assertIn(f' babel diversity-factor {diversity_factor}', frrconfig) self.assertIn(f' babel resend-delay {resend_delay}', frrconfig) self.assertIn(f' babel smoothing-half-life {smoothing_half_life}', frrconfig) def test_babel_distribute_list(self): access_list_in4 = '40' access_list_out4 = '50' access_list_in4_iface = '44' access_list_out4_iface = '55' access_list_in6 = 'AL-foo-in6' access_list_out6 = 'AL-foo-out6' prefix_list_in4 = 'PL-foo-in4' prefix_list_out4 = 'PL-foo-out4' prefix_list_in6 = 'PL-foo-in6' prefix_list_out6 = 'PL-foo-out6' self.cli_set(['policy', 'access-list', access_list_in4]) self.cli_set(['policy', 'access-list', access_list_out4]) self.cli_set(['policy', 'access-list6', access_list_in6]) self.cli_set(['policy', 'access-list6', access_list_out6]) self.cli_set(['policy', 'access-list', f'{access_list_in4_iface}']) self.cli_set(['policy', 'access-list', f'{access_list_out4_iface}']) self.cli_set(['policy', 'prefix-list', prefix_list_in4]) self.cli_set(['policy', 'prefix-list', prefix_list_out4]) self.cli_set(['policy', 'prefix-list6', prefix_list_in6]) self.cli_set(['policy', 'prefix-list6', prefix_list_out6]) self.cli_set(base_path + ['distribute-list', 'ipv4', 'access-list', 'in', access_list_in4]) self.cli_set(base_path + ['distribute-list', 'ipv4', 'access-list', 'out', access_list_out4]) self.cli_set(base_path + ['distribute-list', 'ipv6', 'access-list', 'in', access_list_in6]) self.cli_set(base_path + ['distribute-list', 'ipv6', 'access-list', 'out', access_list_out6]) self.cli_set(base_path + ['distribute-list', 'ipv4', 'prefix-list', 'in', prefix_list_in4]) self.cli_set(base_path + ['distribute-list', 'ipv4', 'prefix-list', 'out', prefix_list_out4]) self.cli_set(base_path + ['distribute-list', 'ipv6', 'prefix-list', 'in', prefix_list_in6]) self.cli_set(base_path + ['distribute-list', 'ipv6', 'prefix-list', 'out', prefix_list_out6]) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface]) self.cli_set(['policy', 'access-list6', 
f'{access_list_in6}-{interface}']) self.cli_set(['policy', 'access-list6', f'{access_list_out6}-{interface}']) self.cli_set(['policy', 'prefix-list', f'{prefix_list_in4}-{interface}']) self.cli_set(['policy', 'prefix-list', f'{prefix_list_out4}-{interface}']) self.cli_set(['policy', 'prefix-list6', f'{prefix_list_in6}-{interface}']) self.cli_set(['policy', 'prefix-list6', f'{prefix_list_out6}-{interface}']) tmp_path = base_path + ['distribute-list', 'ipv4', 'interface', interface] self.cli_set(tmp_path + ['access-list', 'in', f'{access_list_in4_iface}']) self.cli_set(tmp_path + ['access-list', 'out', f'{access_list_out4_iface}']) self.cli_set(tmp_path + ['prefix-list', 'in', f'{prefix_list_in4}-{interface}']) self.cli_set(tmp_path + ['prefix-list', 'out', f'{prefix_list_out4}-{interface}']) tmp_path = base_path + ['distribute-list', 'ipv6', 'interface', interface] self.cli_set(tmp_path + ['access-list', 'in', f'{access_list_in6}-{interface}']) self.cli_set(tmp_path + ['access-list', 'out', f'{access_list_out6}-{interface}']) self.cli_set(tmp_path + ['prefix-list', 'in', f'{prefix_list_in6}-{interface}']) self.cli_set(tmp_path + ['prefix-list', 'out', f'{prefix_list_out6}-{interface}']) self.cli_commit() - frrconfig = self.getFRRconfig('router babel', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router babel', endsection='^exit', daemon=babel_daemon) self.assertIn(f' distribute-list {access_list_in4} in', frrconfig) self.assertIn(f' distribute-list {access_list_out4} out', frrconfig) self.assertIn(f' ipv6 distribute-list {access_list_in6} in', frrconfig) self.assertIn(f' ipv6 distribute-list {access_list_out6} out', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_in4} in', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_out4} out', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_in6} in', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_out6} out', frrconfig) for interface in self._interfaces: self.assertIn(f' distribute-list {access_list_in4_iface} in {interface}', frrconfig) self.assertIn(f' distribute-list {access_list_out4_iface} out {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list {access_list_in6}-{interface} in {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list {access_list_out6}-{interface} out {interface}', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_in4}-{interface} in {interface}', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_out4}-{interface} out {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_in6}-{interface} in {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_out6}-{interface} out {interface}', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_bfd.py b/smoketest/scripts/cli/test_protocols_bfd.py index 716d0a806..32e39e8f7 100755 --- a/smoketest/scripts/cli/test_protocols_bfd.py +++ b/smoketest/scripts/cli/test_protocols_bfd.py @@ -1,236 +1,238 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError +from vyos.frrender import bfd_daemon from vyos.utils.process import process_named_running -PROCESS_NAME = 'bfdd' base_path = ['protocols', 'bfd'] +frr_endsection = '^ exit' dum_if = 'dum1001' vrf_name = 'red' peers = { '192.0.2.10' : { 'intv_rx' : '500', 'intv_tx' : '600', 'multihop' : '', 'source_addr': '192.0.2.254', 'profile' : 'foo-bar-baz', 'minimum_ttl': '20', }, '192.0.2.20' : { 'echo_mode' : '', 'intv_echo' : '100', 'intv_mult' : '100', 'intv_rx' : '222', 'intv_tx' : '333', 'passive' : '', 'shutdown' : '', 'profile' : 'foo', 'source_intf': dum_if, }, '2001:db8::1000:1' : { 'source_addr': '2001:db8::1', 'vrf' : vrf_name, }, '2001:db8::2000:1' : { 'source_addr': '2001:db8::1', 'multihop' : '', 'profile' : 'baz_foo', }, } profiles = { 'foo' : { 'echo_mode' : '', 'intv_echo' : '100', 'intv_mult' : '101', 'intv_rx' : '222', 'intv_tx' : '333', 'shutdown' : '', 'minimum_ttl': '40', }, 'foo-bar-baz' : { 'intv_mult' : '4', 'intv_rx' : '400', 'intv_tx' : '400', }, 'baz_foo' : { 'intv_mult' : '102', 'intv_rx' : '444', 'passive' : '', }, } class TestProtocolsBFD(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsBFD, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(bfd_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(bfd_daemon)) def test_bfd_peer(self): self.cli_set(['vrf', 'name', vrf_name, 'table', '1000']) for peer, peer_config in peers.items(): if 'echo_mode' in peer_config: self.cli_set(base_path + ['peer', peer, 'echo-mode']) if 'intv_echo' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'echo-interval', peer_config["intv_echo"]]) if 'intv_mult' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'multiplier', peer_config["intv_mult"]]) if 'intv_rx' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'receive', peer_config["intv_rx"]]) if 'intv_tx' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'transmit', peer_config["intv_tx"]]) if 'minimum_ttl' in peer_config: self.cli_set(base_path + ['peer', peer, 'minimum-ttl', peer_config["minimum_ttl"]]) if 'multihop' in peer_config: self.cli_set(base_path + ['peer', peer, 'multihop']) if 'passive' in peer_config: self.cli_set(base_path + ['peer', peer, 'passive']) if 'shutdown' in peer_config: self.cli_set(base_path + ['peer', peer, 'shutdown']) if 'source_addr' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'address', peer_config["source_addr"]]) if 'source_intf' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'interface', 
peer_config["source_intf"]]) if 'vrf' in peer_config: self.cli_set(base_path + ['peer', peer, 'vrf', peer_config["vrf"]]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig('bfd', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('bfd', endsection='^exit', daemon=bfd_daemon) for peer, peer_config in peers.items(): tmp = f'peer {peer}' if 'multihop' in peer_config: tmp += f' multihop' if 'source_addr' in peer_config: tmp += f' local-address {peer_config["source_addr"]}' if 'source_intf' in peer_config: tmp += f' interface {peer_config["source_intf"]}' if 'vrf' in peer_config: tmp += f' vrf {peer_config["vrf"]}' self.assertIn(tmp, frrconfig) - peerconfig = self.getFRRconfig(f' peer {peer}', end='', daemon=PROCESS_NAME) - + peerconfig = self.getFRRconfig(f' peer {peer}', end='', endsection=frr_endsection, + daemon=bfd_daemon) if 'echo_mode' in peer_config: self.assertIn(f'echo-mode', peerconfig) if 'intv_echo' in peer_config: self.assertIn(f'echo receive-interval {peer_config["intv_echo"]}', peerconfig) self.assertIn(f'echo transmit-interval {peer_config["intv_echo"]}', peerconfig) if 'intv_mult' in peer_config: self.assertIn(f'detect-multiplier {peer_config["intv_mult"]}', peerconfig) if 'intv_rx' in peer_config: self.assertIn(f'receive-interval {peer_config["intv_rx"]}', peerconfig) if 'intv_tx' in peer_config: self.assertIn(f'transmit-interval {peer_config["intv_tx"]}', peerconfig) if 'minimum_ttl' in peer_config: self.assertIn(f'minimum-ttl {peer_config["minimum_ttl"]}', peerconfig) if 'passive' in peer_config: self.assertIn(f'passive-mode', peerconfig) if 'shutdown' in peer_config: self.assertIn(f'shutdown', peerconfig) else: self.assertNotIn(f'shutdown', peerconfig) self.cli_delete(['vrf', 'name', vrf_name]) def test_bfd_profile(self): for profile, profile_config in profiles.items(): if 'echo_mode' in profile_config: self.cli_set(base_path + ['profile', profile, 'echo-mode']) if 'intv_echo' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'echo-interval', profile_config["intv_echo"]]) if 'intv_mult' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'multiplier', profile_config["intv_mult"]]) if 'intv_rx' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'receive', profile_config["intv_rx"]]) if 'intv_tx' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'transmit', profile_config["intv_tx"]]) if 'minimum_ttl' in profile_config: self.cli_set(base_path + ['profile', profile, 'minimum-ttl', profile_config["minimum_ttl"]]) if 'passive' in profile_config: self.cli_set(base_path + ['profile', profile, 'passive']) if 'shutdown' in profile_config: self.cli_set(base_path + ['profile', profile, 'shutdown']) for peer, peer_config in peers.items(): if 'profile' in peer_config: self.cli_set(base_path + ['peer', peer, 'profile', peer_config["profile"] + 'wrong']) if 'source_addr' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'address', peer_config["source_addr"]]) if 'source_intf' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'interface', peer_config["source_intf"]]) # BFD profile does not exist! 
with self.assertRaises(ConfigSessionError): self.cli_commit() for peer, peer_config in peers.items(): if 'profile' in peer_config: self.cli_set(base_path + ['peer', peer, 'profile', peer_config["profile"]]) # commit changes self.cli_commit() # Verify FRR bgpd configuration for profile, profile_config in profiles.items(): - config = self.getFRRconfig(f' profile {profile}', endsection='^ !') + config = self.getFRRconfig(f' profile {profile}', endsection=frr_endsection) if 'echo_mode' in profile_config: self.assertIn(f' echo-mode', config) if 'intv_echo' in profile_config: self.assertIn(f' echo receive-interval {profile_config["intv_echo"]}', config) self.assertIn(f' echo transmit-interval {profile_config["intv_echo"]}', config) if 'intv_mult' in profile_config: self.assertIn(f' detect-multiplier {profile_config["intv_mult"]}', config) if 'intv_rx' in profile_config: self.assertIn(f' receive-interval {profile_config["intv_rx"]}', config) if 'intv_tx' in profile_config: self.assertIn(f' transmit-interval {profile_config["intv_tx"]}', config) if 'minimum_ttl' in profile_config: self.assertIn(f' minimum-ttl {profile_config["minimum_ttl"]}', config) if 'passive' in profile_config: self.assertIn(f' passive-mode', config) if 'shutdown' in profile_config: self.assertIn(f' shutdown', config) else: self.assertNotIn(f'shutdown', config) for peer, peer_config in peers.items(): - peerconfig = self.getFRRconfig(f' peer {peer}', end='', daemon=PROCESS_NAME) + peerconfig = self.getFRRconfig(f' peer {peer}', end='', + endsection=frr_endsection, daemon=bfd_daemon) if 'profile' in peer_config: self.assertIn(f' profile {peer_config["profile"]}', peerconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_bgp.py b/smoketest/scripts/cli/test_protocols_bgp.py index ea2f561a4..cdf18a051 100755 --- a/smoketest/scripts/cli/test_protocols_bgp.py +++ b/smoketest/scripts/cli/test_protocols_bgp.py @@ -1,1411 +1,1417 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2023 VyOS maintainers and contributors +# Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
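Each test class above pins the FRR daemon PID in setUpClass and re-checks it in tearDown, so a commit that crashes or restarts the daemon fails the run even when the rendered configuration looks correct. A rough equivalent of that check, assuming psutil is available; the real helper is vyos.utils.process.process_named_running:

import psutil

def pid_of(name):
    """Return the PID of the first running process with this name, or None."""
    for proc in psutil.process_iter(['pid', 'name']):
        if proc.info['name'] == name:
            return proc.info['pid']
    return None

# captured once before any commit ...
pid_before = pid_of('bgpd')
# ... and compared again after all commits: reloads are fine, restarts are not
assert pid_before == pid_of('bgpd')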
import unittest from time import sleep from base_vyostest_shim import VyOSUnitTestSHIM from vyos.ifconfig import Section from vyos.configsession import ConfigSessionError from vyos.template import is_ipv6 from vyos.utils.process import process_named_running from vyos.utils.process import cmd +from vyos.frrender import bgp_daemon -PROCESS_NAME = 'bgpd' ASN = '64512' base_path = ['protocols', 'bgp'] route_map_in = 'foo-map-in' route_map_out = 'foo-map-out' prefix_list_in = 'pfx-foo-in' prefix_list_out = 'pfx-foo-out' prefix_list_in6 = 'pfx-foo-in6' prefix_list_out6 = 'pfx-foo-out6' bfd_profile = 'foo-bar-baz' import_afi = 'ipv4-unicast' import_vrf = 'red' import_rd = ASN + ':100' import_vrf_base = ['vrf', 'name'] neighbor_config = { '192.0.2.1' : { 'bfd' : '', 'cap_dynamic' : '', 'cap_ext_next' : '', 'cap_ext_sver' : '', 'remote_as' : '100', 'adv_interv' : '400', 'passive' : '', 'password' : 'VyOS-Secure123', 'shutdown' : '', 'cap_over' : '', 'ttl_security' : '5', 'system_as' : '300', 'route_map_in' : route_map_in, 'route_map_out' : route_map_out, 'no_send_comm_ext' : '', 'addpath_all' : '', 'p_attr_discard' : ['10', '20', '30', '40', '50'], }, '192.0.2.2' : { 'bfd_profile' : bfd_profile, 'remote_as' : '200', 'shutdown' : '', 'no_cap_nego' : '', 'port' : '667', 'cap_strict' : '', 'advertise_map' : route_map_in, 'non_exist_map' : route_map_out, 'pfx_list_in' : prefix_list_in, 'pfx_list_out' : prefix_list_out, 'no_send_comm_std' : '', 'local_role' : 'rs-client', 'p_attr_taw' : '200', }, '192.0.2.3' : { 'advertise_map' : route_map_in, 'description' : 'foo bar baz', 'remote_as' : '200', 'passive' : '', 'multi_hop' : '5', 'update_src' : 'lo', 'peer_group' : 'foo', 'graceful_rst' : '', }, '2001:db8::1' : { 'advertise_map' : route_map_in, 'exist_map' : route_map_out, 'cap_dynamic' : '', 'cap_ext_next' : '', 'cap_ext_sver' : '', 'remote_as' : '123', 'adv_interv' : '400', 'passive' : '', 'password' : 'VyOS-Secure123', 'shutdown' : '', 'cap_over' : '', 'ttl_security' : '5', 'system_as' : '300', 'solo' : '', 'route_map_in' : route_map_in, 'route_map_out' : route_map_out, 'no_send_comm_std' : '', 'addpath_per_as' : '', 'peer_group' : 'foo-bar', 'local_role' : 'customer', 'local_role_strict': '', }, '2001:db8::2' : { 'remote_as' : '456', 'shutdown' : '', 'no_cap_nego' : '', 'port' : '667', 'cap_strict' : '', 'pfx_list_in' : prefix_list_in6, 'pfx_list_out' : prefix_list_out6, 'no_send_comm_ext' : '', 'peer_group' : 'foo-bar_baz', 'graceful_rst_hlp' : '', 'disable_conn_chk' : '', }, } peer_group_config = { 'foo' : { 'advertise_map' : route_map_in, 'exist_map' : route_map_out, 'bfd' : '', 'remote_as' : '100', 'passive' : '', 'password' : 'VyOS-Secure123', 'shutdown' : '', 'cap_over' : '', 'ttl_security' : '5', 'disable_conn_chk' : '', 'p_attr_discard' : ['100', '150', '200'], }, 'bar' : { 'remote_as' : '111', 'graceful_rst_no' : '', 'port' : '667', 'p_attr_taw' : '126', }, 'foo-bar' : { 'advertise_map' : route_map_in, 'description' : 'foo peer bar group', 'remote_as' : '200', 'shutdown' : '', 'no_cap_nego' : '', 'system_as' : '300', 'pfx_list_in' : prefix_list_in, 'pfx_list_out' : prefix_list_out, 'no_send_comm_ext' : '', }, 'foo-bar_baz' : { 'advertise_map' : route_map_in, 'non_exist_map' : route_map_out, 'bfd_profile' : bfd_profile, 'cap_dynamic' : '', 'cap_ext_next' : '', 'remote_as' : '200', 'passive' : '', 'multi_hop' : '5', 'update_src' : 'lo', 'route_map_in' : route_map_in, 'route_map_out' : route_map_out, 'local_role' : 'peer', 'local_role_strict': '', }, } class 
TestProtocolsBGP(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsBGP, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(bgp_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) cls.cli_delete(cls, ['policy', 'route-map']) cls.cli_delete(cls, ['policy', 'prefix-list']) cls.cli_delete(cls, ['policy', 'prefix-list6']) cls.cli_delete(cls, ['vrf']) cls.cli_set(cls, ['policy', 'route-map', route_map_in, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'route-map', route_map_out, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_in, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_in, 'rule', '10', 'prefix', '192.0.2.0/25']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_out, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_out, 'rule', '10', 'prefix', '192.0.2.128/25']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_in6, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_in6, 'rule', '10', 'prefix', '2001:db8:1000::/64']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_out6, 'rule', '10', 'action', 'deny']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_out6, 'rule', '10', 'prefix', '2001:db8:2000::/64']) @classmethod def tearDownClass(cls): cls.cli_delete(cls, ['policy', 'route-map']) cls.cli_delete(cls, ['policy', 'prefix-list']) cls.cli_delete(cls, ['policy', 'prefix-list6']) def setUp(self): self.cli_set(base_path + ['system-as', ASN]) def tearDown(self): # cleanup any possible VRF mess self.cli_delete(['vrf']) # always destrox the entire bgpd configuration to make the processes # life as hard as possible self.cli_delete(base_path) self.cli_commit() + frrconfig = self.getFRRconfig('router bgp', endsection='^exit', daemon=bgp_daemon) + self.assertNotIn(f'router bgp', frrconfig) + # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(bgp_daemon)) def create_bgp_instances_for_import_test(self): table = '1000' self.cli_set(import_vrf_base + [import_vrf, 'table', table]) self.cli_set(import_vrf_base + [import_vrf, 'protocols', 'bgp', 'system-as', ASN]) def verify_frr_config(self, peer, peer_config, frrconfig): # recurring patterns to verify for both a simple neighbor and a peer-group if 'bfd' in peer_config: self.assertIn(f' neighbor {peer} bfd', frrconfig) if 'bfd_profile' in peer_config: self.assertIn(f' neighbor {peer} bfd profile {peer_config["bfd_profile"]}', frrconfig) self.assertIn(f' neighbor {peer} bfd check-control-plane-failure', frrconfig) if 'cap_dynamic' in peer_config: self.assertIn(f' neighbor {peer} capability dynamic', frrconfig) if 'cap_ext_next' in peer_config: self.assertIn(f' neighbor {peer} capability extended-nexthop', frrconfig) if 'cap_ext_sver' in peer_config: self.assertIn(f' neighbor {peer} capability software-version', frrconfig) if 'description' in peer_config: self.assertIn(f' neighbor {peer} description {peer_config["description"]}', frrconfig) if 'no_cap_nego' in peer_config: self.assertIn(f' neighbor {peer} dont-capability-negotiate', frrconfig) if 'multi_hop' in peer_config: 
self.assertIn(f' neighbor {peer} ebgp-multihop {peer_config["multi_hop"]}', frrconfig) if 'local_as' in peer_config: self.assertIn(f' neighbor {peer} local-as {peer_config["local_as"]} no-prepend replace-as', frrconfig) if 'local_role' in peer_config: tmp = f' neighbor {peer} local-role {peer_config["local_role"]}' if 'local_role_strict' in peer_config: tmp += ' strict' self.assertIn(tmp, frrconfig) if 'cap_over' in peer_config: self.assertIn(f' neighbor {peer} override-capability', frrconfig) if 'passive' in peer_config: self.assertIn(f' neighbor {peer} passive', frrconfig) if 'password' in peer_config: self.assertIn(f' neighbor {peer} password {peer_config["password"]}', frrconfig) if 'port' in peer_config: self.assertIn(f' neighbor {peer} port {peer_config["port"]}', frrconfig) if 'remote_as' in peer_config: self.assertIn(f' neighbor {peer} remote-as {peer_config["remote_as"]}', frrconfig) if 'solo' in peer_config: self.assertIn(f' neighbor {peer} solo', frrconfig) if 'shutdown' in peer_config: self.assertIn(f' neighbor {peer} shutdown', frrconfig) if 'ttl_security' in peer_config: self.assertIn(f' neighbor {peer} ttl-security hops {peer_config["ttl_security"]}', frrconfig) if 'update_src' in peer_config: self.assertIn(f' neighbor {peer} update-source {peer_config["update_src"]}', frrconfig) if 'route_map_in' in peer_config: self.assertIn(f' neighbor {peer} route-map {peer_config["route_map_in"]} in', frrconfig) if 'route_map_out' in peer_config: self.assertIn(f' neighbor {peer} route-map {peer_config["route_map_out"]} out', frrconfig) if 'pfx_list_in' in peer_config: self.assertIn(f' neighbor {peer} prefix-list {peer_config["pfx_list_in"]} in', frrconfig) if 'pfx_list_out' in peer_config: self.assertIn(f' neighbor {peer} prefix-list {peer_config["pfx_list_out"]} out', frrconfig) if 'no_send_comm_std' in peer_config: self.assertIn(f' no neighbor {peer} send-community', frrconfig) if 'no_send_comm_ext' in peer_config: self.assertIn(f' no neighbor {peer} send-community extended', frrconfig) if 'addpath_all' in peer_config: self.assertIn(f' neighbor {peer} addpath-tx-all-paths', frrconfig) if 'p_attr_discard' in peer_config: tmp = ' '.join(peer_config["p_attr_discard"]) self.assertIn(f' neighbor {peer} path-attribute discard {tmp}', frrconfig) if 'p_attr_taw' in peer_config: self.assertIn(f' neighbor {peer} path-attribute treat-as-withdraw {peer_config["p_attr_taw"]}', frrconfig) if 'addpath_per_as' in peer_config: self.assertIn(f' neighbor {peer} addpath-tx-bestpath-per-AS', frrconfig) if 'advertise_map' in peer_config: base = f' neighbor {peer} advertise-map {peer_config["advertise_map"]}' if 'exist_map' in peer_config: base = f'{base} exist-map {peer_config["exist_map"]}' if 'non_exist_map' in peer_config: base = f'{base} non-exist-map {peer_config["non_exist_map"]}' self.assertIn(base, frrconfig) if 'graceful_rst' in peer_config: self.assertIn(f' neighbor {peer} graceful-restart', frrconfig) if 'graceful_rst_no' in peer_config: self.assertIn(f' neighbor {peer} graceful-restart-disable', frrconfig) if 'graceful_rst_hlp' in peer_config: self.assertIn(f' neighbor {peer} graceful-restart-helper', frrconfig) if 'disable_conn_chk' in peer_config: self.assertIn(f' neighbor {peer} disable-connected-check', frrconfig) def test_bgp_01_simple(self): router_id = '127.0.0.1' local_pref = '500' stalepath_time = '60' max_path_v4 = '2' max_path_v4ibgp = '4' max_path_v6 = '8' max_path_v6ibgp = '16' cond_adv_timer = '30' min_hold_time = '2' tcp_keepalive_idle = '66' tcp_keepalive_interval = '77' 
tcp_keepalive_probes = '22' self.cli_set(base_path + ['parameters', 'allow-martian-nexthop']) self.cli_set(base_path + ['parameters', 'disable-ebgp-connected-route-check']) self.cli_set(base_path + ['parameters', 'no-hard-administrative-reset']) self.cli_set(base_path + ['parameters', 'log-neighbor-changes']) self.cli_set(base_path + ['parameters', 'labeled-unicast', 'explicit-null']) self.cli_set(base_path + ['parameters', 'router-id', router_id]) # System AS number MUST be defined - as this is set in setUp() we remove # this once for testing of the proper error self.cli_delete(base_path + ['system-as']) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['system-as', ASN]) # Default local preference (higher = more preferred, default value is 100) self.cli_set(base_path + ['parameters', 'default', 'local-pref', local_pref]) self.cli_set(base_path + ['parameters', 'graceful-restart', 'stalepath-time', stalepath_time]) self.cli_set(base_path + ['parameters', 'graceful-shutdown']) self.cli_set(base_path + ['parameters', 'ebgp-requires-policy']) self.cli_set(base_path + ['parameters', 'bestpath', 'as-path', 'multipath-relax']) self.cli_set(base_path + ['parameters', 'bestpath', 'bandwidth', 'default-weight-for-missing']) self.cli_set(base_path + ['parameters', 'bestpath', 'compare-routerid']) self.cli_set(base_path + ['parameters', 'bestpath', 'peer-type', 'multipath-relax']) self.cli_set(base_path + ['parameters', 'conditional-advertisement', 'timer', cond_adv_timer]) self.cli_set(base_path + ['parameters', 'fast-convergence']) self.cli_set(base_path + ['parameters', 'minimum-holdtime', min_hold_time]) self.cli_set(base_path + ['parameters', 'no-suppress-duplicates']) self.cli_set(base_path + ['parameters', 'reject-as-sets']) self.cli_set(base_path + ['parameters', 'route-reflector-allow-outbound-policy']) self.cli_set(base_path + ['parameters', 'shutdown']) self.cli_set(base_path + ['parameters', 'suppress-fib-pending']) self.cli_set(base_path + ['parameters', 'tcp-keepalive', 'idle', tcp_keepalive_idle]) self.cli_set(base_path + ['parameters', 'tcp-keepalive', 'interval', tcp_keepalive_interval]) self.cli_set(base_path + ['parameters', 'tcp-keepalive', 'probes', tcp_keepalive_probes]) # AFI maximum path support self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'maximum-paths', 'ebgp', max_path_v4]) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'maximum-paths', 'ibgp', max_path_v4ibgp]) self.cli_set(base_path + ['address-family', 'ipv4-labeled-unicast', 'maximum-paths', 'ebgp', max_path_v4]) self.cli_set(base_path + ['address-family', 'ipv4-labeled-unicast', 'maximum-paths', 'ibgp', max_path_v4ibgp]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'maximum-paths', 'ebgp', max_path_v6]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'maximum-paths', 'ibgp', max_path_v6ibgp]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'router bgp {ASN}') + frrconfig = self.getFRRconfig(f'router bgp {ASN}', daemon=bgp_daemon) self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' bgp router-id {router_id}', frrconfig) self.assertIn(f' bgp allow-martian-nexthop', frrconfig) self.assertIn(f' bgp disable-ebgp-connected-route-check', frrconfig) self.assertIn(f' bgp log-neighbor-changes', frrconfig) self.assertIn(f' bgp default local-preference {local_pref}', frrconfig) self.assertIn(f' bgp conditional-advertisement timer {cond_adv_timer}', frrconfig) self.assertIn(f' 
bgp fast-convergence', frrconfig) self.assertIn(f' bgp graceful-restart stalepath-time {stalepath_time}', frrconfig) self.assertIn(f' bgp graceful-shutdown', frrconfig) self.assertIn(f' no bgp hard-administrative-reset', frrconfig) self.assertIn(f' bgp labeled-unicast explicit-null', frrconfig) self.assertIn(f' bgp bestpath as-path multipath-relax', frrconfig) self.assertIn(f' bgp bestpath bandwidth default-weight-for-missing', frrconfig) self.assertIn(f' bgp bestpath compare-routerid', frrconfig) self.assertIn(f' bgp bestpath peer-type multipath-relax', frrconfig) self.assertIn(f' bgp minimum-holdtime {min_hold_time}', frrconfig) self.assertIn(f' bgp reject-as-sets', frrconfig) self.assertIn(f' bgp route-reflector allow-outbound-policy', frrconfig) self.assertIn(f' bgp shutdown', frrconfig) self.assertIn(f' bgp suppress-fib-pending', frrconfig) self.assertIn(f' bgp tcp-keepalive {tcp_keepalive_idle} {tcp_keepalive_interval} {tcp_keepalive_probes}', frrconfig) self.assertNotIn(f'bgp ebgp-requires-policy', frrconfig) self.assertIn(f' no bgp suppress-duplicates', frrconfig) - afiv4_config = self.getFRRconfig(' address-family ipv4 unicast') + afiv4_config = self.getFRRconfig(' address-family ipv4 unicast', + endsection='^ exit-address-family', daemon=bgp_daemon) self.assertIn(f' maximum-paths {max_path_v4}', afiv4_config) self.assertIn(f' maximum-paths ibgp {max_path_v4ibgp}', afiv4_config) - afiv4_config = self.getFRRconfig(' address-family ipv4 labeled-unicast') + afiv4_config = self.getFRRconfig(' address-family ipv4 labeled-unicast', + endsection='^ exit-address-family', daemon=bgp_daemon) self.assertIn(f' maximum-paths {max_path_v4}', afiv4_config) self.assertIn(f' maximum-paths ibgp {max_path_v4ibgp}', afiv4_config) - afiv6_config = self.getFRRconfig(' address-family ipv6 unicast') + afiv6_config = self.getFRRconfig(' address-family ipv6 unicast', + endsection='^ exit-address-family', daemon=bgp_daemon) self.assertIn(f' maximum-paths {max_path_v6}', afiv6_config) self.assertIn(f' maximum-paths ibgp {max_path_v6ibgp}', afiv6_config) def test_bgp_02_neighbors(self): # Test out individual neighbor configuration items, not all of them are # also available to a peer-group! 
self.cli_set(base_path + ['parameters', 'deterministic-med']) for peer, peer_config in neighbor_config.items(): afi = 'ipv4-unicast' if is_ipv6(peer): afi = 'ipv6-unicast' if 'adv_interv' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'advertisement-interval', peer_config["adv_interv"]]) if 'bfd' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'bfd']) if 'bfd_profile' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'bfd', 'profile', peer_config["bfd_profile"]]) self.cli_set(base_path + ['neighbor', peer, 'bfd', 'check-control-plane-failure']) if 'cap_dynamic' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'capability', 'dynamic']) if 'cap_ext_next' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'capability', 'extended-nexthop']) if 'cap_ext_sver' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'capability', 'software-version']) if 'description' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'description', peer_config["description"]]) if 'no_cap_nego' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'disable-capability-negotiation']) if 'multi_hop' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'ebgp-multihop', peer_config["multi_hop"]]) if 'local_as' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'local-as', peer_config["local_as"], 'no-prepend', 'replace-as']) if 'local_role' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'local-role', peer_config["local_role"]]) if 'local_role_strict' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'local-role', peer_config["local_role"], 'strict']) if 'cap_over' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'override-capability']) if 'passive' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'passive']) if 'password' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'password', peer_config["password"]]) if 'port' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'port', peer_config["port"]]) if 'remote_as' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'remote-as', peer_config["remote_as"]]) if 'cap_strict' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'strict-capability-match']) if 'shutdown' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'shutdown']) if 'solo' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'solo']) if 'ttl_security' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'ttl-security', 'hops', peer_config["ttl_security"]]) if 'update_src' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'update-source', peer_config["update_src"]]) if 'p_attr_discard' in peer_config: for attribute in peer_config['p_attr_discard']: self.cli_set(base_path + ['neighbor', peer, 'path-attribute', 'discard', attribute]) if 'p_attr_taw' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'path-attribute', 'treat-as-withdraw', peer_config["p_attr_taw"]]) if 'route_map_in' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'route-map', 'import', peer_config["route_map_in"]]) if 'route_map_out' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'route-map', 'export', peer_config["route_map_out"]]) if 'pfx_list_in' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'prefix-list', 'import', peer_config["pfx_list_in"]]) if 'pfx_list_out' in peer_config: self.cli_set(base_path + ['neighbor', peer, 
'address-family', afi, 'prefix-list', 'export', peer_config["pfx_list_out"]]) if 'no_send_comm_std' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'disable-send-community', 'standard']) if 'no_send_comm_ext' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'disable-send-community', 'extended']) if 'addpath_all' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'addpath-tx-all']) if 'addpath_per_as' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'addpath-tx-per-as']) if 'graceful_rst' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'graceful-restart', 'enable']) if 'graceful_rst_no' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'graceful-restart', 'disable']) if 'graceful_rst_hlp' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'graceful-restart', 'restart-helper']) if 'disable_conn_chk' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'disable-connected-check']) # Conditional advertisement if 'advertise_map' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'advertise-map', peer_config["advertise_map"]]) # Either exist-map or non-exist-map needs to be specified if 'exist_map' not in peer_config and 'non_exist_map' not in peer_config: with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'exist-map', route_map_in]) if 'exist_map' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'exist-map', peer_config["exist_map"]]) if 'non_exist_map' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'non-exist-map', peer_config["non_exist_map"]]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'router bgp {ASN}') + frrconfig = self.getFRRconfig(f'router bgp {ASN}', endsection='^exit', daemon=bgp_daemon) self.assertIn(f'router bgp {ASN}', frrconfig) for peer, peer_config in neighbor_config.items(): if 'adv_interv' in peer_config: self.assertIn(f' neighbor {peer} advertisement-interval {peer_config["adv_interv"]}', frrconfig) if 'cap_strict' in peer_config: self.assertIn(f' neighbor {peer} strict-capability-match', frrconfig) self.verify_frr_config(peer, peer_config, frrconfig) def test_bgp_03_peer_groups(self): # Test out individual peer-group configuration items for peer_group, config in peer_group_config.items(): if 'bfd' in config: self.cli_set(base_path + ['peer-group', peer_group, 'bfd']) if 'bfd_profile' in config: self.cli_set(base_path + ['peer-group', peer_group, 'bfd', 'profile', config["bfd_profile"]]) self.cli_set(base_path + ['peer-group', peer_group, 'bfd', 'check-control-plane-failure']) if 'cap_dynamic' in config: self.cli_set(base_path + ['peer-group', peer_group, 'capability', 'dynamic']) if 'cap_ext_next' in config: self.cli_set(base_path + ['peer-group', peer_group, 'capability', 'extended-nexthop']) if 'cap_ext_sver' in config: self.cli_set(base_path + ['peer-group', peer_group, 'capability', 'software-version']) if 'description' in config: self.cli_set(base_path + ['peer-group', peer_group, 'description', config["description"]]) if 'no_cap_nego' in config: self.cli_set(base_path + ['peer-group', peer_group, 'disable-capability-negotiation']) if 'multi_hop' in config: 
self.cli_set(base_path + ['peer-group', peer_group, 'ebgp-multihop', config["multi_hop"]]) if 'local_as' in config: self.cli_set(base_path + ['peer-group', peer_group, 'local-as', config["local_as"], 'no-prepend', 'replace-as']) if 'local_role' in config: self.cli_set(base_path + ['peer-group', peer_group, 'local-role', config["local_role"]]) if 'local_role_strict' in config: self.cli_set(base_path + ['peer-group', peer_group, 'local-role', config["local_role"], 'strict']) if 'cap_over' in config: self.cli_set(base_path + ['peer-group', peer_group, 'override-capability']) if 'passive' in config: self.cli_set(base_path + ['peer-group', peer_group, 'passive']) if 'password' in config: self.cli_set(base_path + ['peer-group', peer_group, 'password', config["password"]]) if 'port' in config: self.cli_set(base_path + ['peer-group', peer_group, 'port', config["port"]]) if 'remote_as' in config: self.cli_set(base_path + ['peer-group', peer_group, 'remote-as', config["remote_as"]]) if 'shutdown' in config: self.cli_set(base_path + ['peer-group', peer_group, 'shutdown']) if 'ttl_security' in config: self.cli_set(base_path + ['peer-group', peer_group, 'ttl-security', 'hops', config["ttl_security"]]) if 'update_src' in config: self.cli_set(base_path + ['peer-group', peer_group, 'update-source', config["update_src"]]) if 'route_map_in' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'route-map', 'import', config["route_map_in"]]) if 'route_map_out' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'route-map', 'export', config["route_map_out"]]) if 'pfx_list_in' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'prefix-list', 'import', config["pfx_list_in"]]) if 'pfx_list_out' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'prefix-list', 'export', config["pfx_list_out"]]) if 'no_send_comm_std' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'disable-send-community', 'standard']) if 'no_send_comm_ext' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'disable-send-community', 'extended']) if 'addpath_all' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'addpath-tx-all']) if 'addpath_per_as' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'addpath-tx-per-as']) if 'graceful_rst' in config: self.cli_set(base_path + ['peer-group', peer_group, 'graceful-restart', 'enable']) if 'graceful_rst_no' in config: self.cli_set(base_path + ['peer-group', peer_group, 'graceful-restart', 'disable']) if 'graceful_rst_hlp' in config: self.cli_set(base_path + ['peer-group', peer_group, 'graceful-restart', 'restart-helper']) if 'disable_conn_chk' in config: self.cli_set(base_path + ['peer-group', peer_group, 'disable-connected-check']) if 'p_attr_discard' in config: for attribute in config['p_attr_discard']: self.cli_set(base_path + ['peer-group', peer_group, 'path-attribute', 'discard', attribute]) if 'p_attr_taw' in config: self.cli_set(base_path + ['peer-group', peer_group, 'path-attribute', 'treat-as-withdraw', config["p_attr_taw"]]) # Conditional advertisement if 'advertise_map' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'advertise-map', 
config["advertise_map"]]) # Either exist-map or non-exist-map needs to be specified if 'exist_map' not in config and 'non_exist_map' not in config: with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'exist-map', route_map_in]) if 'exist_map' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'exist-map', config["exist_map"]]) if 'non_exist_map' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'non-exist-map', config["non_exist_map"]]) for peer, peer_config in neighbor_config.items(): if 'peer_group' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'peer-group', peer_config['peer_group']]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'router bgp {ASN}') + frrconfig = self.getFRRconfig(f'router bgp {ASN}', endsection='^exit', daemon=bgp_daemon) self.assertIn(f'router bgp {ASN}', frrconfig) for peer, peer_config in peer_group_config.items(): self.assertIn(f' neighbor {peer_group} peer-group', frrconfig) self.verify_frr_config(peer, peer_config, frrconfig) for peer, peer_config in neighbor_config.items(): if 'peer_group' in peer_config: self.assertIn(f' neighbor {peer} peer-group {peer_config["peer_group"]}', frrconfig) def test_bgp_04_afi_ipv4(self): networks = { '10.0.0.0/8' : { 'as_set' : '', 'summary_only' : '', 'route_map' : route_map_in, }, '100.64.0.0/10' : { 'as_set' : '', }, '192.168.0.0/16' : { 'summary_only' : '', }, } # We want to redistribute ... redistributes = ['connected', 'isis', 'kernel', 'ospf', 'rip', 'static'] for redistribute in redistributes: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'redistribute', redistribute]) for network, network_config in networks.items(): self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'network', network]) if 'as_set' in network_config: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'aggregate-address', network, 'as-set']) if 'summary_only' in network_config: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'aggregate-address', network, 'summary-only']) if 'route_map' in network_config: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'aggregate-address', network, 'route-map', network_config['route_map']]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'router bgp {ASN}') + frrconfig = self.getFRRconfig(f'router bgp {ASN}', endsection='^exit', daemon=bgp_daemon) self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family ipv4 unicast', frrconfig) for redistribute in redistributes: self.assertIn(f' redistribute {redistribute}', frrconfig) for network, network_config in networks.items(): self.assertIn(f' network {network}', frrconfig) command = f'aggregate-address {network}' if 'as_set' in network_config: command = f'{command} as-set' if 'summary_only' in network_config: command = f'{command} summary-only' if 'route_map' in network_config: command = f'{command} route-map {network_config["route_map"]}' self.assertIn(command, frrconfig) def test_bgp_05_afi_ipv6(self): networks = { '2001:db8:100::/48' : { }, '2001:db8:200::/48' : { }, '2001:db8:300::/48' : { 'summary_only' : '', }, } # We want to redistribute ... 
redistributes = ['connected', 'kernel', 'ospfv3', 'ripng', 'static'] for redistribute in redistributes: self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'redistribute', redistribute]) for network, network_config in networks.items(): self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'network', network]) if 'summary_only' in network_config: self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'aggregate-address', network, 'summary-only']) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'router bgp {ASN}') + frrconfig = self.getFRRconfig(f'router bgp {ASN}', endsection='^exit', daemon=bgp_daemon) self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family ipv6 unicast', frrconfig) # T2100: By default ebgp-requires-policy is disabled to keep VyOS # 1.3 and 1.2 backwards compatibility self.assertIn(f' no bgp ebgp-requires-policy', frrconfig) for redistribute in redistributes: # FRR calls this OSPF6 if redistribute == 'ospfv3': redistribute = 'ospf6' self.assertIn(f' redistribute {redistribute}', frrconfig) for network, network_config in networks.items(): self.assertIn(f' network {network}', frrconfig) if 'as_set' in network_config: self.assertIn(f' aggregate-address {network} summary-only', frrconfig) def test_bgp_06_listen_range(self): # Implemented via T1875 limit = '64' listen_ranges = ['192.0.2.0/25', '192.0.2.128/25'] peer_group = 'listenfoobar' self.cli_set(base_path + ['listen', 'limit', limit]) for prefix in listen_ranges: self.cli_set(base_path + ['listen', 'range', prefix]) # check validate() - peer-group must be defined for range/prefix with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['listen', 'range', prefix, 'peer-group', peer_group]) # check validate() - peer-group does yet not exist! 
with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['peer-group', peer_group, 'remote-as', ASN]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'router bgp {ASN}') + frrconfig = self.getFRRconfig(f'router bgp {ASN}', endsection='^exit', daemon=bgp_daemon) self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {peer_group} peer-group', frrconfig) self.assertIn(f' neighbor {peer_group} remote-as {ASN}', frrconfig) self.assertIn(f' bgp listen limit {limit}', frrconfig) for prefix in listen_ranges: self.assertIn(f' bgp listen range {prefix} peer-group {peer_group}', frrconfig) def test_bgp_07_l2vpn_evpn(self): vnis = ['10010', '10020', '10030'] soo = '1.2.3.4:10000' evi_limit = '1000' route_targets = ['1.1.1.1:100', '1.1.1.1:200', '1.1.1.1:300'] self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'advertise-all-vni']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'advertise-default-gw']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'advertise-svi-ip']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'flooding', 'disable']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'default-originate', 'ipv4']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'default-originate', 'ipv6']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'disable-ead-evi-rx']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'disable-ead-evi-tx']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'mac-vrf', 'soo', soo]) for vni in vnis: self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'vni', vni, 'advertise-default-gw']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'vni', vni, 'advertise-svi-ip']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'ead-es-frag', 'evi-limit', evi_limit]) for route_target in route_targets: self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'ead-es-route-target', 'export', route_target]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'router bgp {ASN}') + frrconfig = self.getFRRconfig(f'router bgp {ASN}', endsection='^exit', daemon=bgp_daemon) self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family l2vpn evpn', frrconfig) self.assertIn(f' advertise-all-vni', frrconfig) self.assertIn(f' advertise-default-gw', frrconfig) self.assertIn(f' advertise-svi-ip', frrconfig) self.assertIn(f' default-originate ipv4', frrconfig) self.assertIn(f' default-originate ipv6', frrconfig) self.assertIn(f' disable-ead-evi-rx', frrconfig) self.assertIn(f' disable-ead-evi-tx', frrconfig) self.assertIn(f' flooding disable', frrconfig) self.assertIn(f' mac-vrf soo {soo}', frrconfig) for vni in vnis: - vniconfig = self.getFRRconfig(f' vni {vni}') + vniconfig = self.getFRRconfig(f' vni {vni}', endsection='^exit-vni') self.assertIn(f'vni {vni}', vniconfig) self.assertIn(f' advertise-default-gw', vniconfig) self.assertIn(f' advertise-svi-ip', vniconfig) self.assertIn(f' ead-es-frag evi-limit {evi_limit}', frrconfig) for route_target in route_targets: self.assertIn(f' ead-es-route-target export {route_target}', frrconfig) def test_bgp_09_distance_and_flowspec(self): distance_external = '25' distance_internal = '30' distance_local = '35' distance_v4_prefix = '169.254.0.0/32' distance_v6_prefix = '2001::/128' distance_prefix_value = '110' distance_families = ['ipv4-unicast', 'ipv6-unicast','ipv4-multicast', 'ipv6-multicast'] 
verify_families = ['ipv4 unicast', 'ipv6 unicast','ipv4 multicast', 'ipv6 multicast'] flowspec_families = ['address-family ipv4 flowspec', 'address-family ipv6 flowspec'] flowspec_int = 'lo' # Per family distance support for family in distance_families: self.cli_set(base_path + ['address-family', family, 'distance', 'external', distance_external]) self.cli_set(base_path + ['address-family', family, 'distance', 'internal', distance_internal]) self.cli_set(base_path + ['address-family', family, 'distance', 'local', distance_local]) if 'ipv4' in family: self.cli_set(base_path + ['address-family', family, 'distance', 'prefix', distance_v4_prefix, 'distance', distance_prefix_value]) if 'ipv6' in family: self.cli_set(base_path + ['address-family', family, 'distance', 'prefix', distance_v6_prefix, 'distance', distance_prefix_value]) # IPv4 flowspec interface check self.cli_set(base_path + ['address-family', 'ipv4-flowspec', 'local-install', 'interface', flowspec_int]) # IPv6 flowspec interface check self.cli_set(base_path + ['address-family', 'ipv6-flowspec', 'local-install', 'interface', flowspec_int]) # Commit changes self.cli_commit() # Verify FRR distances configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) for family in verify_families: self.assertIn(f'address-family {family}', frrconfig) self.assertIn(f'distance bgp {distance_external} {distance_internal} {distance_local}', frrconfig) if 'ipv4' in family: self.assertIn(f'distance {distance_prefix_value} {distance_v4_prefix}', frrconfig) if 'ipv6' in family: self.assertIn(f'distance {distance_prefix_value} {distance_v6_prefix}', frrconfig) # Verify FRR flowspec configuration for family in flowspec_families: self.assertIn(f'{family}', frrconfig) self.assertIn(f'local-install {flowspec_int}', frrconfig) def test_bgp_10_vrf_simple(self): router_id = '127.0.0.3' vrfs = ['red', 'green', 'blue'] # It is safe to assume that when the basic VRF test works, all # other BGP related features work, as we entirely inherit the CLI # templates and Jinja2 FRR template. 
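# --- Editorial aside, illustration only: the VRF test below checks for two kinds of FRR
# sections - the default instance, which imports each VRF under 'address-family ipv6 unicast',
# and one 'router bgp ... vrf <name>' instance per VRF carrying its router-id. The excerpt
# below is assembled from the asserted substrings; indentation, ordering and any surrounding
# lines emitted by FRR are assumptions, and <ASN> stands for the test's ASN constant.
_EXPECTED_VRF_LAYOUT_EXAMPLE = '''
router bgp <ASN>
 address-family ipv6 unicast
  import vrf red
 exit-address-family
exit
!
router bgp <ASN> vrf red
 bgp router-id 127.0.0.3
exit
'''
# --- end of aside ---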
table = '1000' # testing only one AFI is sufficient as it's generic code for vrf in vrfs: vrf_base = ['vrf', 'name', vrf] self.cli_set(vrf_base + ['table', table]) self.cli_set(vrf_base + ['protocols', 'bgp', 'system-as', ASN]) self.cli_set(vrf_base + ['protocols', 'bgp', 'parameters', 'router-id', router_id]) table = str(int(table) + 1000) # import VRF routes to main RIB self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'import', 'vrf', vrf]) self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family ipv6 unicast', frrconfig) for vrf in vrfs: self.assertIn(f' import vrf {vrf}', frrconfig) # Verify FRR bgpd configuration frr_vrf_config = self.getFRRconfig(f'router bgp {ASN} vrf {vrf}') self.assertIn(f'router bgp {ASN} vrf {vrf}', frr_vrf_config) self.assertIn(f' bgp router-id {router_id}', frr_vrf_config) def test_bgp_11_confederation(self): router_id = '127.10.10.2' confed_id = str(int(ASN) + 1) confed_asns = '10 20 30 40' self.cli_set(base_path + ['parameters', 'router-id', router_id]) self.cli_set(base_path + ['parameters', 'confederation', 'identifier', confed_id]) for asn in confed_asns.split(): self.cli_set(base_path + ['parameters', 'confederation', 'peers', asn]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' bgp router-id {router_id}', frrconfig) self.assertIn(f' bgp confederation identifier {confed_id}', frrconfig) self.assertIn(f' bgp confederation peers {confed_asns}', frrconfig) def test_bgp_12_v6_link_local(self): remote_asn = str(int(ASN) + 10) interface = 'eth0' self.cli_set(base_path + ['neighbor', interface, 'address-family', 'ipv6-unicast']) self.cli_set(base_path + ['neighbor', interface, 'interface', 'v6only', 'remote-as', remote_asn]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {interface} interface v6only remote-as {remote_asn}', frrconfig) self.assertIn(f' address-family ipv6 unicast', frrconfig) self.assertIn(f' neighbor {interface} activate', frrconfig) self.assertIn(f' exit-address-family', frrconfig) def test_bgp_13_vpn(self): remote_asn = str(int(ASN) + 150) neighbor = '192.0.2.55' vrf_name = 'red' label = 'auto' rd = f'{neighbor}:{ASN}' rt_export = f'{neighbor}:1002 1.2.3.4:567' rt_import = f'{neighbor}:1003 500:100' # testing only one AFI is sufficient as it's generic code for afi in ['ipv4-unicast', 'ipv6-unicast']: self.cli_set(base_path + ['address-family', afi, 'export', 'vpn']) self.cli_set(base_path + ['address-family', afi, 'import', 'vpn']) self.cli_set(base_path + ['address-family', afi, 'label', 'vpn', 'export', label]) self.cli_set(base_path + ['address-family', afi, 'label', 'vpn', 'allocation-mode', 'per-nexthop']) self.cli_set(base_path + ['address-family', afi, 'rd', 'vpn', 'export', rd]) self.cli_set(base_path + ['address-family', afi, 'route-map', 'vpn', 'export', route_map_out]) self.cli_set(base_path + ['address-family', afi, 'route-map', 'vpn', 'import', route_map_in]) self.cli_set(base_path + ['address-family', afi, 'route-target', 'vpn', 'export', rt_export]) self.cli_set(base_path + ['address-family', afi, 'route-target', 'vpn', 'import', rt_import]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig =
self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) for afi in ['ipv4', 'ipv6']: - afi_config = self.getFRRconfig(f' address-family {afi} unicast', endsection='exit-address-family', daemon='bgpd') + afi_config = self.getFRRconfig(f' address-family {afi} unicast', endsection='exit-address-family', daemon=bgp_daemon) self.assertIn(f'address-family {afi} unicast', afi_config) self.assertIn(f' export vpn', afi_config) self.assertIn(f' import vpn', afi_config) self.assertIn(f' label vpn export {label}', afi_config) self.assertIn(f' label vpn export allocation-mode per-nexthop', afi_config) self.assertIn(f' rd vpn export {rd}', afi_config) self.assertIn(f' route-map vpn export {route_map_out}', afi_config) self.assertIn(f' route-map vpn import {route_map_in}', afi_config) self.assertIn(f' rt vpn export {rt_export}', afi_config) self.assertIn(f' rt vpn import {rt_import}', afi_config) self.assertIn(f' exit-address-family', afi_config) def test_bgp_14_remote_as_peer_group_override(self): # Peer-group member cannot override remote-as of peer-group remote_asn = str(int(ASN) + 150) neighbor = '192.0.2.1' peer_group = 'bar' interface = 'eth0' self.cli_set(base_path + ['neighbor', neighbor, 'remote-as', remote_asn]) self.cli_set(base_path + ['neighbor', neighbor, 'peer-group', peer_group]) self.cli_set(base_path + ['peer-group', peer_group, 'remote-as', remote_asn]) # Peer-group member cannot override remote-as of peer-group with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['neighbor', neighbor, 'remote-as']) # re-test with interface based peer-group self.cli_set(base_path + ['neighbor', interface, 'interface', 'peer-group', peer_group]) self.cli_set(base_path + ['neighbor', interface, 'interface', 'remote-as', 'external']) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['neighbor', interface, 'interface', 'remote-as']) # re-test with interface based v6only peer-group self.cli_set(base_path + ['neighbor', interface, 'interface', 'v6only', 'peer-group', peer_group]) self.cli_set(base_path + ['neighbor', interface, 'interface', 'v6only', 'remote-as', 'external']) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['neighbor', interface, 'interface', 'v6only', 'remote-as']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {neighbor} peer-group {peer_group}', frrconfig) self.assertIn(f' neighbor {peer_group} peer-group', frrconfig) self.assertIn(f' neighbor {peer_group} remote-as {remote_asn}', frrconfig) def test_bgp_15_local_as_ebgp(self): # https://vyos.dev/T4560 # local-as allowed only for ebgp peers neighbor = '192.0.2.99' remote_asn = '500' local_asn = '400' self.cli_set(base_path + ['neighbor', neighbor, 'remote-as', ASN]) self.cli_set(base_path + ['neighbor', neighbor, 'local-as', local_asn]) # check validate() - local-as allowed only for ebgp peers with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['neighbor', neighbor, 'remote-as', remote_asn]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {neighbor} remote-as {remote_asn}', frrconfig) self.assertIn(f' neighbor {neighbor} local-as {local_asn}', frrconfig) def test_bgp_16_import_rd_rt_compatibility(self): # Verify if import vrf and rd vpn export # exist in the same address 
family self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) self.cli_set( base_path + ['address-family', import_afi, 'rd', 'vpn', 'export', import_rd]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_17_import_rd_rt_compatibility(self): # Verify if vrf that is in import vrf list contains rd vpn export self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'address-family ipv4 unicast', frrconfig) self.assertIn(f' import vrf {import_vrf}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.cli_set( import_vrf_base + [import_vrf] + base_path + ['address-family', import_afi, 'rd', 'vpn', 'export', import_rd]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_18_deleting_import_vrf(self): # Verify deleting vrf that is in import vrf list self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'address-family ipv4 unicast', frrconfig) self.assertIn(f' import vrf {import_vrf}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.cli_delete(import_vrf_base + [import_vrf]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_19_deleting_default_vrf(self): # Verify deleting existent vrf default if other vrfs were created self.create_bgp_instances_for_import_test() self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.cli_delete(base_path) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_20_import_rd_rt_compatibility(self): # Verify if vrf that has rd vpn export is in import vrf of other vrfs self.create_bgp_instances_for_import_test() self.cli_set( import_vrf_base + [import_vrf] + base_path + ['address-family', import_afi, 'rd', 'vpn', 'export', import_rd]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.assertIn(f'address-family ipv4 unicast', frrconfig_vrf) self.assertIn(f' rd vpn export {import_rd}', frrconfig_vrf) self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_21_import_unspecified_vrf(self): # Verify if vrf that is in import is unspecified self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', 'test']) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_22_interface_mpls_forwarding(self): interfaces = Section.interfaces('ethernet', vlan=False) for interface in interfaces: self.cli_set(base_path + 
['interface', interface, 'mpls', 'forwarding']) self.cli_commit() for interface in interfaces: frrconfig = self.getFRRconfig(f'interface {interface}') self.assertIn(f'interface {interface}', frrconfig) self.assertIn(f' mpls bgp forwarding', frrconfig) def test_bgp_23_vrf_interface_mpls_forwarding(self): self.create_bgp_instances_for_import_test() interfaces = Section.interfaces('ethernet', vlan=False) for interface in interfaces: self.cli_set(['interfaces', 'ethernet', interface, 'vrf', import_vrf]) self.cli_set(import_vrf_base + [import_vrf] + base_path + ['interface', interface, 'mpls', 'forwarding']) self.cli_commit() for interface in interfaces: frrconfig = self.getFRRconfig(f'interface {interface}') self.assertIn(f'interface {interface}', frrconfig) self.assertIn(f' mpls bgp forwarding', frrconfig) self.cli_delete(['interfaces', 'ethernet', interface, 'vrf']) def test_bgp_24_srv6_sid(self): locator_name = 'VyOS_foo' sid = 'auto' nexthop_ipv4 = '192.0.0.1' nexthop_ipv6 = '2001:db8:100:200::2' self.cli_set(base_path + ['srv6', 'locator', locator_name]) self.cli_set(base_path + ['sid', 'vpn', 'per-vrf', 'export', sid]) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'sid', 'vpn', 'export', sid]) # verify() - SID per VRF and SID per address-family are mutually exclusive! with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['address-family', 'ipv4-unicast', 'sid']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' segment-routing srv6', frrconfig) self.assertIn(f' locator {locator_name}', frrconfig) self.assertIn(f' sid vpn per-vrf export {sid}', frrconfig) # Now test AFI SID self.cli_delete(base_path + ['sid']) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'sid', 'vpn', 'export', sid]) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'nexthop', 'vpn', 'export', nexthop_ipv4]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'sid', 'vpn', 'export', sid]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'nexthop', 'vpn', 'export', nexthop_ipv6]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' segment-routing srv6', frrconfig) self.assertIn(f' locator {locator_name}', frrconfig) afiv4_config = self.getFRRconfig(' address-family ipv4 unicast') self.assertIn(f' sid vpn export {sid}', afiv4_config) self.assertIn(f' nexthop vpn export {nexthop_ipv4}', afiv4_config) afiv6_config = self.getFRRconfig(' address-family ipv6 unicast') self.assertIn(f' sid vpn export {sid}', afiv6_config) self.assertIn(f' nexthop vpn export {nexthop_ipv6}', afiv6_config) def test_bgp_25_ipv4_labeled_unicast_peer_group(self): pg_ipv4 = 'foo4' ipv4_max_prefix = '20' ipv4_prefix = '192.0.2.0/24' self.cli_set(base_path + ['listen', 'range', ipv4_prefix, 'peer-group', pg_ipv4]) self.cli_set(base_path + ['parameters', 'labeled-unicast', 'ipv4-explicit-null']) self.cli_set(base_path + ['peer-group', pg_ipv4, 'address-family', 'ipv4-labeled-unicast', 'maximum-prefix', ipv4_max_prefix]) self.cli_set(base_path + ['peer-group', pg_ipv4, 'remote-as', 'external']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {pg_ipv4} peer-group', frrconfig) self.assertIn(f' neighbor {pg_ipv4} remote-as external', frrconfig) self.assertIn(f' bgp listen range {ipv4_prefix} peer-group {pg_ipv4}',
frrconfig) self.assertIn(f' bgp labeled-unicast ipv4-explicit-null', frrconfig) afiv4_config = self.getFRRconfig(' address-family ipv4 labeled-unicast') self.assertIn(f' neighbor {pg_ipv4} activate', afiv4_config) self.assertIn(f' neighbor {pg_ipv4} maximum-prefix {ipv4_max_prefix}', afiv4_config) def test_bgp_26_ipv6_labeled_unicast_peer_group(self): pg_ipv6 = 'foo6' ipv6_max_prefix = '200' ipv6_prefix = '2001:db8:1000::/64' self.cli_set(base_path + ['listen', 'range', ipv6_prefix, 'peer-group', pg_ipv6]) self.cli_set(base_path + ['parameters', 'labeled-unicast', 'ipv6-explicit-null']) self.cli_set(base_path + ['peer-group', pg_ipv6, 'address-family', 'ipv6-labeled-unicast', 'maximum-prefix', ipv6_max_prefix]) self.cli_set(base_path + ['peer-group', pg_ipv6, 'remote-as', 'external']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {pg_ipv6} peer-group', frrconfig) self.assertIn(f' neighbor {pg_ipv6} remote-as external', frrconfig) self.assertIn(f' bgp listen range {ipv6_prefix} peer-group {pg_ipv6}', frrconfig) self.assertIn(f' bgp labeled-unicast ipv6-explicit-null', frrconfig) afiv6_config = self.getFRRconfig(' address-family ipv6 labeled-unicast') self.assertIn(f' neighbor {pg_ipv6} activate', afiv6_config) self.assertIn(f' neighbor {pg_ipv6} maximum-prefix {ipv6_max_prefix}', afiv6_config) def test_bgp_27_route_reflector_client(self): self.cli_set(base_path + ['peer-group', 'peer1', 'address-family', 'l2vpn-evpn', 'route-reflector-client']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() self.cli_set(base_path + ['peer-group', 'peer1', 'remote-as', 'internal']) self.cli_commit() conf = self.getFRRconfig(' address-family l2vpn evpn') self.assertIn('neighbor peer1 route-reflector-client', conf) def test_bgp_28_peer_group_member_all_internal_or_external(self): def _common_config_check(conf, include_ras=True): if include_ras: self.assertIn(f'neighbor {int_neighbors[0]} remote-as {ASN}', conf) self.assertIn(f'neighbor {int_neighbors[1]} remote-as {ASN}', conf) self.assertIn(f'neighbor {ext_neighbors[0]} remote-as {int(ASN) + 1}',conf) self.assertIn(f'neighbor {int_neighbors[0]} peer-group {int_pg_name}', conf) self.assertIn(f'neighbor {int_neighbors[1]} peer-group {int_pg_name}', conf) self.assertIn(f'neighbor {ext_neighbors[0]} peer-group {ext_pg_name}', conf) int_neighbors = ['192.0.2.2', '192.0.2.3'] ext_neighbors = ['192.122.2.2', '192.122.2.3'] int_pg_name, ext_pg_name = 'SMOKETESTINT', 'SMOKETESTEXT' self.cli_set(base_path + ['neighbor', int_neighbors[0], 'peer-group', int_pg_name]) self.cli_set(base_path + ['neighbor', int_neighbors[0], 'remote-as', ASN]) self.cli_set(base_path + ['peer-group', int_pg_name, 'address-family', 'ipv4-unicast']) self.cli_set(base_path + ['neighbor', ext_neighbors[0], 'peer-group', ext_pg_name]) self.cli_set(base_path + ['neighbor', ext_neighbors[0], 'remote-as', f'{int(ASN) + 1}']) self.cli_set(base_path + ['peer-group', ext_pg_name, 'address-family', 'ipv4-unicast']) self.cli_commit() # test add external remote-as to internal group self.cli_set(base_path + ['neighbor', int_neighbors[1], 'peer-group', int_pg_name]) self.cli_set(base_path + ['neighbor', int_neighbors[1], 'remote-as', f'{int(ASN) + 1}']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() # self.assertIn('\nPeer-group members must be all internal or all external\n', str(e.exception)) # test add internal remote-as to internal group self.cli_set(base_path + 
['neighbor', int_neighbors[1], 'remote-as', ASN]) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') _common_config_check(conf) # test add internal remote-as to external group self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'peer-group', ext_pg_name]) self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'remote-as', ASN]) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() # self.assertIn('\nPeer-group members must be all internal or all external\n', str(e.exception)) # test add external remote-as to external group self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'remote-as', f'{int(ASN) + 2}']) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') _common_config_check(conf) self.assertIn(f'neighbor {ext_neighbors[1]} remote-as {int(ASN) + 2}', conf) self.assertIn(f'neighbor {ext_neighbors[1]} peer-group {ext_pg_name}', conf) # test named remote-as self.cli_set(base_path + ['neighbor', int_neighbors[0], 'remote-as', 'internal']) self.cli_set(base_path + ['neighbor', int_neighbors[1], 'remote-as', 'internal']) self.cli_set(base_path + ['neighbor', ext_neighbors[0], 'remote-as', 'external']) self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'remote-as', 'external']) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') _common_config_check(conf, include_ras=False) self.assertIn(f'neighbor {int_neighbors[0]} remote-as internal', conf) self.assertIn(f'neighbor {int_neighbors[1]} remote-as internal', conf) self.assertIn(f'neighbor {ext_neighbors[0]} remote-as external', conf) self.assertIn(f'neighbor {ext_neighbors[1]} remote-as external', conf) self.assertIn(f'neighbor {ext_neighbors[1]} peer-group {ext_pg_name}', conf) def test_bgp_29_peer_group_remote_as_equal_local_as(self): self.cli_set(base_path + ['system-as', ASN]) self.cli_set(base_path + ['peer-group', 'OVERLAY', 'local-as', f'{int(ASN) + 1}']) self.cli_set(base_path + ['peer-group', 'OVERLAY', 'remote-as', f'{int(ASN) + 1}']) self.cli_set(base_path + ['peer-group', 'OVERLAY', 'address-family', 'l2vpn-evpn']) self.cli_set(base_path + ['peer-group', 'UNDERLAY', 'address-family', 'ipv4-unicast']) self.cli_set(base_path + ['neighbor', '10.177.70.62', 'peer-group', 'UNDERLAY']) self.cli_set(base_path + ['neighbor', '10.177.70.62', 'remote-as', 'external']) self.cli_set(base_path + ['neighbor', '10.177.75.1', 'peer-group', 'OVERLAY']) self.cli_set(base_path + ['neighbor', '10.177.75.2', 'peer-group', 'OVERLAY']) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'neighbor OVERLAY remote-as {int(ASN) + 1}', conf) self.assertIn(f'neighbor OVERLAY local-as {int(ASN) + 1}', conf) def test_bgp_99_bmp(self): target_name = 'instance-bmp' target_address = '127.0.0.1' target_port = '5000' min_retry = '1024' max_retry = '2048' monitor_ipv4 = 'pre-policy' monitor_ipv6 = 'pre-policy' mirror_buffer = '32000000' bmp_path = base_path + ['bmp'] target_path = bmp_path + ['target', target_name] # by default the 'bmp' module is not loaded for bgpd, expect an error self.cli_set(bmp_path) if not process_named_running('bgpd', 'bmp'): with self.assertRaises(ConfigSessionError): self.cli_commit() # add required 'bmp' module to bgpd and restart bgpd self.cli_delete(bmp_path) self.cli_set(['system', 'frr', 'bmp']) self.cli_commit() # restart bgpd to apply "-M bmp" and update PID cmd(f'sudo kill -9 {self.daemon_pid}') # let the bgpd process recover sleep(10) # update daemon PID - this was a planned daemon restart - self.daemon_pid = process_named_running(PROCESS_NAME) +
self.daemon_pid = process_named_running(bgp_daemon) # set bmp config but not set address self.cli_set(target_path + ['port', target_port]) # address is not set, expect Error with self.assertRaises(ConfigSessionError): self.cli_commit() # config other bmp options self.cli_set(target_path + ['address', target_address]) self.cli_set(bmp_path + ['mirror-buffer-limit', mirror_buffer]) self.cli_set(target_path + ['port', target_port]) self.cli_set(target_path + ['min-retry', min_retry]) self.cli_set(target_path + ['max-retry', max_retry]) self.cli_set(target_path + ['mirror']) self.cli_set(target_path + ['monitor', 'ipv4-unicast', monitor_ipv4]) self.cli_set(target_path + ['monitor', 'ipv6-unicast', monitor_ipv6]) self.cli_commit() # Verify bgpd bmp configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'bmp mirror buffer-limit {mirror_buffer}', frrconfig) self.assertIn(f'bmp targets {target_name}', frrconfig) self.assertIn(f'bmp mirror', frrconfig) self.assertIn(f'bmp monitor ipv4 unicast {monitor_ipv4}', frrconfig) self.assertIn(f'bmp monitor ipv6 unicast {monitor_ipv6}', frrconfig) self.assertIn(f'bmp connect {target_address} port {target_port} min-retry {min_retry} max-retry {max_retry}', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_isis.py b/smoketest/scripts/cli/test_protocols_isis.py index 769f3dd33..bde32a1ca 100755 --- a/smoketest/scripts/cli/test_protocols_isis.py +++ b/smoketest/scripts/cli/test_protocols_isis.py @@ -1,416 +1,416 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
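# --- Editorial aside, not part of the diff: the smoketests below replace their local
# PROCESS_NAME constants with names imported from vyos.frrender. That module itself is
# not included in this changeset; judging from the strings being replaced, it is assumed
# to export plain daemon-name constants roughly like this:
#
#     bgp_daemon = 'bgpd'
#     isis_daemon = 'isisd'
#     ldpd_daemon = 'ldpd'
#     openfabric_daemon = 'fabricd'
#     ospf_daemon = 'ospfd'
#
# Centralising the names means a renamed or merged FRR daemon only needs updating once
# instead of in every test file.
# --- end of aside ---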
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section from vyos.utils.process import process_named_running +from vyos.frrender import isis_daemon -PROCESS_NAME = 'isisd' base_path = ['protocols', 'isis'] domain = 'VyOS' net = '49.0001.1921.6800.1002.00' class TestProtocolsISIS(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): cls._interfaces = Section.interfaces('ethernet') # call base-classes classmethod super(TestProtocolsISIS, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(isis_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) cls.cli_delete(cls, ['vrf']) def tearDown(self): # cleanup any possible VRF mess self.cli_delete(['vrf']) # always destroy the entire isisd configuration to make the process's # life as hard as possible self.cli_delete(base_path) self.cli_commit() # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(isis_daemon)) def isis_base_config(self): self.cli_set(base_path + ['net', net]) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface]) def test_isis_01_redistribute(self): prefix_list = 'EXPORT-ISIS' route_map = 'EXPORT-ISIS' rule = '10' metric_style = 'transition' self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', rule, 'action', 'permit']) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', rule, 'prefix', '203.0.113.0/24']) self.cli_set(['policy', 'route-map', route_map, 'rule', rule, 'action', 'permit']) self.cli_set(['policy', 'route-map', route_map, 'rule', rule, 'match', 'ip', 'address', 'prefix-list', prefix_list]) self.cli_set(base_path) # verify() - net id and interface are mandatory with self.assertRaises(ConfigSessionError): self.cli_commit() self.isis_base_config() self.cli_set(base_path + ['redistribute', 'ipv4', 'connected']) # verify() - Redistribute level-1 or level-2 should be specified with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['redistribute', 'ipv4', 'connected', 'level-2', 'route-map', route_map]) self.cli_set(base_path + ['metric-style', metric_style]) self.cli_set(base_path + ['log-adjacency-changes']) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' metric-style {metric_style}', tmp) self.assertIn(f' log-adjacency-changes', tmp) self.assertIn(f' redistribute ipv4 connected level-2 route-map {route_map}', tmp) for interface in self._interfaces: - tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f' ip router isis {domain}', tmp) self.assertIn(f' ipv6 router isis {domain}', tmp) self.cli_delete(['policy', 'route-map', route_map]) self.cli_delete(['policy', 'prefix-list', prefix_list]) def test_isis_02_vrfs(self): vrfs = ['red', 'green', 'blue'] # It is safe to assume that when the basic VRF test works, all other # IS-IS related features work, as we entirely inherit the CLI templates # and Jinja2 FRR
template. table = '1000' vrf = 'red' vrf_base = ['vrf', 'name', vrf] vrf_iface = 'eth1' self.cli_set(vrf_base + ['table', table]) self.cli_set(vrf_base + ['protocols', 'isis', 'net', net]) self.cli_set(vrf_base + ['protocols', 'isis', 'interface', vrf_iface]) self.cli_set(vrf_base + ['protocols', 'isis', 'advertise-high-metrics']) self.cli_set(vrf_base + ['protocols', 'isis', 'advertise-passive-only']) self.cli_set(['interfaces', 'ethernet', vrf_iface, 'vrf', vrf]) # Also set a default VRF IS-IS config self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', 'eth0']) self.cli_commit() # Verify FRR isisd configuration - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f'router isis {domain}', tmp) self.assertIn(f' net {net}', tmp) - tmp = self.getFRRconfig(f'router isis {domain} vrf {vrf}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain} vrf {vrf}', daemon=isis_daemon) self.assertIn(f'router isis {domain} vrf {vrf}', tmp) self.assertIn(f' net {net}', tmp) self.assertIn(f' advertise-high-metrics', tmp) self.assertIn(f' advertise-passive-only', tmp) self.cli_delete(['vrf', 'name', vrf]) self.cli_delete(['interfaces', 'ethernet', vrf_iface, 'vrf']) def test_isis_04_default_information(self): metric = '50' route_map = 'default-foo-' self.isis_base_config() for afi in ['ipv4', 'ipv6']: for level in ['level-1', 'level-2']: self.cli_set(base_path + ['default-information', 'originate', afi, level, 'always']) self.cli_set(base_path + ['default-information', 'originate', afi, level, 'metric', metric]) self.cli_set(base_path + ['default-information', 'originate', afi, level, 'route-map', route_map + level + afi]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) for afi in ['ipv4', 'ipv6']: for level in ['level-1', 'level-2']: route_map_name = route_map + level + afi self.assertIn(f' default-information originate {afi} {level} always route-map {route_map_name} metric {metric}', tmp) def test_isis_05_password(self): password = 'foo' self.isis_base_config() for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'password', 'plaintext-password', f'{password}-{interface}']) self.cli_set(base_path + ['area-password', 'plaintext-password', password]) self.cli_set(base_path + ['area-password', 'md5', password]) self.cli_set(base_path + ['domain-password', 'plaintext-password', password]) self.cli_set(base_path + ['domain-password', 'md5', password]) # verify() - can not use both md5 and plaintext-password for area-password with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['area-password', 'md5', password]) # verify() - can not use both md5 and plaintext-password for domain-password with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['domain-password', 'md5', password]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' domain-password clear {password}', tmp) self.assertIn(f' area-password clear {password}', tmp) for interface in self._interfaces: - tmp = self.getFRRconfig(f'interface 
{interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f' isis password clear {password}-{interface}', tmp) def test_isis_06_spf_delay_bfd(self): network = 'point-to-point' holddown = '10' init_delay = '50' long_delay = '200' short_delay = '100' time_to_learn = '75' bfd_profile = 'isis-bfd' self.cli_set(base_path + ['net', net]) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'network', network]) self.cli_set(base_path + ['interface', interface, 'bfd', 'profile', bfd_profile]) self.cli_set(base_path + ['spf-delay-ietf', 'holddown', holddown]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'init-delay', init_delay]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'long-delay', long_delay]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'short-delay', short_delay]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'time-to-learn', time_to_learn]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' spf-delay-ietf init-delay {init_delay} short-delay {short_delay} long-delay {long_delay} holddown {holddown} time-to-learn {time_to_learn}', tmp) for interface in self._interfaces: - tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f' ip router isis {domain}', tmp) self.assertIn(f' ipv6 router isis {domain}', tmp) self.assertIn(f' isis network {network}', tmp) self.assertIn(f' isis bfd', tmp) self.assertIn(f' isis bfd profile {bfd_profile}', tmp) def test_isis_07_segment_routing_configuration(self): global_block_low = "300" global_block_high = "399" local_block_low = "400" local_block_high = "499" interface = 'lo' maximum_stack_size = '5' prefix_one = '192.168.0.1/32' prefix_two = '192.168.0.2/32' prefix_three = '192.168.0.3/32' prefix_four = '192.168.0.4/32' prefix_one_value = '1' prefix_two_value = '2' prefix_three_value = '60000' prefix_four_value = '65000' self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) self.cli_set(base_path + ['segment-routing', 'maximum-label-depth', maximum_stack_size]) self.cli_set(base_path + ['segment-routing', 'global-block', 'low-label-value', global_block_low]) self.cli_set(base_path + ['segment-routing', 'global-block', 'high-label-value', global_block_high]) self.cli_set(base_path + ['segment-routing', 'local-block', 'low-label-value', local_block_low]) self.cli_set(base_path + ['segment-routing', 'local-block', 'high-label-value', local_block_high]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_one, 'index', 'value', prefix_one_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_one, 'index', 'explicit-null']) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_two, 'index', 'value', prefix_two_value]) self.cli_set(base_path + ['segment-routing', 
'prefix', prefix_two, 'index', 'no-php-flag']) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_three, 'absolute', 'value', prefix_three_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_three, 'absolute', 'explicit-null']) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_four, 'absolute', 'value', prefix_four_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_four, 'absolute', 'no-php-flag']) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' segment-routing on', tmp) self.assertIn(f' segment-routing global-block {global_block_low} {global_block_high} local-block {local_block_low} {local_block_high}', tmp) self.assertIn(f' segment-routing node-msd {maximum_stack_size}', tmp) self.assertIn(f' segment-routing prefix {prefix_one} index {prefix_one_value} explicit-null', tmp) self.assertIn(f' segment-routing prefix {prefix_two} index {prefix_two_value} no-php-flag', tmp) self.assertIn(f' segment-routing prefix {prefix_three} absolute {prefix_three_value} explicit-null', tmp) self.assertIn(f' segment-routing prefix {prefix_four} absolute {prefix_four_value} no-php-flag', tmp) def test_isis_08_ldp_sync(self): holddown = "500" interface = 'lo' self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) self.cli_set(base_path + ['ldp-sync', 'holddown', holddown]) # Commit main ISIS changes self.cli_commit() # Verify main ISIS changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' mpls ldp-sync', tmp) self.assertIn(f' mpls ldp-sync holddown {holddown}', tmp) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'ldp-sync', 'holddown', holddown]) # Commit interface changes for holddown self.cli_commit() for interface in self._interfaces: # Verify interface changes for holddown - tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f'interface {interface}', tmp) self.assertIn(f' ip router isis {domain}', tmp) self.assertIn(f' ipv6 router isis {domain}', tmp) self.assertIn(f' isis mpls ldp-sync holddown {holddown}', tmp) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'ldp-sync', 'disable']) # Commit interface changes for disable self.cli_commit() for interface in self._interfaces: # Verify interface changes for disable - tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f'interface {interface}', tmp) self.assertIn(f' ip router isis {domain}', tmp) self.assertIn(f' ipv6 router isis {domain}', tmp) self.assertIn(f' no isis mpls ldp-sync', tmp) def test_isis_09_lfa(self): prefix_list = 'lfa-prefix-list-test-1' prefix_list_address = '192.168.255.255/32' interface = 'lo' self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '1', 'action', 'permit']) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '1', 'prefix', prefix_list_address]) # Commit main ISIS changes self.cli_commit() # Add remote portion of LFA 
with prefix list with validation for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'remote', 'prefix-list', prefix_list, level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute remote-lfa prefix-list {prefix_list} {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Add local portion of LFA load-sharing portion with validation for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'local', 'load-sharing', 'disable', level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute load-sharing disable {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Add local portion of LFA priority-limit portion with validation for priority in ['critical', 'high', 'medium']: for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'local', 'priority-limit', priority, level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute priority-limit {priority} {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Add local portion of LFA tiebreaker portion with validation index = '100' for tiebreaker in ['downstream','lowest-backup-metric','node-protecting']: for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'local', 'tiebreaker', tiebreaker, 'index', index, level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute lfa tiebreaker {tiebreaker} index {index} {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Clean up and remove prefix list self.cli_delete(['policy', 'prefix-list', prefix_list]) self.cli_commit() def test_isis_10_topology(self): topologies = ['ipv4-multicast', 'ipv4-mgmt', 'ipv6-unicast', 'ipv6-multicast', 'ipv6-mgmt'] interface = 'lo' # Set a basic IS-IS config self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) for topology in topologies: self.cli_set(base_path + ['topology', topology]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' topology {topology}', tmp) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_mpls.py b/smoketest/scripts/cli/test_protocols_mpls.py index 0c1599f9b..88528973d 100755 --- a/smoketest/scripts/cli/test_protocols_mpls.py +++ b/smoketest/scripts/cli/test_protocols_mpls.py @@ -1,120 +1,120 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frrender import ldpd_daemon from vyos.utils.process import process_named_running -PROCESS_NAME = 'ldpd' base_path = ['protocols', 'mpls', 'ldp'] peers = { '192.0.2.10' : { 'intv_rx' : '500', 'intv_tx' : '600', 'multihop' : '', 'source_addr': '192.0.2.254', }, '192.0.2.20' : { 'echo_mode' : '', 'intv_echo' : '100', 'intv_mult' : '100', 'intv_rx' : '222', 'intv_tx' : '333', 'passive' : '', 'shutdown' : '', }, '2001:db8::a' : { 'source_addr': '2001:db8::1', }, '2001:db8::b' : { 'source_addr': '2001:db8::1', 'multihop' : '', }, } profiles = { 'foo' : { 'echo_mode' : '', 'intv_echo' : '100', 'intv_mult' : '101', 'intv_rx' : '222', 'intv_tx' : '333', 'shutdown' : '', }, 'bar' : { 'intv_mult' : '102', 'intv_rx' : '444', 'passive' : '', }, } class TestProtocolsMPLS(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsMPLS, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(ldpd_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(ldpd_daemon)) def test_mpls_basic(self): router_id = '1.2.3.4' transport_ipv4_addr = '5.6.7.8' interfaces = Section.interfaces('ethernet') self.cli_set(base_path + ['router-id', router_id]) # At least one LDP interface must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() for interface in interfaces: self.cli_set(base_path + ['interface', interface]) # LDP transport address missing with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['discovery', 'transport-ipv4-address', transport_ipv4_addr]) # Commit changes self.cli_commit() # Validate configuration - frrconfig = self.getFRRconfig('mpls ldp', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('mpls ldp', daemon=ldpd_daemon) self.assertIn(f'mpls ldp', frrconfig) self.assertIn(f' router-id {router_id}', frrconfig) # Validate AFI IPv4 - afiv4_config = self.getFRRconfig(' address-family ipv4', daemon=PROCESS_NAME) + afiv4_config = self.getFRRconfig(' address-family ipv4', daemon=ldpd_daemon) self.assertIn(f' discovery transport-address {transport_ipv4_addr}', afiv4_config) for interface in interfaces: self.assertIn(f' interface {interface}', afiv4_config) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_openfabric.py b/smoketest/scripts/cli/test_protocols_openfabric.py index e37aed456..f1372d7ab 100644 --- a/smoketest/scripts/cli/test_protocols_openfabric.py +++ b/smoketest/scripts/cli/test_protocols_openfabric.py @@ -1,186 +1,186 @@ #!/usr/bin/env python3 # # Copyright (C) 2024 VyOS 
maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.utils.process import process_named_running +from vyos.frrender import openfabric_daemon -PROCESS_NAME = 'fabricd' base_path = ['protocols', 'openfabric'] domain = 'VyOS' net = '49.0001.1111.1111.1111.00' dummy_if = 'dum1234' address_families = ['ipv4', 'ipv6'] path = base_path + ['domain', domain] class TestProtocolsOpenFabric(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): # call base-classes classmethod super(TestProtocolsOpenFabric, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(openfabric_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(openfabric_daemon)) def openfabric_base_config(self): self.cli_set(['interfaces', 'dummy', dummy_if]) self.cli_set(base_path + ['net', net]) for family in address_families: self.cli_set(path + ['interface', dummy_if, 'address-family', family]) def test_openfabric_01_router_params(self): fabric_tier = '5' lsp_gen_interval = '20' self.cli_set(base_path) # verify() - net id and domain name are mandatory with self.assertRaises(ConfigSessionError): self.cli_commit() self.openfabric_base_config() self.cli_set(path + ['log-adjacency-changes']) self.cli_set(path + ['set-overload-bit']) self.cli_set(path + ['fabric-tier', fabric_tier]) self.cli_set(path + ['lsp-gen-interval', lsp_gen_interval]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' log-adjacency-changes', tmp) self.assertIn(f' set-overload-bit', tmp) self.assertIn(f' fabric-tier {fabric_tier}', tmp) self.assertIn(f' lsp-gen-interval {lsp_gen_interval}', tmp) - tmp = self.getFRRconfig(f'interface {dummy_if}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {dummy_if}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain}', tmp) self.assertIn(f' ipv6 router openfabric {domain}', tmp) def test_openfabric_02_loopback_interface(self): interface = 'lo' hello_interval = '100' metric = '24478' self.openfabric_base_config() self.cli_set(path + ['interface', interface, 'address-family', 'ipv4']) self.cli_set(path + ['interface', interface, 'hello-interval', hello_interval]) self.cli_set(path + ['interface', interface, 'metric', metric]) # Commit all changes 
self.cli_commit() # Verify FRR openfabric configuration - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f'router openfabric {domain}', tmp) self.assertIn(f' net {net}', tmp) # Verify interface configuration - tmp = self.getFRRconfig(f'interface {interface}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain}', tmp) # for lo interface 'openfabric passive' is implied self.assertIn(f' openfabric passive', tmp) self.assertIn(f' openfabric metric {metric}', tmp) def test_openfabric_03_password(self): password = 'foo' self.openfabric_base_config() self.cli_set(path + ['interface', dummy_if, 'password', 'plaintext-password', f'{password}-{dummy_if}']) self.cli_set(path + ['interface', dummy_if, 'password', 'md5', f'{password}-{dummy_if}']) # verify() - can not use both md5 and plaintext-password for password for the interface with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(path + ['interface', dummy_if, 'password', 'md5']) self.cli_set(path + ['domain-password', 'plaintext-password', password]) self.cli_set(path + ['domain-password', 'md5', password]) # verify() - can not use both md5 and plaintext-password for domain-password with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(path + ['domain-password', 'md5']) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' domain-password clear {password}', tmp) - tmp = self.getFRRconfig(f'interface {dummy_if}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {dummy_if}', daemon=openfabric_daemon) self.assertIn(f' openfabric password clear {password}-{dummy_if}', tmp) def test_openfabric_multiple_domains(self): domain_2 = 'VyOS_2' interface = 'dum5678' new_path = base_path + ['domain', domain_2] self.openfabric_base_config() # set same interface for 2 OpenFabric domains self.cli_set(['interfaces', 'dummy', interface]) self.cli_set(new_path + ['interface', interface, 'address-family', 'ipv4']) self.cli_set(path + ['interface', interface, 'address-family', 'ipv4']) # verify() - same interface can be used only for one OpenFabric instance with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(path + ['interface', interface]) # Commit all changes self.cli_commit() # Verify FRR openfabric configuration - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f'router openfabric {domain}', tmp) self.assertIn(f' net {net}', tmp) - tmp = self.getFRRconfig(f'router openfabric {domain_2}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain_2}', daemon=openfabric_daemon) self.assertIn(f'router openfabric {domain_2}', tmp) self.assertIn(f' net {net}', tmp) # Verify interface configuration - tmp = self.getFRRconfig(f'interface {dummy_if}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {dummy_if}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain}', tmp) self.assertIn(f' ipv6 router openfabric {domain}', tmp) - tmp = self.getFRRconfig(f'interface {interface}', daemon='fabricd') + tmp = 
self.getFRRconfig(f'interface {interface}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain_2}', tmp) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_ospf.py b/smoketest/scripts/cli/test_protocols_ospf.py index c3ae54e12..599bf3930 100755 --- a/smoketest/scripts/cli/test_protocols_ospf.py +++ b/smoketest/scripts/cli/test_protocols_ospf.py @@ -1,582 +1,578 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest -import time +from time import sleep from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frrender import ospf_daemon from vyos.utils.process import process_named_running +from vyos.xml_ref import default_value -PROCESS_NAME = 'ospfd' base_path = ['protocols', 'ospf'] route_map = 'foo-bar-baz10' dummy_if = 'dum3562' class TestProtocolsOSPF(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsOSPF, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(ospf_daemon) cls.cli_set(cls, ['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'route-map', route_map, 'rule', '20', 'action', 'permit']) cls.cli_set(cls, ['interfaces', 'dummy', dummy_if]) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) @classmethod def tearDownClass(cls): cls.cli_delete(cls, ['policy', 'route-map', route_map]) cls.cli_delete(cls, ['interfaces', 'dummy', dummy_if]) super(TestProtocolsOSPF, cls).tearDownClass() def tearDown(self): self.cli_delete(base_path) self.cli_commit() + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) + self.assertNotIn(f'router ospf', frrconfig) + # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(ospf_daemon)) def test_ospf_01_defaults(self): # commit changes self.cli_set(base_path) self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' auto-cost reference-bandwidth 100', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) # defaults def test_ospf_02_simple(self): router_id = '127.0.0.1' abr_type = 'ibm' bandwidth = '1000' metric = '123' self.cli_set(base_path + ['auto-cost', 'reference-bandwidth', bandwidth]) self.cli_set(base_path + ['parameters', 'router-id', router_id]) self.cli_set(base_path + ['parameters', 'abr-type', 
abr_type]) self.cli_set(base_path + ['parameters', 'opaque-lsa']) self.cli_set(base_path + ['parameters', 'rfc1583-compatibility']) self.cli_set(base_path + ['log-adjacency-changes', 'detail']) self.cli_set(base_path + ['default-metric', metric]) self.cli_set(base_path + ['passive-interface', 'default']) self.cli_set(base_path + ['area', '10', 'area-type', 'stub']) self.cli_set(base_path + ['area', '10', 'network', '10.0.0.0/16']) self.cli_set(base_path + ['area', '10', 'range', '10.0.1.0/24']) self.cli_set(base_path + ['area', '10', 'range', '10.0.2.0/24', 'not-advertise']) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' compatible rfc1583', frrconfig) self.assertIn(f' auto-cost reference-bandwidth {bandwidth}', frrconfig) self.assertIn(f' ospf router-id {router_id}', frrconfig) self.assertIn(f' ospf abr-type {abr_type}', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) # defaults self.assertIn(f' capability opaque', frrconfig) self.assertIn(f' default-metric {metric}', frrconfig) self.assertIn(f' passive-interface default', frrconfig) self.assertIn(f' area 10 stub', frrconfig) self.assertIn(f' network 10.0.0.0/16 area 10', frrconfig) self.assertIn(f' area 10 range 10.0.1.0/24', frrconfig) self.assertNotIn(f' area 10 range 10.0.1.0/24 not-advertise', frrconfig) self.assertIn(f' area 10 range 10.0.2.0/24 not-advertise', frrconfig) def test_ospf_03_access_list(self): acl = '100' seq = '10' protocols = ['bgp', 'connected', 'isis', 'kernel', 'rip', 'static'] self.cli_set(['policy', 'access-list', acl, 'rule', seq, 'action', 'permit']) self.cli_set(['policy', 'access-list', acl, 'rule', seq, 'source', 'any']) self.cli_set(['policy', 'access-list', acl, 'rule', seq, 'destination', 'any']) for ptotocol in protocols: self.cli_set(base_path + ['access-list', acl, 'export', ptotocol]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) # defaults for ptotocol in protocols: self.assertIn(f' distribute-list {acl} out {ptotocol}', frrconfig) # defaults self.cli_delete(['policy', 'access-list', acl]) def test_ospf_04_default_originate(self): seq = '100' metric = '50' metric_type = '1' self.cli_set(base_path + ['default-information', 'originate', 'metric', metric]) self.cli_set(base_path + ['default-information', 'originate', 'metric-type', metric_type]) self.cli_set(base_path + ['default-information', 'originate', 'route-map', route_map]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) # defaults self.assertIn(f' default-information originate metric {metric} metric-type {metric_type} route-map {route_map}', frrconfig) # Now set 'always' self.cli_set(base_path + ['default-information', 'originate', 'always']) self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router 
ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f' default-information originate always metric {metric} metric-type {metric_type} route-map {route_map}', frrconfig) def test_ospf_05_options(self): global_distance = '128' intra_area = '100' inter_area = '110' external = '120' on_startup = '30' on_shutdown = '60' refresh = '50' aggregation_timer = '100' summary_nets = { '10.0.1.0/24' : {}, '10.0.2.0/24' : {'tag' : '50'}, '10.0.3.0/24' : {'no_advertise' : {}}, } self.cli_set(base_path + ['distance', 'global', global_distance]) self.cli_set(base_path + ['distance', 'ospf', 'external', external]) self.cli_set(base_path + ['distance', 'ospf', 'intra-area', intra_area]) self.cli_set(base_path + ['max-metric', 'router-lsa', 'on-startup', on_startup]) self.cli_set(base_path + ['max-metric', 'router-lsa', 'on-shutdown', on_shutdown]) self.cli_set(base_path + ['mpls-te', 'enable']) self.cli_set(base_path + ['refresh', 'timers', refresh]) self.cli_set(base_path + ['aggregation', 'timer', aggregation_timer]) for summary, summary_options in summary_nets.items(): self.cli_set(base_path + ['summary-address', summary]) if 'tag' in summary_options: self.cli_set(base_path + ['summary-address', summary, 'tag', summary_options['tag']]) if 'no_advertise' in summary_options: self.cli_set(base_path + ['summary-address', summary, 'no-advertise']) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' mpls-te on', frrconfig) self.assertIn(f' mpls-te router-address 0.0.0.0', frrconfig) # default self.assertIn(f' distance {global_distance}', frrconfig) self.assertIn(f' distance ospf intra-area {intra_area} external {external}', frrconfig) self.assertIn(f' max-metric router-lsa on-startup {on_startup}', frrconfig) self.assertIn(f' max-metric router-lsa on-shutdown {on_shutdown}', frrconfig) self.assertIn(f' refresh timer {refresh}', frrconfig) self.assertIn(f' aggregation timer {aggregation_timer}', frrconfig) for summary, summary_options in summary_nets.items(): self.assertIn(f' summary-address {summary}', frrconfig) if 'tag' in summary_options: tag = summary_options['tag'] self.assertIn(f' summary-address {summary} tag {tag}', frrconfig) if 'no_advertise' in summary_options: self.assertIn(f' summary-address {summary} no-advertise', frrconfig) # enable inter-area self.cli_set(base_path + ['distance', 'ospf', 'inter-area', inter_area]) self.cli_commit() - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f' distance ospf intra-area {intra_area} inter-area {inter_area} external {external}', frrconfig) + # https://github.com/FRRouting/frr/issues/17011 + # We need to wait on_shutdown time, until the OSPF process is removed from the CLI + # otherwise the test in tearDown() will fail + self.cli_delete(base_path) + self.cli_commit() + + sleep(int(on_shutdown) + 5) # additional grace period of 5 seconds def test_ospf_06_neighbor(self): priority = '10' poll_interval = '20' neighbors = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] for neighbor in neighbors: self.cli_set(base_path + ['neighbor', neighbor, 'priority', priority]) self.cli_set(base_path + ['neighbor', neighbor, 'poll-interval', poll_interval]) # commit changes 
self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) for neighbor in neighbors: self.assertIn(f' neighbor {neighbor} priority {priority} poll-interval {poll_interval}', frrconfig) # default def test_ospf_07_redistribute(self): metric = '15' metric_type = '1' redistribute = ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static'] for protocol in redistribute: self.cli_set(base_path + ['redistribute', protocol, 'metric', metric]) self.cli_set(base_path + ['redistribute', protocol, 'route-map', route_map]) self.cli_set(base_path + ['redistribute', protocol, 'metric-type', metric_type]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) for protocol in redistribute: self.assertIn(f' redistribute {protocol} metric {metric} metric-type {metric_type} route-map {route_map}', frrconfig) def test_ospf_08_virtual_link(self): networks = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'] area = '10' shortcut = 'enable' virtual_link = '192.0.2.1' hello = '6' retransmit = '5' transmit = '5' dead = '40' + window_default = default_value(base_path + ['area', area, 'virtual-link', virtual_link, 'retransmit-window']) self.cli_set(base_path + ['area', area, 'shortcut', shortcut]) self.cli_set(base_path + ['area', area, 'virtual-link', virtual_link, 'hello-interval', hello]) self.cli_set(base_path + ['area', area, 'virtual-link', virtual_link, 'retransmit-interval', retransmit]) self.cli_set(base_path + ['area', area, 'virtual-link', virtual_link, 'transmit-delay', transmit]) self.cli_set(base_path + ['area', area, 'virtual-link', virtual_link, 'dead-interval', dead]) for network in networks: self.cli_set(base_path + ['area', area, 'network', network]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' area {area} shortcut {shortcut}', frrconfig) - self.assertIn(f' area {area} virtual-link {virtual_link} hello-interval {hello} retransmit-interval {retransmit} transmit-delay {transmit} dead-interval {dead}', frrconfig) + self.assertIn(f' area {area} virtual-link {virtual_link} hello-interval {hello} retransmit-interval {retransmit} retransmit-window {window_default} transmit-delay {transmit} dead-interval {dead}', frrconfig) for network in networks: self.assertIn(f' network {network} area {area}', frrconfig) def test_ospf_09_interface_configuration(self): interfaces = Section.interfaces('ethernet') password = 'vyos1234' bandwidth = '10000' cost = '150' network = 'point-to-point' priority = '200' bfd_profile = 'vyos-test' self.cli_set(base_path + ['passive-interface', 'default']) for interface in interfaces: base_interface = base_path + ['interface', interface] self.cli_set(base_interface + ['authentication', 'plaintext-password', password]) self.cli_set(base_interface + ['bandwidth', bandwidth]) self.cli_set(base_interface + ['bfd', 'profile', bfd_profile]) self.cli_set(base_interface + ['cost', cost]) self.cli_set(base_interface + ['mtu-ignore']) self.cli_set(base_interface + 
['network', network]) self.cli_set(base_interface + ['priority', priority]) self.cli_set(base_interface + ['passive', 'disable']) # commit changes self.cli_commit() - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' passive-interface default', frrconfig) for interface in interfaces: # Can not use daemon for getFRRconfig() as bandwidth parameter belongs to zebra process config = self.getFRRconfig(f'interface {interface}') self.assertIn(f'interface {interface}', config) self.assertIn(f' ip ospf authentication-key {password}', config) self.assertIn(f' ip ospf bfd', config) self.assertIn(f' ip ospf bfd profile {bfd_profile}', config) self.assertIn(f' ip ospf cost {cost}', config) self.assertIn(f' ip ospf mtu-ignore', config) self.assertIn(f' ip ospf network {network}', config) self.assertIn(f' ip ospf priority {priority}', config) self.assertIn(f' no ip ospf passive', config) self.assertIn(f' bandwidth {bandwidth}', config) # T5467: Remove interface from OSPF process and VRF self.cli_delete(base_path + ['interface']) self.cli_commit() for interface in interfaces: # T5467: It must also be removed from FRR config - frrconfig = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=ospf_daemon) self.assertNotIn(f'interface {interface}', frrconfig) # There should be no OSPF related command at all under the interface self.assertNotIn(f' ip ospf', frrconfig) def test_ospf_11_interface_area(self): area = '0' interfaces = Section.interfaces('ethernet') self.cli_set(base_path + ['area', area, 'network', '10.0.0.0/8']) for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'area', area]) # we can not have bot area network and interface area set with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['area', area, 'network']) self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) for interface in interfaces: - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=ospf_daemon) self.assertIn(f'interface {interface}', config) self.assertIn(f' ip ospf area {area}', config) def test_ospf_12_vrfs(self): # It is safe to assume that when the basic VRF test works, all # other OSPF related features work, as we entirely inherit the CLI # templates and Jinja2 FRR template. 
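Editorial note: the getFRRconfig() calls in the OSPF tests above are now scoped with endsection='^exit' and a daemon constant, so every assertion runs against a single FRR block instead of the full running configuration. A minimal sketch of the assumed clipping behaviour, using a hypothetical helper name (section_between) that is not part of the test shim:

import re

def section_between(running_config: str, start: str, endsection: str = '^exit') -> str:
    # Keep only the block from the first line starting with 'start' up to and
    # including the first following line that matches the 'endsection' regex.
    out, inside = [], False
    for line in running_config.splitlines():
        if not inside and line.startswith(start):
            inside = True
        if inside:
            out.append(line)
            if re.match(endsection, line):
                break
    return '\n'.join(out)

For example, section_between(full_config, 'router ospf') would return only the lines between 'router ospf' and its closing 'exit', which is why the assertions above can safely match short strings such as ' timers throttle spf 200 1000 10000'.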
table = '1000' vrf = 'blue' vrf_base = ['vrf', 'name', vrf] vrf_iface = 'eth1' area = '1' self.cli_set(vrf_base + ['table', table]) self.cli_set(vrf_base + ['protocols', 'ospf', 'interface', vrf_iface, 'area', area]) self.cli_set(['interfaces', 'ethernet', vrf_iface, 'vrf', vrf]) # Also set a default VRF OSPF config self.cli_set(base_path) self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' auto-cost reference-bandwidth 100', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) # defaults - frrconfig = self.getFRRconfig(f'router ospf vrf {vrf}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'router ospf vrf {vrf}', daemon=ospf_daemon) self.assertIn(f'router ospf vrf {vrf}', frrconfig) self.assertIn(f' auto-cost reference-bandwidth 100', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) # defaults - frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=ospf_daemon) self.assertIn(f'interface {vrf_iface}', frrconfig) self.assertIn(f' ip ospf area {area}', frrconfig) # T5467: Remove interface from OSPF process and VRF self.cli_delete(vrf_base + ['protocols', 'ospf', 'interface']) self.cli_delete(['interfaces', 'ethernet', vrf_iface, 'vrf']) self.cli_commit() # T5467: It must also be removed from FRR config - frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=ospf_daemon) self.assertNotIn(f'interface {vrf_iface}', frrconfig) # There should be no OSPF related command at all under the interface self.assertNotIn(f' ip ospf', frrconfig) # cleanup self.cli_delete(['vrf', 'name', vrf]) self.cli_delete(['interfaces', 'ethernet', vrf_iface, 'vrf']) def test_ospf_13_export_list(self): # Verify explort-list works on ospf-area acl = '100' seq = '10' area = '0.0.0.10' network = '10.0.0.0/8' self.cli_set(['policy', 'access-list', acl, 'rule', seq, 'action', 'permit']) self.cli_set(['policy', 'access-list', acl, 'rule', seq, 'source', 'any']) self.cli_set(['policy', 'access-list', acl, 'rule', seq, 'destination', 'any']) self.cli_set(base_path + ['area', area, 'network', network]) self.cli_set(base_path + ['area', area, 'export-list', acl]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) # default self.assertIn(f' network {network} area {area}', frrconfig) self.assertIn(f' area {area} export-list {acl}', frrconfig) def test_ospf_14_segment_routing_configuration(self): global_block_low = "300" global_block_high = "399" local_block_low = "400" local_block_high = "499" maximum_stack_size = '5' prefix_one = '192.168.0.1/32' prefix_two = '192.168.0.2/32' prefix_one_value = '1' prefix_two_value = '2' self.cli_set(base_path + ['interface', dummy_if]) self.cli_set(base_path + ['segment-routing', 'maximum-label-depth', maximum_stack_size]) self.cli_set(base_path + ['segment-routing', 'global-block', 'low-label-value', global_block_low]) self.cli_set(base_path + ['segment-routing', 'global-block', 
'high-label-value', global_block_high]) self.cli_set(base_path + ['segment-routing', 'local-block', 'low-label-value', local_block_low]) self.cli_set(base_path + ['segment-routing', 'local-block', 'high-label-value', local_block_high]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_one, 'index', 'value', prefix_one_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_one, 'index', 'explicit-null']) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_two, 'index', 'value', prefix_two_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_two, 'index', 'no-php-flag']) # Commit all changes self.cli_commit() # Verify all changes - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f' segment-routing on', frrconfig) self.assertIn(f' segment-routing global-block {global_block_low} {global_block_high} local-block {local_block_low} {local_block_high}', frrconfig) self.assertIn(f' segment-routing node-msd {maximum_stack_size}', frrconfig) self.assertIn(f' segment-routing prefix {prefix_one} index {prefix_one_value} explicit-null', frrconfig) self.assertIn(f' segment-routing prefix {prefix_two} index {prefix_two_value} no-php-flag', frrconfig) def test_ospf_15_ldp_sync(self): holddown = "500" interfaces = Section.interfaces('ethernet') self.cli_set(base_path + ['interface', dummy_if]) self.cli_set(base_path + ['ldp-sync', 'holddown', holddown]) # Commit main OSPF changes self.cli_commit() # Verify main OSPF changes - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' timers throttle spf 200 1000 10000', frrconfig) self.assertIn(f' mpls ldp-sync holddown {holddown}', frrconfig) for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'ldp-sync', 'holddown', holddown]) # Commit interface changes for holddown self.cli_commit() for interface in interfaces: # Verify interface changes for holddown - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=ospf_daemon) self.assertIn(f'interface {interface}', config) self.assertIn(f' ip ospf dead-interval 40', config) self.assertIn(f' ip ospf mpls ldp-sync', config) self.assertIn(f' ip ospf mpls ldp-sync holddown {holddown}', config) for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'ldp-sync', 'disable']) # Commit interface changes for disable self.cli_commit() for interface in interfaces: # Verify interface changes for disable - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=ospf_daemon) self.assertIn(f'interface {interface}', config) self.assertIn(f' ip ospf dead-interval 40', config) self.assertNotIn(f' ip ospf mpls ldp-sync', config) def test_ospf_16_graceful_restart(self): period = '300' supported_grace_time = '400' router_ids = ['192.0.2.1', '192.0.2.2'] self.cli_set(base_path + ['capability', 'opaque']) self.cli_set(base_path + ['graceful-restart', 'grace-period', period]) self.cli_set(base_path + ['graceful-restart', 'helper', 'planned-only']) self.cli_set(base_path + ['graceful-restart', 'helper', 'no-strict-lsa-checking']) self.cli_set(base_path + ['graceful-restart', 'helper', 'supported-grace-time', 
supported_grace_time]) for router_id in router_ids: self.cli_set(base_path + ['graceful-restart', 'helper', 'enable', 'router-id', router_id]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' capability opaque', frrconfig) self.assertIn(f' graceful-restart grace-period {period}', frrconfig) self.assertIn(f' graceful-restart helper planned-only', frrconfig) self.assertIn(f' no graceful-restart helper strict-lsa-checking', frrconfig) self.assertIn(f' graceful-restart helper supported-grace-time {supported_grace_time}', frrconfig) for router_id in router_ids: self.assertIn(f' graceful-restart helper enable {router_id}', frrconfig) def test_ospf_17_duplicate_area_network(self): area0 = '0' area1 = '1' network = '10.0.0.0/8' self.cli_set(base_path + ['area', area0, 'network', network]) # we can not have the same network defined on two areas self.cli_set(base_path + ['area', area1, 'network', network]) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['area', area0]) self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) - # Required to prevent the race condition T6761 - retry_count = 0 - max_retries = 60 - - while not frrconfig and retry_count < max_retries: - # Log every 10 seconds - if retry_count % 10 == 0: - print(f"Attempt {retry_count}: FRR config is still empty. Retrying...") - - retry_count += 1 - time.sleep(1) - frrconfig = self.getFRRconfig('router ospf', daemon=PROCESS_NAME) - - if not frrconfig: - print("Failed to retrieve FRR config after 60 seconds") - + frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon, empty_retry=60) self.assertIn(f'router ospf', frrconfig) self.assertIn(f' network {network} area {area1}', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_ospfv3.py b/smoketest/scripts/cli/test_protocols_ospfv3.py index 989e1552d..d961a4fdc 100755 --- a/smoketest/scripts/cli/test_protocols_ospfv3.py +++ b/smoketest/scripts/cli/test_protocols_ospfv3.py @@ -1,339 +1,342 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
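Editorial note: the hand-rolled 60-iteration polling loop that guarded against the T6761 race in test_ospf_17_duplicate_area_network is folded into getFRRconfig() itself via the new empty_retry keyword. Assuming empty_retry simply re-reads the daemon configuration until it is non-empty or the retry budget is exhausted (one second between attempts, as in the removed loop), the call is roughly equivalent to this fragment inside the test method:

from time import sleep

frrconfig = ''
for _ in range(60):
    frrconfig = self.getFRRconfig('router ospf', endsection='^exit', daemon=ospf_daemon)
    if frrconfig:
        break
    sleep(1)

Moving the retry into the shim keeps the race-condition handling in one place instead of duplicating it per test.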
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frrender import ospf6_daemon from vyos.utils.process import process_named_running -PROCESS_NAME = 'ospf6d' base_path = ['protocols', 'ospfv3'] route_map = 'foo-bar-baz-0815' router_id = '192.0.2.1' default_area = '0' class TestProtocolsOSPFv3(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsOSPFv3, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(ospf6_daemon) cls.cli_set(cls, ['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'route-map', route_map, 'rule', '20', 'action', 'permit']) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) @classmethod def tearDownClass(cls): cls.cli_delete(cls, ['policy', 'route-map', route_map]) super(TestProtocolsOSPFv3, cls).tearDownClass() def tearDown(self): self.cli_delete(base_path) self.cli_commit() + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) + self.assertNotIn(f'router ospf6', frrconfig) + # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(ospf6_daemon)) def test_ospfv3_01_basic(self): seq = '10' prefix = '2001:db8::/32' acl_name = 'foo-acl-100' self.cli_set(['policy', 'access-list6', acl_name, 'rule', seq, 'action', 'permit']) self.cli_set(['policy', 'access-list6', acl_name, 'rule', seq, 'source', 'any']) self.cli_set(base_path + ['parameters', 'router-id', router_id]) self.cli_set(base_path + ['area', default_area, 'range', prefix, 'advertise']) self.cli_set(base_path + ['area', default_area, 'export-list', acl_name]) self.cli_set(base_path + ['area', default_area, 'import-list', acl_name]) interfaces = Section.interfaces('ethernet') for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'area', default_area]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) self.assertIn(f' area {default_area} range {prefix}', frrconfig) self.assertIn(f' ospf6 router-id {router_id}', frrconfig) self.assertIn(f' area {default_area} import-list {acl_name}', frrconfig) self.assertIn(f' area {default_area} export-list {acl_name}', frrconfig) for interface in interfaces: - if_config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + if_config = self.getFRRconfig(f'interface {interface}', daemon=ospf6_daemon) self.assertIn(f'ipv6 ospf6 area {default_area}', if_config) self.cli_delete(['policy', 'access-list6', acl_name]) def test_ospfv3_02_distance(self): dist_global = '200' dist_external = '110' dist_inter_area = '120' dist_intra_area = '130' self.cli_set(base_path + ['distance', 'global', dist_global]) self.cli_set(base_path + ['distance', 'ospfv3', 'external', dist_external]) self.cli_set(base_path + ['distance', 'ospfv3', 'inter-area', dist_inter_area]) self.cli_set(base_path + ['distance', 'ospfv3', 'intra-area', dist_intra_area]) # commit changes self.cli_commit() # Verify FRR ospfd 
configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) self.assertIn(f' distance {dist_global}', frrconfig) self.assertIn(f' distance ospf6 intra-area {dist_intra_area} inter-area {dist_inter_area} external {dist_external}', frrconfig) def test_ospfv3_03_redistribute(self): metric = '15' metric_type = '1' route_map = 'foo-bar' route_map_seq = '10' redistribute = ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static'] self.cli_set(['policy', 'route-map', route_map, 'rule', route_map_seq, 'action', 'permit']) for protocol in redistribute: self.cli_set(base_path + ['redistribute', protocol, 'metric', metric]) self.cli_set(base_path + ['redistribute', protocol, 'route-map', route_map]) self.cli_set(base_path + ['redistribute', protocol, 'metric-type', metric_type]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) for protocol in redistribute: self.assertIn(f' redistribute {protocol} metric {metric} metric-type {metric_type} route-map {route_map}', frrconfig) def test_ospfv3_04_interfaces(self): bfd_profile = 'vyos-ipv6' self.cli_set(base_path + ['parameters', 'router-id', router_id]) self.cli_set(base_path + ['area', default_area]) cost = '100' priority = '10' interfaces = Section.interfaces('ethernet') for interface in interfaces: if_base = base_path + ['interface', interface] self.cli_set(if_base + ['bfd', 'profile', bfd_profile]) self.cli_set(if_base + ['cost', cost]) self.cli_set(if_base + ['instance-id', '0']) self.cli_set(if_base + ['mtu-ignore']) self.cli_set(if_base + ['network', 'point-to-point']) self.cli_set(if_base + ['passive']) self.cli_set(if_base + ['priority', priority]) cost = str(int(cost) + 10) priority = str(int(priority) + 5) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) cost = '100' priority = '10' for interface in interfaces: - if_config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + if_config = self.getFRRconfig(f'interface {interface}', daemon=ospf6_daemon) self.assertIn(f'interface {interface}', if_config) self.assertIn(f' ipv6 ospf6 bfd', if_config) self.assertIn(f' ipv6 ospf6 bfd profile {bfd_profile}', if_config) self.assertIn(f' ipv6 ospf6 cost {cost}', if_config) self.assertIn(f' ipv6 ospf6 mtu-ignore', if_config) self.assertIn(f' ipv6 ospf6 network point-to-point', if_config) self.assertIn(f' ipv6 ospf6 passive', if_config) self.assertIn(f' ipv6 ospf6 priority {priority}', if_config) cost = str(int(cost) + 10) priority = str(int(priority) + 5) # Cleanup interfaces self.cli_delete(base_path + ['interface']) self.cli_commit() for interface in interfaces: - if_config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + if_config = self.getFRRconfig(f'interface {interface}', daemon=ospf6_daemon) # There should be no OSPF6 configuration at all after interface removal self.assertNotIn(f' ipv6 ospf6', if_config) def test_ospfv3_05_area_stub(self): area_stub = '23' area_stub_nosum = '26' self.cli_set(base_path + ['area', area_stub, 
'area-type', 'stub']) self.cli_set(base_path + ['area', area_stub_nosum, 'area-type', 'stub', 'no-summary']) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) self.assertIn(f' area {area_stub} stub', frrconfig) self.assertIn(f' area {area_stub_nosum} stub no-summary', frrconfig) def test_ospfv3_06_area_nssa(self): area_nssa = '1.1.1.1' area_nssa_nosum = '2.2.2.2' area_nssa_default = '3.3.3.3' self.cli_set(base_path + ['area', area_nssa, 'area-type', 'nssa']) self.cli_set(base_path + ['area', area_nssa, 'area-type', 'stub']) # can only set one area-type per OSPFv3 area with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['area', area_nssa, 'area-type', 'stub']) self.cli_set(base_path + ['area', area_nssa_nosum, 'area-type', 'nssa', 'no-summary']) self.cli_set(base_path + ['area', area_nssa_nosum, 'area-type', 'nssa', 'default-information-originate']) self.cli_set(base_path + ['area', area_nssa_default, 'area-type', 'nssa', 'default-information-originate']) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) self.assertIn(f' area {area_nssa} nssa', frrconfig) self.assertIn(f' area {area_nssa_nosum} nssa default-information-originate no-summary', frrconfig) self.assertIn(f' area {area_nssa_default} nssa default-information-originate', frrconfig) def test_ospfv3_07_default_originate(self): seq = '100' metric = '50' metric_type = '1' self.cli_set(base_path + ['default-information', 'originate', 'metric', metric]) self.cli_set(base_path + ['default-information', 'originate', 'metric-type', metric_type]) self.cli_set(base_path + ['default-information', 'originate', 'route-map', route_map]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) self.assertIn(f' default-information originate metric {metric} metric-type {metric_type} route-map {route_map}', frrconfig) # Now set 'always' self.cli_set(base_path + ['default-information', 'originate', 'always']) self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f' default-information originate always metric {metric} metric-type {metric_type} route-map {route_map}', frrconfig) def test_ospfv3_08_vrfs(self): # It is safe to assume that when the basic VRF test works, all # other OSPF related features work, as we entirely inherit the CLI # templates and Jinja2 FRR template. 
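Editorial note: every OSPFv3 test in this file follows the same set/commit/verify cycle. A sketch of that cycle as a hypothetical helper on the test class (the helper name is illustrative only; cli_set, cli_commit and getFRRconfig are used exactly as in the surrounding tests):

def _verify_ospf6_option(self, cli_suffix, expected_line):
    # configure the CLI node, render it to FRR, then assert on the rendered block
    self.cli_set(base_path + cli_suffix)
    self.cli_commit()
    frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon)
    self.assertIn('router ospf6', frrconfig)
    self.assertIn(expected_line, frrconfig)

For instance, _verify_ospf6_option(['distance', 'global', '200'], ' distance 200') would reproduce the first check of test_ospfv3_02_distance.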
table = '1000' vrf = 'blue' vrf_base = ['vrf', 'name', vrf] vrf_iface = 'eth1' router_id = '1.2.3.4' router_id_vrf = '1.2.3.5' self.cli_set(vrf_base + ['table', table]) self.cli_set(vrf_base + ['protocols', 'ospfv3', 'interface', vrf_iface, 'bfd']) self.cli_set(vrf_base + ['protocols', 'ospfv3', 'parameters', 'router-id', router_id_vrf]) self.cli_set(['interfaces', 'ethernet', vrf_iface, 'vrf', vrf]) # Also set a default VRF OSPF config self.cli_set(base_path + ['parameters', 'router-id', router_id]) self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) self.assertIn(f' ospf6 router-id {router_id}', frrconfig) - frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=ospf6_daemon) self.assertIn(f'interface {vrf_iface}', frrconfig) self.assertIn(f' ipv6 ospf6 bfd', frrconfig) - frrconfig = self.getFRRconfig(f'router ospf6 vrf {vrf}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'router ospf6 vrf {vrf}', daemon=ospf6_daemon) self.assertIn(f'router ospf6 vrf {vrf}', frrconfig) self.assertIn(f' ospf6 router-id {router_id_vrf}', frrconfig) # T5467: Remove interface from OSPF process and VRF self.cli_delete(vrf_base + ['protocols', 'ospfv3', 'interface']) self.cli_delete(['interfaces', 'ethernet', vrf_iface, 'vrf']) self.cli_commit() # T5467: It must also be removed from FRR config - frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'interface {vrf_iface}', daemon=ospf6_daemon) self.assertNotIn(f'interface {vrf_iface}', frrconfig) # There should be no OSPF related command at all under the interface self.assertNotIn(f' ipv6 ospf6', frrconfig) # cleanup self.cli_delete(['vrf', 'name', vrf]) self.cli_delete(['interfaces', 'ethernet', vrf_iface, 'vrf']) def test_ospfv3_09_graceful_restart(self): period = '300' supported_grace_time = '400' router_ids = ['192.0.2.1', '192.0.2.2'] self.cli_set(base_path + ['graceful-restart', 'grace-period', period]) self.cli_set(base_path + ['graceful-restart', 'helper', 'planned-only']) self.cli_set(base_path + ['graceful-restart', 'helper', 'lsa-check-disable']) self.cli_set(base_path + ['graceful-restart', 'helper', 'supported-grace-time', supported_grace_time]) for router_id in router_ids: self.cli_set(base_path + ['graceful-restart', 'helper', 'enable', 'router-id', router_id]) # commit changes self.cli_commit() # Verify FRR ospfd configuration - frrconfig = self.getFRRconfig('router ospf6', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('router ospf6', endsection='^exit', daemon=ospf6_daemon) self.assertIn(f'router ospf6', frrconfig) self.assertIn(f' graceful-restart grace-period {period}', frrconfig) self.assertIn(f' graceful-restart helper planned-only', frrconfig) self.assertIn(f' graceful-restart helper lsa-check-disable', frrconfig) self.assertIn(f' graceful-restart helper supported-grace-time {supported_grace_time}', frrconfig) for router_id in router_ids: self.assertIn(f' graceful-restart helper enable {router_id}', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_pim.py b/smoketest/scripts/cli/test_protocols_pim.py index ccfced138..98b9d57aa 100755 --- a/smoketest/scripts/cli/test_protocols_pim.py +++ 
b/smoketest/scripts/cli/test_protocols_pim.py @@ -1,192 +1,192 @@ #!/usr/bin/env python3 # # Copyright (C) 2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError +from vyos.frrender import pim_daemon from vyos.ifconfig import Section from vyos.utils.process import process_named_running -PROCESS_NAME = 'pimd' base_path = ['protocols', 'pim'] class TestProtocolsPIM(VyOSUnitTestSHIM.TestCase): def tearDown(self): # pimd process must be running - self.assertTrue(process_named_running(PROCESS_NAME)) + self.assertTrue(process_named_running(pim_daemon)) self.cli_delete(base_path) self.cli_commit() # pimd process must be stopped by now - self.assertFalse(process_named_running(PROCESS_NAME)) + self.assertFalse(process_named_running(pim_daemon)) def test_01_pim_basic(self): rp = '127.0.0.1' group = '224.0.0.0/4' hello = '100' dr_priority = '64' self.cli_set(base_path + ['rp', 'address', rp, 'group', group]) interfaces = Section.interfaces('ethernet') for interface in interfaces: self.cli_set(base_path + ['interface', interface , 'bfd']) self.cli_set(base_path + ['interface', interface , 'dr-priority', dr_priority]) self.cli_set(base_path + ['interface', interface , 'hello', hello]) self.cli_set(base_path + ['interface', interface , 'no-bsm']) self.cli_set(base_path + ['interface', interface , 'no-unicast-bsm']) self.cli_set(base_path + ['interface', interface , 'passive']) # commit changes self.cli_commit() # Verify FRR pimd configuration - frrconfig = self.getFRRconfig(daemon=PROCESS_NAME) - self.assertIn(f'ip pim rp {rp} {group}', frrconfig) + frrconfig = self.getFRRconfig('router pim', endsection='^exit', daemon=pim_daemon) + self.assertIn(f' rp {rp} {group}', frrconfig) for interface in interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=pim_daemon) self.assertIn(f'interface {interface}', frrconfig) self.assertIn(f' ip pim', frrconfig) self.assertIn(f' ip pim bfd', frrconfig) self.assertIn(f' ip pim drpriority {dr_priority}', frrconfig) self.assertIn(f' ip pim hello {hello}', frrconfig) self.assertIn(f' no ip pim bsm', frrconfig) self.assertIn(f' no ip pim unicast-bsm', frrconfig) self.assertIn(f' ip pim passive', frrconfig) self.cli_commit() def test_02_pim_advanced(self): rp = '127.0.0.2' group = '224.0.0.0/4' join_prune_interval = '123' rp_keep_alive_timer = '190' keep_alive_timer = '180' packets = '10' prefix_list = 'pim-test' register_suppress_time = '300' self.cli_set(base_path + ['rp', 'address', rp, 'group', group]) self.cli_set(base_path + ['rp', 'keep-alive-timer', rp_keep_alive_timer]) self.cli_set(base_path + ['ecmp', 'rebalance']) self.cli_set(base_path + ['join-prune-interval', join_prune_interval]) self.cli_set(base_path + ['keep-alive-timer', keep_alive_timer]) self.cli_set(base_path + ['packets', packets]) 
self.cli_set(base_path + ['register-accept-list', 'prefix-list', prefix_list]) self.cli_set(base_path + ['register-suppress-time', register_suppress_time]) self.cli_set(base_path + ['no-v6-secondary']) self.cli_set(base_path + ['spt-switchover', 'infinity-and-beyond', 'prefix-list', prefix_list]) self.cli_set(base_path + ['ssm', 'prefix-list', prefix_list]) # check validate() - PIM require defined interfaces! with self.assertRaises(ConfigSessionError): self.cli_commit() interfaces = Section.interfaces('ethernet') for interface in interfaces: self.cli_set(base_path + ['interface', interface]) # commit changes self.cli_commit() # Verify FRR pimd configuration - frrconfig = self.getFRRconfig(daemon=PROCESS_NAME) - self.assertIn(f'ip pim rp {rp} {group}', frrconfig) - self.assertIn(f'ip pim rp keep-alive-timer {rp_keep_alive_timer}', frrconfig) - self.assertIn(f'ip pim ecmp rebalance', frrconfig) - self.assertIn(f'ip pim join-prune-interval {join_prune_interval}', frrconfig) - self.assertIn(f'ip pim keep-alive-timer {keep_alive_timer}', frrconfig) - self.assertIn(f'ip pim packets {packets}', frrconfig) - self.assertIn(f'ip pim register-accept-list {prefix_list}', frrconfig) - self.assertIn(f'ip pim register-suppress-time {register_suppress_time}', frrconfig) - self.assertIn(f'no ip pim send-v6-secondary', frrconfig) - self.assertIn(f'ip pim spt-switchover infinity-and-beyond prefix-list {prefix_list}', frrconfig) - self.assertIn(f'ip pim ssm prefix-list {prefix_list}', frrconfig) + frrconfig = self.getFRRconfig('router pim', endsection='^exit', daemon=pim_daemon) + self.assertIn(f' no send-v6-secondary', frrconfig) + self.assertIn(f' rp {rp} {group}', frrconfig) + self.assertIn(f' register-suppress-time {register_suppress_time}', frrconfig) + self.assertIn(f' join-prune-interval {join_prune_interval}', frrconfig) + self.assertIn(f' packets {packets}', frrconfig) + self.assertIn(f' keep-alive-timer {keep_alive_timer}', frrconfig) + self.assertIn(f' rp keep-alive-timer {rp_keep_alive_timer}', frrconfig) + self.assertIn(f' ssm prefix-list {prefix_list}', frrconfig) + self.assertIn(f' register-accept-list {prefix_list}', frrconfig) + self.assertIn(f' spt-switchover infinity-and-beyond prefix-list {prefix_list}', frrconfig) + self.assertIn(f' ecmp rebalance', frrconfig) def test_03_pim_igmp_proxy(self): igmp_proxy = ['protocols', 'igmp-proxy'] rp = '127.0.0.1' group = '224.0.0.0/4' self.cli_set(base_path) self.cli_set(igmp_proxy) # check validate() - can not set both IGMP proxy and PIM with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(igmp_proxy) self.cli_set(base_path + ['rp', 'address', rp, 'group', group]) interfaces = Section.interfaces('ethernet') for interface in interfaces: self.cli_set(base_path + ['interface', interface , 'bfd']) # commit changes self.cli_commit() def test_04_igmp(self): watermark_warning = '2000' query_interval = '1000' query_max_response_time = '200' version = '2' igmp_join = { '224.1.1.1' : { 'source' : ['1.1.1.1', '2.2.2.2', '3.3.3.3'] }, '224.1.2.2' : { 'source' : [] }, '224.1.3.3' : {}, } self.cli_set(base_path + ['igmp', 'watermark-warning', watermark_warning]) interfaces = Section.interfaces('ethernet') for interface in interfaces: self.cli_set(base_path + ['interface', interface , 'igmp', 'version', version]) self.cli_set(base_path + ['interface', interface , 'igmp', 'query-interval', query_interval]) self.cli_set(base_path + ['interface', interface , 'igmp', 'query-max-response-time', query_max_response_time]) for join, join_config in 
igmp_join.items(): self.cli_set(base_path + ['interface', interface , 'igmp', 'join', join]) if 'source' in join_config: for source in join_config['source']: self.cli_set(base_path + ['interface', interface , 'igmp', 'join', join, 'source-address', source]) self.cli_commit() - frrconfig = self.getFRRconfig(daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(daemon=pim_daemon) self.assertIn(f'ip igmp watermark-warn {watermark_warning}', frrconfig) for interface in interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=pim_daemon) self.assertIn(f'interface {interface}', frrconfig) self.assertIn(f' ip igmp', frrconfig) self.assertIn(f' ip igmp version {version}', frrconfig) self.assertIn(f' ip igmp query-interval {query_interval}', frrconfig) self.assertIn(f' ip igmp query-max-response-time {query_max_response_time}', frrconfig) for join, join_config in igmp_join.items(): if 'source' in join_config: for source in join_config['source']: - self.assertIn(f' ip igmp join {join} {source}', frrconfig) + self.assertIn(f' ip igmp join-group {join} {source}', frrconfig) else: - self.assertIn(f' ip igmp join {join}', frrconfig) + self.assertIn(f' ip igmp join-group {join}', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_pim6.py b/smoketest/scripts/cli/test_protocols_pim6.py index ba24edca2..f69eb4ae1 100755 --- a/smoketest/scripts/cli/test_protocols_pim6.py +++ b/smoketest/scripts/cli/test_protocols_pim6.py @@ -1,146 +1,146 @@ #!/usr/bin/env python3 # # Copyright (C) 2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
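Editorial note: current FRR renders statically joined groups with the join-group keyword instead of join, both for IGMP in pimd and for MLD in pim6d, which is why the expected strings change on both sides of this file boundary. A hedged restatement of the IGMP check above, driven by the same igmp_join test data:

for join, join_config in igmp_join.items():
    if join_config.get('source'):
        for source in join_config['source']:
            self.assertIn(f' ip igmp join-group {join} {source}', frrconfig)
    else:
        self.assertIn(f' ip igmp join-group {join}', frrconfig)

The MLD variant below makes the matching change, expecting ' ipv6 mld join-group ...' on the interface.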
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frrender import pim6_daemon from vyos.utils.process import process_named_running -PROCESS_NAME = 'pim6d' base_path = ['protocols', 'pim6'] class TestProtocolsPIMv6(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): # call base-classes classmethod super(TestProtocolsPIMv6, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(pim6_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(pim6_daemon)) def test_pim6_01_mld_simple(self): # commit changes interfaces = Section.interfaces('ethernet') for interface in interfaces: self.cli_set(base_path + ['interface', interface]) self.cli_commit() # Verify FRR pim6d configuration for interface in interfaces: - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=pim6_daemon) self.assertIn(f'interface {interface}', config) self.assertIn(f' ipv6 mld', config) self.assertNotIn(f' ipv6 mld version 1', config) # Change to MLD version 1 for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'mld', 'version', '1']) self.cli_commit() # Verify FRR pim6d configuration for interface in interfaces: - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=pim6_daemon) self.assertIn(f'interface {interface}', config) self.assertIn(f' ipv6 mld', config) self.assertIn(f' ipv6 mld version 1', config) def test_pim6_02_mld_join(self): interfaces = Section.interfaces('ethernet') # Use an invalid multicast group address for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'mld', 'join', 'fd00::1234']) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['interface']) # Use a valid multicast group address for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'mld', 'join', 'ff18::1234']) self.cli_commit() # Verify FRR pim6d configuration for interface in interfaces: - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=pim6_daemon) self.assertIn(f'interface {interface}', config) - self.assertIn(f' ipv6 mld join ff18::1234', config) + self.assertIn(f' ipv6 mld join-group ff18::1234', config) # Join a source-specific multicast group for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'mld', 'join', 'ff38::5678', 'source', '2001:db8::5678']) self.cli_commit() # Verify FRR pim6d configuration for interface in interfaces: - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=pim6_daemon) self.assertIn(f'interface {interface}', config) - self.assertIn(f' ipv6 mld join ff38::5678 2001:db8::5678', config) + self.assertIn(f' ipv6 mld join-group ff38::5678 2001:db8::5678', config) def 
test_pim6_03_basic(self): interfaces = Section.interfaces('ethernet') join_prune_interval = '123' keep_alive_timer = '77' packets = '5' register_suppress_time = '99' dr_priority = '100' hello = '50' self.cli_set(base_path + ['join-prune-interval', join_prune_interval]) self.cli_set(base_path + ['keep-alive-timer', keep_alive_timer]) self.cli_set(base_path + ['packets', packets]) self.cli_set(base_path + ['register-suppress-time', register_suppress_time]) for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'dr-priority', dr_priority]) self.cli_set(base_path + ['interface', interface, 'hello', hello]) self.cli_set(base_path + ['interface', interface, 'no-bsm']) self.cli_set(base_path + ['interface', interface, 'no-unicast-bsm']) self.cli_set(base_path + ['interface', interface, 'passive']) self.cli_commit() # Verify FRR pim6d configuration - config = self.getFRRconfig(daemon=PROCESS_NAME) - self.assertIn(f'ipv6 pim join-prune-interval {join_prune_interval}', config) - self.assertIn(f'ipv6 pim keep-alive-timer {keep_alive_timer}', config) - self.assertIn(f'ipv6 pim packets {packets}', config) - self.assertIn(f'ipv6 pim register-suppress-time {register_suppress_time}', config) + config = self.getFRRconfig('router pim6', endsection='^exit', daemon=pim6_daemon) + self.assertIn(f' join-prune-interval {join_prune_interval}', config) + self.assertIn(f' keep-alive-timer {keep_alive_timer}', config) + self.assertIn(f' packets {packets}', config) + self.assertIn(f' register-suppress-time {register_suppress_time}', config) for interface in interfaces: - config = self.getFRRconfig(f'interface {interface}', daemon=PROCESS_NAME) + config = self.getFRRconfig(f'interface {interface}', daemon=pim6_daemon) self.assertIn(f' ipv6 pim drpriority {dr_priority}', config) self.assertIn(f' ipv6 pim hello {hello}', config) self.assertIn(f' no ipv6 pim bsm', config) self.assertIn(f' no ipv6 pim unicast-bsm', config) self.assertIn(f' ipv6 pim passive', config) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_rip.py b/smoketest/scripts/cli/test_protocols_rip.py index bfc327fd4..33706a9c9 100755 --- a/smoketest/scripts/cli/test_protocols_rip.py +++ b/smoketest/scripts/cli/test_protocols_rip.py @@ -1,183 +1,186 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
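Editorial note: all of these smoketests now take their daemon names from vyos.frrender instead of a per-file PROCESS_NAME string, and keep the original PID-continuity check. A minimal sketch of that shared pattern, assuming vyos.frrender exports one constant per FRR daemon exactly as imported in this diff:

from vyos.frrender import rip_daemon
from vyos.utils.process import process_named_running

# captured once when the test class starts (setUpClass) ...
daemon_pid = process_named_running(rip_daemon)
# ... and compared again after every test: an unchanged PID means ripd neither
# crashed nor restarted while the configuration was applied and removed
assert daemon_pid == process_named_running(rip_daemon)

Centralising the names in vyos.frrender keeps the tests in sync if a daemon is ever renamed or replaced.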
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.ifconfig import Section +from vyos.frrender import rip_daemon from vyos.utils.process import process_named_running -PROCESS_NAME = 'ripd' acl_in = '198' acl_out = '199' prefix_list_in = 'foo-prefix' prefix_list_out = 'bar-prefix' route_map = 'FooBar123' base_path = ['protocols', 'rip'] class TestProtocolsRIP(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsRIP, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(rip_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) cls.cli_set(cls, ['policy', 'access-list', acl_in, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'access-list', acl_in, 'rule', '10', 'source', 'any']) cls.cli_set(cls, ['policy', 'access-list', acl_in, 'rule', '10', 'destination', 'any']) cls.cli_set(cls, ['policy', 'access-list', acl_out, 'rule', '20', 'action', 'deny']) cls.cli_set(cls, ['policy', 'access-list', acl_out, 'rule', '20', 'source', 'any']) cls.cli_set(cls, ['policy', 'access-list', acl_out, 'rule', '20', 'destination', 'any']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_in, 'rule', '100', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_in, 'rule', '100', 'prefix', '192.0.2.0/24']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_out, 'rule', '200', 'action', 'deny']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_out, 'rule', '200', 'prefix', '192.0.2.0/24']) cls.cli_set(cls, ['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) @classmethod def tearDownClass(cls): cls.cli_delete(cls, ['policy', 'access-list', acl_in]) cls.cli_delete(cls, ['policy', 'access-list', acl_out]) cls.cli_delete(cls, ['policy', 'prefix-list', prefix_list_in]) cls.cli_delete(cls, ['policy', 'prefix-list', prefix_list_out]) cls.cli_delete(cls, ['policy', 'route-map', route_map]) super(TestProtocolsRIP, cls).tearDownClass() def tearDown(self): self.cli_delete(base_path) self.cli_commit() + frrconfig = self.getFRRconfig('router rip') + self.assertNotIn(f'router rip', frrconfig) + # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(rip_daemon)) def test_rip_01_parameters(self): distance = '40' network_distance = '66' metric = '8' interfaces = Section.interfaces('ethernet') neighbors = ['1.2.3.4', '1.2.3.5', '1.2.3.6'] networks = ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'] redistribute = ['bgp', 'connected', 'isis', 'kernel', 'ospf', 'static'] timer_garbage = '888' timer_timeout = '1000' timer_update = '90' self.cli_set(base_path + ['default-distance', distance]) self.cli_set(base_path + ['default-information', 'originate']) self.cli_set(base_path + ['default-metric', metric]) self.cli_set(base_path + ['distribute-list', 'access-list', 'in', acl_in]) self.cli_set(base_path + ['distribute-list', 'access-list', 'out', acl_out]) self.cli_set(base_path + ['distribute-list', 'prefix-list', 'in', prefix_list_in]) self.cli_set(base_path + ['distribute-list', 'prefix-list', 'out', prefix_list_out]) self.cli_set(base_path + ['passive-interface', 'default']) self.cli_set(base_path + ['timers', 'garbage-collection', timer_garbage]) self.cli_set(base_path + 
['timers', 'timeout', timer_timeout]) self.cli_set(base_path + ['timers', 'update', timer_update]) for interface in interfaces: self.cli_set(base_path + ['interface', interface]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'access-list', 'in', acl_in]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'access-list', 'out', acl_out]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'prefix-list', 'in', prefix_list_in]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'prefix-list', 'out', prefix_list_out]) for neighbor in neighbors: self.cli_set(base_path + ['neighbor', neighbor]) for network in networks: self.cli_set(base_path + ['network', network]) self.cli_set(base_path + ['network-distance', network, 'distance', network_distance]) self.cli_set(base_path + ['route', network]) for proto in redistribute: self.cli_set(base_path + ['redistribute', proto, 'metric', metric]) self.cli_set(base_path + ['redistribute', proto, 'route-map', route_map]) # commit changes self.cli_commit() # Verify FRR ripd configuration frrconfig = self.getFRRconfig('router rip') self.assertIn(f'router rip', frrconfig) self.assertIn(f' distance {distance}', frrconfig) self.assertIn(f' default-information originate', frrconfig) self.assertIn(f' default-metric {metric}', frrconfig) self.assertIn(f' distribute-list {acl_in} in', frrconfig) self.assertIn(f' distribute-list {acl_out} out', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_in} in', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_out} out', frrconfig) self.assertIn(f' passive-interface default', frrconfig) self.assertIn(f' timers basic {timer_update} {timer_timeout} {timer_garbage}', frrconfig) for interface in interfaces: self.assertIn(f' network {interface}', frrconfig) self.assertIn(f' distribute-list {acl_in} in {interface}', frrconfig) self.assertIn(f' distribute-list {acl_out} out {interface}', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_in} in {interface}', frrconfig) self.assertIn(f' distribute-list prefix {prefix_list_out} out {interface}', frrconfig) for neighbor in neighbors: self.assertIn(f' neighbor {neighbor}', frrconfig) for network in networks: self.assertIn(f' network {network}', frrconfig) self.assertIn(f' distance {network_distance} {network}', frrconfig) self.assertIn(f' route {network}', frrconfig) for proto in redistribute: self.assertIn(f' redistribute {proto} metric {metric} route-map {route_map}', frrconfig) def test_rip_02_zebra_route_map(self): # Implemented because of T3328 self.cli_set(base_path + ['route-map', route_map]) # commit changes self.cli_commit() # Verify FRR configuration zebra_route_map = f'ip protocol rip route-map {route_map}' frrconfig = self.getFRRconfig(zebra_route_map) self.assertIn(zebra_route_map, frrconfig) # Remove the route-map again self.cli_delete(base_path + ['route-map']) # commit changes self.cli_commit() # Verify FRR configuration frrconfig = self.getFRRconfig(zebra_route_map) self.assertNotIn(zebra_route_map, frrconfig) def test_rip_03_version(self): rx_version = '1' tx_version = '2' interface = 'eth0' self.cli_set(base_path + ['version', tx_version]) self.cli_set(base_path + ['interface', interface, 'send', 'version', tx_version]) self.cli_set(base_path + ['interface', interface, 'receive', 'version', rx_version]) # commit changes self.cli_commit() # Verify FRR configuration frrconfig = self.getFRRconfig('router rip') self.assertIn(f'version {tx_version}', 
frrconfig) frrconfig = self.getFRRconfig(f'interface {interface}') self.assertIn(f' ip rip receive version {rx_version}', frrconfig) self.assertIn(f' ip rip send version {tx_version}', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_ripng.py b/smoketest/scripts/cli/test_protocols_ripng.py index 0cfb065c6..b10df9679 100755 --- a/smoketest/scripts/cli/test_protocols_ripng.py +++ b/smoketest/scripts/cli/test_protocols_ripng.py @@ -1,160 +1,163 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.ifconfig import Section +from vyos.frrender import ripng_daemon from vyos.utils.process import process_named_running -PROCESS_NAME = 'ripngd' acl_in = '198' acl_out = '199' prefix_list_in = 'foo-prefix' prefix_list_out = 'bar-prefix' route_map = 'FooBar123' base_path = ['protocols', 'ripng'] class TestProtocolsRIPng(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): # call base-classes classmethod super(TestProtocolsRIPng, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(ripng_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) cls.cli_set(cls, ['policy', 'access-list6', acl_in, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'access-list6', acl_in, 'rule', '10', 'source', 'any']) cls.cli_set(cls, ['policy', 'access-list6', acl_out, 'rule', '20', 'action', 'deny']) cls.cli_set(cls, ['policy', 'access-list6', acl_out, 'rule', '20', 'source', 'any']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_in, 'rule', '100', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_in, 'rule', '100', 'prefix', '2001:db8::/32']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_out, 'rule', '200', 'action', 'deny']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_out, 'rule', '200', 'prefix', '2001:db8::/32']) cls.cli_set(cls, ['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) @classmethod def tearDownClass(cls): # call base-classes classmethod super(TestProtocolsRIPng, cls).tearDownClass() cls.cli_delete(cls, ['policy', 'access-list6', acl_in]) cls.cli_delete(cls, ['policy', 'access-list6', acl_out]) cls.cli_delete(cls, ['policy', 'prefix-list6', prefix_list_in]) cls.cli_delete(cls, ['policy', 'prefix-list6', prefix_list_out]) cls.cli_delete(cls, ['policy', 'route-map', route_map]) def tearDown(self): self.cli_delete(base_path) self.cli_commit() + frrconfig = self.getFRRconfig('router ripng') + self.assertNotIn(f'router ripng', frrconfig) + # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + 
self.assertEqual(self.daemon_pid, process_named_running(ripng_daemon)) def test_ripng_01_parameters(self): metric = '8' interfaces = Section.interfaces('ethernet') aggregates = ['2001:db8:1000::/48', '2001:db8:2000::/48', '2001:db8:3000::/48'] networks = ['2001:db8:1000::/64', '2001:db8:1001::/64', '2001:db8:2000::/64', '2001:db8:2001::/64'] redistribute = ['bgp', 'connected', 'kernel', 'ospfv3', 'static'] timer_garbage = '888' timer_timeout = '1000' timer_update = '90' self.cli_set(base_path + ['default-information', 'originate']) self.cli_set(base_path + ['default-metric', metric]) self.cli_set(base_path + ['distribute-list', 'access-list', 'in', acl_in]) self.cli_set(base_path + ['distribute-list', 'access-list', 'out', acl_out]) self.cli_set(base_path + ['distribute-list', 'prefix-list', 'in', prefix_list_in]) self.cli_set(base_path + ['distribute-list', 'prefix-list', 'out', prefix_list_out]) self.cli_set(base_path + ['passive-interface', 'default']) self.cli_set(base_path + ['timers', 'garbage-collection', timer_garbage]) self.cli_set(base_path + ['timers', 'timeout', timer_timeout]) self.cli_set(base_path + ['timers', 'update', timer_update]) for aggregate in aggregates: self.cli_set(base_path + ['aggregate-address', aggregate]) for interface in interfaces: self.cli_set(base_path + ['interface', interface]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'access-list', 'in', acl_in]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'access-list', 'out', acl_out]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'prefix-list', 'in', prefix_list_in]) self.cli_set(base_path + ['distribute-list', 'interface', interface, 'prefix-list', 'out', prefix_list_out]) for network in networks: self.cli_set(base_path + ['network', network]) self.cli_set(base_path + ['route', network]) for proto in redistribute: self.cli_set(base_path + ['redistribute', proto, 'metric', metric]) self.cli_set(base_path + ['redistribute', proto, 'route-map', route_map]) # commit changes self.cli_commit() # Verify FRR ospfd configuration frrconfig = self.getFRRconfig('router ripng') self.assertIn(f'router ripng', frrconfig) self.assertIn(f' default-information originate', frrconfig) self.assertIn(f' default-metric {metric}', frrconfig) self.assertIn(f' ipv6 distribute-list {acl_in} in', frrconfig) self.assertIn(f' ipv6 distribute-list {acl_out} out', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_in} in', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_out} out', frrconfig) self.assertIn(f' passive-interface default', frrconfig) self.assertIn(f' timers basic {timer_update} {timer_timeout} {timer_garbage}', frrconfig) for aggregate in aggregates: self.assertIn(f' aggregate-address {aggregate}', frrconfig) for interface in interfaces: self.assertIn(f' network {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list {acl_in} in {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list {acl_out} out {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_in} in {interface}', frrconfig) self.assertIn(f' ipv6 distribute-list prefix {prefix_list_out} out {interface}', frrconfig) for network in networks: self.assertIn(f' network {network}', frrconfig) self.assertIn(f' route {network}', frrconfig) for proto in redistribute: if proto == 'ospfv3': proto = 'ospf6' self.assertIn(f' redistribute {proto} metric {metric} route-map {route_map}', frrconfig) def 
test_ripng_02_zebra_route_map(self): # Implemented because of T3328 self.cli_set(base_path + ['route-map', route_map]) # commit changes self.cli_commit() # Verify FRR configuration zebra_route_map = f'ipv6 protocol ripng route-map {route_map}' frrconfig = self.getFRRconfig(zebra_route_map) self.assertIn(zebra_route_map, frrconfig) # Remove the route-map again self.cli_delete(base_path + ['route-map']) # commit changes self.cli_commit() # Verify FRR configuration frrconfig = self.getFRRconfig(zebra_route_map) self.assertNotIn(zebra_route_map, frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_rpki.py b/smoketest/scripts/cli/test_protocols_rpki.py index 29f03a26a..23738717a 100755 --- a/smoketest/scripts/cli/test_protocols_rpki.py +++ b/smoketest/scripts/cli/test_protocols_rpki.py @@ -1,247 +1,249 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError +from vyos.frrender import bgp_daemon from vyos.utils.file import read_file from vyos.utils.process import process_named_running base_path = ['protocols', 'rpki'] -PROCESS_NAME = 'bgpd' - rpki_key_name = 'rpki-smoketest' rpki_key_type = 'ssh-rsa' rpki_ssh_key = """ b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn NhAAAAAwEAAQAAAQEAweDyflDFR4qyEwETbJkZ2ZZc+sJNiDTvYpwGsWIkju49lJSxHe1x Kf8FhwfyMu40Snt1yDlRmmmz4CsbLgbuZGMPvXG11e34+C0pSVUvpF6aqRTeLl1pDRK7Rn jgm3su+I8SRLQR4qbLG6VXWOFuVpwiqbExLaU0hFYTPNP+dArNpsWEEKsohk6pTXdhg3Vz Wp3vCMjl2JTshDa3lD7p2xISSAReEY0fnfEAmQzH4Z6DIwwGdFuMWoQIg+oFBM9ARrO2/F IjRsz6AecR/WeU72JEw4aJic1/cAJQA6PiQBHwkuo3Wll1tbpxeRZoB2NQG22ETyJLvhfT aooNLT9HpQAAA8joU5dM6FOXTAAAAAdzc2gtcnNhAAABAQDB4PJ+UMVHirITARNsmRnZll z6wk2INO9inAaxYiSO7j2UlLEd7XEp/wWHB/Iy7jRKe3XIOVGaabPgKxsuBu5kYw+9cbXV 7fj4LSlJVS+kXpqpFN4uXWkNErtGeOCbey74jxJEtBHipssbpVdY4W5WnCKpsTEtpTSEVh M80/50Cs2mxYQQqyiGTqlNd2GDdXNane8IyOXYlOyENreUPunbEhJIBF4RjR+d8QCZDMfh noMjDAZ0W4xahAiD6gUEz0BGs7b8UiNGzPoB5xH9Z5TvYkTDhomJzX9wAlADo+JAEfCS6j daWXW1unF5FmgHY1AbbYRPIku+F9Nqig0tP0elAAAAAwEAAQAAAQACkDlUjzfUhtJs6uY5 WNrdJB5NmHUS+HQzzxFNlhkapK6+wKqI1UNaRUtq6iF7J+gcFf7MK2nXS098BsXguWm8fQ zPuemoDvHsQhiaJhyvpSqRUrvPTB/f8t/0AhQiKiJIWgfpTaIw53inAGwjujNNxNm2eafH TThhCYxOkRT7rsT6bnSio6yeqPy5QHg7IKFztp5FXDUyiOS3aX3SvzQcDUkMXALdvzX50t 1XIk+X48Rgkq72dL4VpV2oMNDu3hM6FqBUplf9Mv3s51FNSma/cibCQoVufrIfoqYjkNTj IpYFUcq4zZ0/KvgXgzSsy9VN/4TtbalrOuu7X/SHJbvhAAAAgGPFsXgONYQvXxCnK1dIue ozgaZg1I/n522E2ZCOXBW4dYJVyNpppwRreDzuFzTDEe061MpNHfScjVBJCCulivFYWscL 6oaGsryDbFxO3QmB4I98UBqrds2yan9/JGc6EYe299yvaHy7Y64+NC0+fN8H2RAZ61T4w1 0JrCaJRyvzAAAAgQDvBfuV1U7o9k/fbU+U7W2UYnWblpOZAMfi1XQP6IJJeyWs90PdTdXh +l0eIQrCawIiRJytNfxMmbD4huwTf77fWiyCcPznmALQ7ex/yJ+W5Z0V4dPGF3h7o1uiS2 36JhQ7mfcliCkhp/1PIklBIMPcCp0zl+s9wMv2hX7w1Pah9QAAAIEAz6YgU9Xute+J+dBw oWxEQ+igR6KE55Um7O9AvSrqnCm9r7lSFsXC2ErYOxoDSJ3yIBEV0b4XAGn6tbbVIs3jS8 
BnLHxclAHQecOx1PGn7PKbnPW0oJRq/X9QCIEelKYvlykpayn7uZooTXqcDaPZxfPpmPdy e8chVJvdygi7kPEAAAAMY3BvQExSMS53dWUzAQIDBAUGBw== """ rpki_ssh_pub = """ AAAAB3NzaC1yc2EAAAADAQABAAABAQDB4PJ+UMVHirITARNsmRnZllz6wk2INO9inAaxYi SO7j2UlLEd7XEp/wWHB/Iy7jRKe3XIOVGaabPgKxsuBu5kYw+9cbXV7fj4LSlJVS+kXpqp FN4uXWkNErtGeOCbey74jxJEtBHipssbpVdY4W5WnCKpsTEtpTSEVhM80/50Cs2mxYQQqy iGTqlNd2GDdXNane8IyOXYlOyENreUPunbEhJIBF4RjR+d8QCZDMfhnoMjDAZ0W4xahAiD 6gUEz0BGs7b8UiNGzPoB5xH9Z5TvYkTDhomJzX9wAlADo+JAEfCS6jdaWXW1unF5FmgHY1 AbbYRPIku+F9Nqig0tP0el """ rpki_ssh_key_replacement = """ b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn NhAAAAAwEAAQAAAQEAtLPMwiGR3o6puPDbus9Yqoah9/7rv7i6ykykPmcEZ6ERnA0N6bl7 LkQxnCuX270ukTTZOhROvQnvQYIZohCMz27Q16z7r+I755QXL0x8x4Gqhg/hQUY7UtX6ts db8+pO7G1PL4r9zT6/KJAF/wv86DezJ3I6TMaA7MCikXfQWJisBvhgAXF1+7V9CWaroGgV /hHzQJu1yd4cfsYoHyeDaZ+lwFw4egNItIy63fIGDxrnXaonJ1ODGQh7zWlpl/cwQR/KyJ P8vvOZ9olQ6syZV+DAcAo4Fe59wW2Zj4bl8bdGcdiDn0grkafxwTcg9ynr9kwQ8b66oXY4 hwB4vlPFPwAAA8jkGyX45Bsl+AAAAAdzc2gtcnNhAAABAQC0s8zCIZHejqm48Nu6z1iqhq H3/uu/uLrKTKQ+ZwRnoRGcDQ3puXsuRDGcK5fbvS6RNNk6FE69Ce9BghmiEIzPbtDXrPuv 4jvnlBcvTHzHgaqGD+FBRjtS1fq2x1vz6k7sbU8viv3NPr8okAX/C/zoN7MncjpMxoDswK KRd9BYmKwG+GABcXX7tX0JZqugaBX+EfNAm7XJ3hx+xigfJ4Npn6XAXDh6A0i0jLrd8gYP GuddqicnU4MZCHvNaWmX9zBBH8rIk/y+85n2iVDqzJlX4MBwCjgV7n3BbZmPhuXxt0Zx2I OfSCuRp/HBNyD3Kev2TBDxvrqhdjiHAHi+U8U/AAAAAwEAAQAAAQA99gkX5/rknXaE+9Hc VIzKrC+NodOkgetKwszuuNRB1HD9WVyT8A3U5307V5dSuaPmFoEF8UCugWGQzNONRq+B0T W7Po1u2dxAo/7vMQL4RfX60icjAroExWqakfFtycIWP8UPQFGWtxVFC12C/tFRrwe3Vuu2 t7otdEBKMRM3zU0Hj88/5FIk/MDhththDCKTMe4+iwNKo30dyqSCckpTd2k5de9JYz8Aom 87jtQcyDdynaELSo9CsA8KRPlozZ4VSWTVLH+Cv2TZWPL7hy79YvvIfuF/Sd6PGkNwG1Vj TAbq2Wx4uq+HmpNiz7W0LnbZtQJ7dzLA3FZlvQMC8fVBAAAAgQDWvImVZCyVWpoG+LnKY3 joegjKRYKdgKRPCqGoIHiYsqCRxqSRW3jsuQCCvk4YO3/ZmqORiGktK+5r8R1QEtwg5qbi N7GZD34m7USNuqG2G/4puEly8syMmR6VRRvEURFQrpv2wniXNSefvsDc+WDqTfXGUxr+FT 478wkzjwc/fAAAAIEA9uP0Ym3OC3cZ5FOvmu51lxo5lqPlUeE78axg2I4u/9Il8nOvSVuq B9X5wAUyGAGcUjT3EZmRAtL2sQxc5T0Vw3bnxCjzukEbFM+DRtYy1hXSOoGTTwKoMWBpho R3X5uRLUQL/22C4rd7tSJpjqnZXIH0B5z2fFh4vzu8/SrgCrUAAACBALtep4BcGJfjfhfF ODzQe7Rk7tsaX8pfNv6bQu0sR5C9pDURFRf0fRC0oqgeTuzq/vHPyNLsUUgTCpKWiLFmvU G9pelLT3XPPgzA+g0gycM0unuX8kkP3T5VQAM/7u0+h1CaJ8A6cCkzvDJxYdfio3WR60OP ulHg7HCcyomFLaSjAAAADGNwb0BMUjEud3VlMwECAwQFBg== """ rpki_ssh_pub_replacement = """ AAAAB3NzaC1yc2EAAAADAQABAAABAQC0s8zCIZHejqm48Nu6z1iqhqH3/uu/uLrKTKQ+Zw RnoRGcDQ3puXsuRDGcK5fbvS6RNNk6FE69Ce9BghmiEIzPbtDXrPuv4jvnlBcvTHzHgaqG D+FBRjtS1fq2x1vz6k7sbU8viv3NPr8okAX/C/zoN7MncjpMxoDswKKRd9BYmKwG+GABcX X7tX0JZqugaBX+EfNAm7XJ3hx+xigfJ4Npn6XAXDh6A0i0jLrd8gYPGuddqicnU4MZCHvN aWmX9zBBH8rIk/y+85n2iVDqzJlX4MBwCjgV7n3BbZmPhuXxt0Zx2IOfSCuRp/HBNyD3Ke v2TBDxvrqhdjiHAHi+U8U/ """ class TestProtocolsRPKI(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): # call base-classes classmethod super(TestProtocolsRPKI, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(bgp_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() + frrconfig = self.getFRRconfig('rpki') + self.assertNotIn(f'rpki', frrconfig) + # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(bgp_daemon)) def 
test_rpki(self): expire_interval = '3600' polling_period = '600' retry_interval = '300' cache = { '192.0.2.1' : { 'port' : '8080', 'preference' : '10' }, '2001:db8::1' : { 'port' : '1234', 'preference' : '30' }, 'rpki.vyos.net' : { 'port' : '5678', 'preference' : '40' }, } self.cli_set(base_path + ['expire-interval', expire_interval]) self.cli_set(base_path + ['polling-period', polling_period]) self.cli_set(base_path + ['retry-interval', retry_interval]) for peer, peer_config in cache.items(): self.cli_set(base_path + ['cache', peer, 'port', peer_config['port']]) self.cli_set(base_path + ['cache', peer, 'preference', peer_config['preference']]) # commit changes self.cli_commit() # Verify FRR configuration frrconfig = self.getFRRconfig('rpki') self.assertIn(f'rpki expire_interval {expire_interval}', frrconfig) self.assertIn(f'rpki polling_period {polling_period}', frrconfig) self.assertIn(f'rpki retry_interval {retry_interval}', frrconfig) for peer, peer_config in cache.items(): port = peer_config['port'] preference = peer_config['preference'] - self.assertIn(f'rpki cache {peer} {port} preference {preference}', frrconfig) + self.assertIn(f'rpki cache tcp {peer} {port} preference {preference}', frrconfig) def test_rpki_ssh(self): polling = '7200' cache = { '192.0.2.3' : { 'port' : '1234', 'username' : 'foo', 'preference' : '10' }, '192.0.2.4' : { 'port' : '5678', 'username' : 'bar', 'preference' : '20' }, } self.cli_set(['pki', 'openssh', rpki_key_name, 'private', 'key', rpki_ssh_key.replace('\n','')]) self.cli_set(['pki', 'openssh', rpki_key_name, 'public', 'key', rpki_ssh_pub.replace('\n','')]) self.cli_set(['pki', 'openssh', rpki_key_name, 'public', 'type', rpki_key_type]) for cache_name, cache_config in cache.items(): self.cli_set(base_path + ['cache', cache_name, 'port', cache_config['port']]) self.cli_set(base_path + ['cache', cache_name, 'preference', cache_config['preference']]) self.cli_set(base_path + ['cache', cache_name, 'ssh', 'username', cache_config['username']]) self.cli_set(base_path + ['cache', cache_name, 'ssh', 'key', rpki_key_name]) # commit changes self.cli_commit() # Verify FRR configuration frrconfig = self.getFRRconfig('rpki') for cache_name, cache_config in cache.items(): port = cache_config['port'] preference = cache_config['preference'] username = cache_config['username'] - self.assertIn(f'rpki cache {cache_name} {port} {username} /run/frr/id_rpki_{cache_name} /run/frr/id_rpki_{cache_name}.pub preference {preference}', frrconfig) + self.assertIn(f'rpki cache ssh {cache_name} {port} {username} /run/frr/id_rpki_{cache_name} /run/frr/id_rpki_{cache_name}.pub preference {preference}', frrconfig) # Verify content of SSH keys tmp = read_file(f'/run/frr/id_rpki_{cache_name}') self.assertIn(rpki_ssh_key.replace('\n',''), tmp) tmp = read_file(f'/run/frr/id_rpki_{cache_name}.pub') self.assertIn(rpki_ssh_pub.replace('\n',''), tmp) # Change OpenSSH key and verify it was properly written to filesystem self.cli_set(['pki', 'openssh', rpki_key_name, 'private', 'key', rpki_ssh_key_replacement.replace('\n','')]) self.cli_set(['pki', 'openssh', rpki_key_name, 'public', 'key', rpki_ssh_pub_replacement.replace('\n','')]) # commit changes self.cli_commit() for cache_name, cache_config in cache.items(): port = cache_config['port'] preference = cache_config['preference'] username = cache_config['username'] - self.assertIn(f'rpki cache {cache_name} {port} {username} /run/frr/id_rpki_{cache_name} /run/frr/id_rpki_{cache_name}.pub preference {preference}', frrconfig) + self.assertIn(f'rpki 
cache ssh {cache_name} {port} {username} /run/frr/id_rpki_{cache_name} /run/frr/id_rpki_{cache_name}.pub preference {preference}', frrconfig) # Verify content of SSH keys tmp = read_file(f'/run/frr/id_rpki_{cache_name}') self.assertIn(rpki_ssh_key_replacement.replace('\n',''), tmp) tmp = read_file(f'/run/frr/id_rpki_{cache_name}.pub') self.assertIn(rpki_ssh_pub_replacement.replace('\n',''), tmp) self.cli_delete(['pki', 'openssh']) def test_rpki_verify_preference(self): cache = { '192.0.2.1' : { 'port' : '8080', 'preference' : '1' }, '192.0.2.2' : { 'port' : '9090', 'preference' : '1' }, } for peer, peer_config in cache.items(): self.cli_set(base_path + ['cache', peer, 'port', peer_config['port']]) self.cli_set(base_path + ['cache', peer, 'preference', peer_config['preference']]) # check validate() - preferences must be unique with self.assertRaises(ConfigSessionError): self.cli_commit() if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_segment-routing.py b/smoketest/scripts/cli/test_protocols_segment-routing.py index daa7f088f..624985476 100755 --- a/smoketest/scripts/cli/test_protocols_segment-routing.py +++ b/smoketest/scripts/cli/test_protocols_segment-routing.py @@ -1,110 +1,110 @@ #!/usr/bin/env python3 # # Copyright (C) 2023-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frrender import zebra_daemon from vyos.utils.process import process_named_running from vyos.utils.system import sysctl_read base_path = ['protocols', 'segment-routing'] -PROCESS_NAME = 'zebra' class TestProtocolsSegmentRouting(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): # call base-classes classmethod super(TestProtocolsSegmentRouting, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same - cls.daemon_pid = process_named_running(PROCESS_NAME) + cls.daemon_pid = process_named_running(zebra_daemon) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity - self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) + self.assertEqual(self.daemon_pid, process_named_running(zebra_daemon)) def test_srv6(self): interfaces = Section.interfaces('ethernet', vlan=False) locators = { 'foo' : { 'prefix' : '2001:a::/64' }, 'foo' : { 'prefix' : '2001:b::/64', 'usid' : {} }, } for locator, locator_config in locators.items(): self.cli_set(base_path + ['srv6', 'locator', locator, 'prefix', locator_config['prefix']]) if 'usid' in locator_config: self.cli_set(base_path + ['srv6', 'locator', locator, 'behavior-usid']) # verify() - SRv6 should be enabled on at least one interface! 
with self.assertRaises(ConfigSessionError): self.cli_commit() for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'srv6']) self.cli_commit() for interface in interfaces: self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_enabled'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_require_hmac'), '0') # default - frrconfig = self.getFRRconfig(f'segment-routing', daemon='zebra') + frrconfig = self.getFRRconfig(f'segment-routing', daemon=zebra_daemon) self.assertIn(f'segment-routing', frrconfig) self.assertIn(f' srv6', frrconfig) self.assertIn(f' locators', frrconfig) for locator, locator_config in locators.items(): self.assertIn(f' locator {locator}', frrconfig) self.assertIn(f' prefix {locator_config["prefix"]} block-len 40 node-len 24 func-bits 16', frrconfig) def test_srv6_sysctl(self): interfaces = Section.interfaces('ethernet', vlan=False) # HMAC accept for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'srv6']) self.cli_set(base_path + ['interface', interface, 'srv6', 'hmac', 'ignore']) self.cli_commit() for interface in interfaces: self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_enabled'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_require_hmac'), '-1') # ignore # HMAC drop for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'srv6']) self.cli_set(base_path + ['interface', interface, 'srv6', 'hmac', 'drop']) self.cli_commit() for interface in interfaces: self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_enabled'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_require_hmac'), '1') # drop # Disable SRv6 on first interface first_if = interfaces[-1] self.cli_delete(base_path + ['interface', first_if]) self.cli_commit() self.assertEqual(sysctl_read(f'net.ipv6.conf.{first_if}.seg6_enabled'), '0') if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_static.py b/smoketest/scripts/cli/test_protocols_static.py index f676e2a52..a2cde0237 100755 --- a/smoketest/scripts/cli/test_protocols_static.py +++ b/smoketest/scripts/cli/test_protocols_static.py @@ -1,482 +1,571 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
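# Editorial aside: the segment-routing and system-ip/ipv6 smoketests in this patch lean heavily on
# vyos.utils.system.sysctl_read(). The snippet below is only a hedged sketch of what such a helper
# could look like - the real implementation may differ - and it assumes a dotted sysctl key maps
# directly onto a /proc/sys path (a naive mapping that would break for interface names containing
# dots, e.g. VLAN sub-interfaces).
def sysctl_read_sketch(name: str) -> str:
    # 'net.ipv6.conf.eth0.seg6_enabled' -> '/proc/sys/net/ipv6/conf/eth0/seg6_enabled'
    path = '/proc/sys/' + name.replace('.', '/')
    with open(path, 'r') as f:
        return f.read().strip()
# Usage in the spirit of the tests: sysctl_read_sketch('net.ipv6.conf.all.forwarding') returns '0' or '1'.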
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.template import is_ipv6 from vyos.utils.network import get_interface_config from vyos.utils.network import get_vrf_tableid base_path = ['protocols', 'static'] vrf_path = ['protocols', 'vrf'] routes = { '10.0.0.0/8' : { 'next_hop' : { '192.0.2.100' : { 'distance' : '100' }, '192.0.2.110' : { 'distance' : '110', 'interface' : 'eth0' }, '192.0.2.120' : { 'distance' : '120', 'disable' : '' }, '192.0.2.130' : { 'bfd' : '' }, - '192.0.2.140' : { 'bfd_source' : '192.0.2.10' }, + '192.0.2.131' : { 'bfd' : '', + 'bfd_profile' : 'vyos1' }, + '192.0.2.140' : { 'bfd' : '', + 'bfd_source' : '192.0.2.10', + 'bfd_profile' : 'vyos2' }, }, 'interface' : { 'eth0' : { 'distance' : '130' }, 'eth1' : { 'distance' : '140' }, }, 'blackhole' : { 'distance' : '250', 'tag' : '500' }, }, '172.16.0.0/12' : { 'interface' : { 'eth0' : { 'distance' : '50', 'vrf' : 'black' }, 'eth1' : { 'distance' : '60', 'vrf' : 'black' }, }, 'blackhole' : { 'distance' : '90' }, }, '192.0.2.0/24' : { 'interface' : { 'eth0' : { 'distance' : '50', 'vrf' : 'black' }, 'eth1' : { 'disable' : '' }, }, 'blackhole' : { 'distance' : '90' }, }, '100.64.0.0/16' : { 'blackhole' : {}, }, '100.65.0.0/16' : { 'reject' : { 'distance' : '10', 'tag' : '200' }, }, '100.66.0.0/16' : { 'blackhole' : {}, 'reject' : { 'distance' : '10', 'tag' : '200' }, }, '2001:db8:100::/40' : { 'next_hop' : { '2001:db8::1' : { 'distance' : '10' }, '2001:db8::2' : { 'distance' : '20', 'interface' : 'eth0' }, '2001:db8::3' : { 'distance' : '30', 'disable' : '' }, '2001:db8::4' : { 'bfd' : '' }, '2001:db8::5' : { 'bfd_source' : '2001:db8::ffff' }, }, 'interface' : { 'eth0' : { 'distance' : '40', 'vrf' : 'black' }, 'eth1' : { 'distance' : '50', 'disable' : '' }, }, 'blackhole' : { 'distance' : '250', 'tag' : '500' }, }, '2001:db8:200::/40' : { 'interface' : { 'eth0' : { 'distance' : '40' }, 'eth1' : { 'distance' : '50', 'disable' : '' }, }, 'blackhole' : { 'distance' : '250', 'tag' : '500' }, }, '2001:db8:300::/40' : { 'reject' : { 'distance' : '250', 'tag' : '500' }, }, '2001:db8:400::/40' : { 'next_hop' : { '2001:db8::400' : { 'segments' : '2001:db8:aaaa::400/2002::400/2003::400/2004::400' }, }, }, '2001:db8:500::/40' : { 'next_hop' : { '2001:db8::500' : { 'segments' : '2001:db8:aaaa::500/2002::500/2003::500/2004::500' }, }, }, '2001:db8:600::/40' : { 'interface' : { 'eth0' : { 'segments' : '2001:db8:aaaa::600/2002::600' }, }, }, '2001:db8:700::/40' : { 'interface' : { 'eth1' : { 'segments' : '2001:db8:aaaa::700' }, }, }, '2001:db8::/32' : { 'blackhole' : { 'distance' : '200', 'tag' : '600' } }, } +multicast_routes = { + '224.0.0.0/24' : { + 'next_hop' : { + '224.203.0.1' : { }, + '224.203.0.2' : { 'distance' : '110'}, + }, + }, + '224.1.0.0/24' : { + 'next_hop' : { + '224.205.0.1' : { 'disable' : {} }, + '224.205.0.2' : { 'distance' : '110'}, + }, + }, + '224.2.0.0/24' : { + 'next_hop' : { + '1.2.3.0' : { }, + '1.2.3.1' : { 'distance' : '110'}, + }, + }, + '224.10.0.0/24' : { + 'interface' : { + 'eth1' : { 'disable' : {} }, + 'eth2' : { 'distance' : '110'}, + }, + }, + '224.11.0.0/24' : { + 'interface' : { + 'eth0' : { }, + 'eth1' : { 'distance' : '10'}, + }, + }, + '224.12.0.0/24' : { + 'interface' : { + 'eth0' : { }, + 'eth1' : { 'distance' : '200'}, + }, + }, +} + tables = ['80', '81', '82'] class TestProtocolsStatic(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsStatic, cls).setUpClass() cls.cli_delete(cls, ['vrf']) 
cls.cli_set(cls, ['vrf', 'name', 'black', 'table', '43210']) @classmethod def tearDownClass(cls): cls.cli_delete(cls, ['vrf']) super(TestProtocolsStatic, cls).tearDownClass() def tearDown(self): self.cli_delete(base_path) self.cli_commit() v4route = self.getFRRconfig('ip route', end='') self.assertFalse(v4route) v6route = self.getFRRconfig('ipv6 route', end='') self.assertFalse(v6route) def test_01_static(self): - bfd_profile = 'vyos-test' for route, route_config in routes.items(): route_type = 'route' if is_ipv6(route): route_type = 'route6' base = base_path + [route_type, route] if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): self.cli_set(base + ['next-hop', next_hop]) if 'disable' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'disable']) if 'distance' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) if 'interface' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'interface', next_hop_config['interface']]) if 'vrf' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'vrf', next_hop_config['vrf']]) if 'bfd' in next_hop_config: - self.cli_set(base + ['next-hop', next_hop, 'bfd', 'profile', bfd_profile ]) - if 'bfd_source' in next_hop_config: - self.cli_set(base + ['next-hop', next_hop, 'bfd', 'multi-hop', 'source', next_hop_config['bfd_source'], 'profile', bfd_profile]) + self.cli_set(base + ['next-hop', next_hop, 'bfd']) + if 'bfd_profile' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'bfd', 'profile', next_hop_config['bfd_profile']]) + if 'bfd_source' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'bfd', 'multi-hop', 'source-address', next_hop_config['bfd_source']]) if 'segments' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'segments', next_hop_config['segments']]) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): self.cli_set(base + ['interface', interface]) if 'disable' in interface_config: self.cli_set(base + ['interface', interface, 'disable']) if 'distance' in interface_config: self.cli_set(base + ['interface', interface, 'distance', interface_config['distance']]) if 'vrf' in interface_config: self.cli_set(base + ['interface', interface, 'vrf', interface_config['vrf']]) if 'segments' in interface_config: self.cli_set(base + ['interface', interface, 'segments', interface_config['segments']]) if 'blackhole' in route_config: self.cli_set(base + ['blackhole']) if 'distance' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'distance', route_config['blackhole']['distance']]) if 'tag' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'tag', route_config['blackhole']['tag']]) if 'reject' in route_config: self.cli_set(base + ['reject']) if 'distance' in route_config['reject']: self.cli_set(base + ['reject', 'distance', route_config['reject']['distance']]) if 'tag' in route_config['reject']: self.cli_set(base + ['reject', 'tag', route_config['reject']['tag']]) if {'blackhole', 'reject'} <= set(route_config): # Can not use blackhole and reject at the same time with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base + ['blackhole']) self.cli_delete(base + ['reject']) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig('ip route', end='') # Verify routes for route, route_config in routes.items(): ip_ipv6 = 'ip' if is_ipv6(route): ip_ipv6 = 'ipv6' 
if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): tmp = f'{ip_ipv6} route {route} {next_hop}' if 'interface' in next_hop_config: tmp += ' ' + next_hop_config['interface'] if 'distance' in next_hop_config: tmp += ' ' + next_hop_config['distance'] if 'vrf' in next_hop_config: tmp += ' nexthop-vrf ' + next_hop_config['vrf'] if 'bfd' in next_hop_config: - tmp += ' bfd profile ' + bfd_profile - if 'bfd_source' in next_hop_config: - tmp += ' bfd multi-hop source ' + next_hop_config['bfd_source'] + ' profile ' + bfd_profile + tmp += ' bfd' + if 'bfd_source' in next_hop_config: + tmp += ' multi-hop source ' + next_hop_config['bfd_source'] + if 'bfd_profile' in next_hop_config: + tmp += ' profile ' + next_hop_config['bfd_profile'] if 'segments' in next_hop_config: tmp += ' segments ' + next_hop_config['segments'] if 'disable' in next_hop_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): tmp = f'{ip_ipv6} route {route} {interface}' if 'interface' in interface_config: tmp += ' ' + interface_config['interface'] if 'distance' in interface_config: tmp += ' ' + interface_config['distance'] if 'vrf' in interface_config: tmp += ' nexthop-vrf ' + interface_config['vrf'] if 'segments' in interface_config: tmp += ' segments ' + interface_config['segments'] if 'disable' in interface_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if {'blackhole', 'reject'} <= set(route_config): # Can not use blackhole and reject at the same time # Config error validated above - skip this route continue if 'blackhole' in route_config: tmp = f'{ip_ipv6} route {route} blackhole' if 'tag' in route_config['blackhole']: tmp += ' tag ' + route_config['blackhole']['tag'] if 'distance' in route_config['blackhole']: tmp += ' ' + route_config['blackhole']['distance'] self.assertIn(tmp, frrconfig) if 'reject' in route_config: tmp = f'{ip_ipv6} route {route} reject' if 'tag' in route_config['reject']: tmp += ' tag ' + route_config['reject']['tag'] if 'distance' in route_config['reject']: tmp += ' ' + route_config['reject']['distance'] self.assertIn(tmp, frrconfig) def test_02_static_table(self): for table in tables: for route, route_config in routes.items(): route_type = 'route' if is_ipv6(route): route_type = 'route6' base = base_path + ['table', table, route_type, route] if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): self.cli_set(base + ['next-hop', next_hop]) if 'disable' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'disable']) if 'distance' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) if 'interface' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'interface', next_hop_config['interface']]) if 'vrf' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'vrf', next_hop_config['vrf']]) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): self.cli_set(base + ['interface', interface]) if 'disable' in interface_config: self.cli_set(base + ['interface', interface, 'disable']) if 'distance' in interface_config: self.cli_set(base + ['interface', interface, 'distance', interface_config['distance']]) if 'vrf' in interface_config: self.cli_set(base + ['interface', interface, 'vrf', interface_config['vrf']]) if 'blackhole' in route_config: 
self.cli_set(base + ['blackhole']) if 'distance' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'distance', route_config['blackhole']['distance']]) if 'tag' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'tag', route_config['blackhole']['tag']]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig('ip route', end='') for table in tables: # Verify routes for route, route_config in routes.items(): ip_ipv6 = 'ip' if is_ipv6(route): ip_ipv6 = 'ipv6' if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): tmp = f'{ip_ipv6} route {route} {next_hop}' if 'interface' in next_hop_config: tmp += ' ' + next_hop_config['interface'] if 'distance' in next_hop_config: tmp += ' ' + next_hop_config['distance'] if 'vrf' in next_hop_config: tmp += ' nexthop-vrf ' + next_hop_config['vrf'] tmp += ' table ' + table if 'disable' in next_hop_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): tmp = f'{ip_ipv6} route {route} {interface}' if 'interface' in interface_config: tmp += ' ' + interface_config['interface'] if 'distance' in interface_config: tmp += ' ' + interface_config['distance'] if 'vrf' in interface_config: tmp += ' nexthop-vrf ' + interface_config['vrf'] tmp += ' table ' + table if 'disable' in interface_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'blackhole' in route_config: tmp = f'{ip_ipv6} route {route} blackhole' if 'tag' in route_config['blackhole']: tmp += ' tag ' + route_config['blackhole']['tag'] if 'distance' in route_config['blackhole']: tmp += ' ' + route_config['blackhole']['distance'] tmp += ' table ' + table self.assertIn(tmp, frrconfig) def test_03_static_vrf(self): # Create VRF instances and apply the static routes from above to FRR. # Re-read the configured routes and match them if they are programmed # properly. 
This also includes VRF leaking vrfs = { 'red' : { 'table' : '1000' }, 'green' : { 'table' : '2000' }, 'blue' : { 'table' : '3000' }, } for vrf, vrf_config in vrfs.items(): vrf_base_path = ['vrf', 'name', vrf] self.cli_set(vrf_base_path + ['table', vrf_config['table']]) for route, route_config in routes.items(): route_type = 'route' if is_ipv6(route): route_type = 'route6' route_base_path = vrf_base_path + ['protocols', 'static', route_type, route] if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): self.cli_set(route_base_path + ['next-hop', next_hop]) if 'disable' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'disable']) if 'distance' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) if 'interface' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'interface', next_hop_config['interface']]) if 'vrf' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'vrf', next_hop_config['vrf']]) if 'segments' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'segments', next_hop_config['segments']]) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): self.cli_set(route_base_path + ['interface', interface]) if 'disable' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'disable']) if 'distance' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'distance', interface_config['distance']]) if 'vrf' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'vrf', interface_config['vrf']]) if 'segments' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'segments', interface_config['segments']]) if 'blackhole' in route_config: self.cli_set(route_base_path + ['blackhole']) if 'distance' in route_config['blackhole']: self.cli_set(route_base_path + ['blackhole', 'distance', route_config['blackhole']['distance']]) if 'tag' in route_config['blackhole']: self.cli_set(route_base_path + ['blackhole', 'tag', route_config['blackhole']['tag']]) # commit changes self.cli_commit() for vrf, vrf_config in vrfs.items(): tmp = get_interface_config(vrf) # Compare VRF table ID self.assertEqual(get_vrf_tableid(vrf), int(vrf_config['table'])) self.assertEqual(tmp['linkinfo']['info_kind'], 'vrf') # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', endsection='^exit-vrf') self.assertIn(f'vrf {vrf}', frrconfig) # Verify routes for route, route_config in routes.items(): ip_ipv6 = 'ip' if is_ipv6(route): ip_ipv6 = 'ipv6' if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): tmp = f'{ip_ipv6} route {route} {next_hop}' if 'interface' in next_hop_config: tmp += ' ' + next_hop_config['interface'] if 'distance' in next_hop_config: tmp += ' ' + next_hop_config['distance'] if 'vrf' in next_hop_config: tmp += ' nexthop-vrf ' + next_hop_config['vrf'] if 'segments' in next_hop_config: tmp += ' segments ' + next_hop_config['segments'] if 'disable' in next_hop_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): tmp = f'{ip_ipv6} route {route} {interface}' if 'interface' in interface_config: tmp += ' ' + interface_config['interface'] if 'distance' in 
interface_config: tmp += ' ' + interface_config['distance'] if 'vrf' in interface_config: tmp += ' nexthop-vrf ' + interface_config['vrf'] if 'segments' in interface_config: tmp += ' segments ' + interface_config['segments'] if 'disable' in interface_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'blackhole' in route_config: tmp = f'{ip_ipv6} route {route} blackhole' if 'tag' in route_config['blackhole']: tmp += ' tag ' + route_config['blackhole']['tag'] if 'distance' in route_config['blackhole']: tmp += ' ' + route_config['blackhole']['distance'] self.assertIn(tmp, frrconfig) + def test_04_static_multicast(self): + for route, route_config in multicast_routes.items(): + if 'next_hop' in route_config: + base = base_path + ['mroute', route] + for next_hop, next_hop_config in route_config['next_hop'].items(): + self.cli_set(base + ['next-hop', next_hop]) + if 'distance' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) + if 'disable' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'disable']) + + if 'interface' in route_config: + base = base_path + ['mroute', route] + for next_hop, next_hop_config in route_config['interface'].items(): + self.cli_set(base + ['interface', next_hop]) + if 'distance' in next_hop_config: + self.cli_set(base + ['interface', next_hop, 'distance', next_hop_config['distance']]) + + self.cli_commit() + + # Verify FRR configuration + frrconfig = self.getFRRconfig('ip mroute', end='') + for route, route_config in multicast_routes.items(): + if 'next_hop' in route_config: + for next_hop, next_hop_config in route_config['next_hop'].items(): + tmp = f'ip mroute {route} {next_hop}' + if 'distance' in next_hop_config: + tmp += ' ' + next_hop_config['distance'] + if 'disable' in next_hop_config: + self.assertNotIn(tmp, frrconfig) + else: + self.assertIn(tmp, frrconfig) + + if 'interface' in route_config: + for next_hop, next_hop_config in route_config['interface'].items(): + tmp = f'ip mroute {route} {next_hop}' + if 'distance' in next_hop_config: + tmp += ' ' + next_hop_config['distance'] + if 'disable' in next_hop_config: + self.assertNotIn(tmp, frrconfig) + else: + self.assertIn(tmp, frrconfig) + if __name__ == '__main__': - unittest.main(verbosity=2, failfast=True) + unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_static_multicast.py b/smoketest/scripts/cli/test_protocols_static_multicast.py deleted file mode 100755 index 9fdda236f..000000000 --- a/smoketest/scripts/cli/test_protocols_static_multicast.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2024 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>.
- -import unittest - -from base_vyostest_shim import VyOSUnitTestSHIM - - -base_path = ['protocols', 'static', 'multicast'] - - -class TestProtocolsStaticMulticast(VyOSUnitTestSHIM.TestCase): - - def tearDown(self): - self.cli_delete(base_path) - self.cli_commit() - - mroute = self.getFRRconfig('ip mroute', end='') - self.assertFalse(mroute) - - def test_01_static_multicast(self): - - self.cli_set(base_path + ['route', '224.202.0.0/24', 'next-hop', '224.203.0.1']) - self.cli_set(base_path + ['interface-route', '224.203.0.0/24', 'next-hop-interface', 'eth0']) - - self.cli_commit() - - # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig('ip mroute', end='') - - self.assertIn('ip mroute 224.202.0.0/24 224.203.0.1', frrconfig) - self.assertIn('ip mroute 224.203.0.0/24 eth0', frrconfig) - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_system_ip.py b/smoketest/scripts/cli/test_system_ip.py index 5b0090237..7d730f7b2 100755 --- a/smoketest/scripts/cli/test_system_ip.py +++ b/smoketest/scripts/cli/test_system_ip.py @@ -1,126 +1,134 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError -from vyos.utils.file import read_file +from vyos.utils.system import sysctl_read +from vyos.xml_ref import default_value +from vyos.frrender import mgmt_daemon +from vyos.frrender import zebra_daemon base_path = ['system', 'ip'] class TestSystemIP(VyOSUnitTestSHIM.TestCase): + @classmethod + def setUpClass(cls): + super(TestSystemIP, cls).setUpClass() + # ensure we can also run this test on a live system - so lets clean + # out the current configuration :) + cls.cli_delete(cls, base_path) + def tearDown(self): self.cli_delete(base_path) self.cli_commit() def test_system_ip_forwarding(self): # Test if IPv4 forwarding can be disabled globally, default is '1' # which means forwarding enabled - all_forwarding = '/proc/sys/net/ipv4/conf/all/forwarding' - self.assertEqual(read_file(all_forwarding), '1') + self.assertEqual(sysctl_read('net.ipv4.conf.all.forwarding'), '1') self.cli_set(base_path + ['disable-forwarding']) self.cli_commit() + self.assertEqual(sysctl_read('net.ipv4.conf.all.forwarding'), '0') + frrconfig = self.getFRRconfig('', end='', daemon=zebra_daemon) + self.assertIn('no ip forwarding', frrconfig) - self.assertEqual(read_file(all_forwarding), '0') + self.cli_delete(base_path + ['disable-forwarding']) + self.cli_commit() + self.assertEqual(sysctl_read('net.ipv4.conf.all.forwarding'), '1') + frrconfig = self.getFRRconfig('', end='', daemon=zebra_daemon) + self.assertNotIn('no ip forwarding', frrconfig) def test_system_ip_multipath(self): # Test IPv4 multipathing options, options default to off -> '0' - use_neigh = '/proc/sys/net/ipv4/fib_multipath_use_neigh' - hash_policy = '/proc/sys/net/ipv4/fib_multipath_hash_policy' - - 
self.assertEqual(read_file(use_neigh), '0') - self.assertEqual(read_file(hash_policy), '0') + self.assertEqual(sysctl_read('net.ipv4.fib_multipath_use_neigh'), '0') + self.assertEqual(sysctl_read('net.ipv4.fib_multipath_hash_policy'), '0') self.cli_set(base_path + ['multipath', 'ignore-unreachable-nexthops']) self.cli_set(base_path + ['multipath', 'layer4-hashing']) self.cli_commit() - self.assertEqual(read_file(use_neigh), '1') - self.assertEqual(read_file(hash_policy), '1') + self.assertEqual(sysctl_read('net.ipv4.fib_multipath_use_neigh'), '1') + self.assertEqual(sysctl_read('net.ipv4.fib_multipath_hash_policy'), '1') def test_system_ip_arp_table_size(self): - # Maximum number of entries to keep in the ARP cache, the - # default is 8k + cli_default = int(default_value(base_path + ['arp', 'table-size'])) + def _verify_gc_thres(table_size): + self.assertEqual(sysctl_read('net.ipv4.neigh.default.gc_thresh3'), str(table_size)) + self.assertEqual(sysctl_read('net.ipv4.neigh.default.gc_thresh2'), str(table_size // 2)) + self.assertEqual(sysctl_read('net.ipv4.neigh.default.gc_thresh1'), str(table_size // 8)) - gc_thresh3 = '/proc/sys/net/ipv4/neigh/default/gc_thresh3' - gc_thresh2 = '/proc/sys/net/ipv4/neigh/default/gc_thresh2' - gc_thresh1 = '/proc/sys/net/ipv4/neigh/default/gc_thresh1' - self.assertEqual(read_file(gc_thresh3), '8192') - self.assertEqual(read_file(gc_thresh2), '4096') - self.assertEqual(read_file(gc_thresh1), '1024') + _verify_gc_thres(cli_default) for size in [1024, 2048, 4096, 8192, 16384, 32768]: self.cli_set(base_path + ['arp', 'table-size', str(size)]) self.cli_commit() - - self.assertEqual(read_file(gc_thresh3), str(size)) - self.assertEqual(read_file(gc_thresh2), str(size // 2)) - self.assertEqual(read_file(gc_thresh1), str(size // 8)) + _verify_gc_thres(size) def test_system_ip_protocol_route_map(self): protocols = ['any', 'babel', 'bgp', 'connected', 'eigrp', 'isis', 'kernel', 'ospf', 'rip', 'static', 'table'] for protocol in protocols: self.cli_set(['policy', 'route-map', f'route-map-{protocol}', 'rule', '10', 'action', 'permit']) self.cli_set(base_path + ['protocol', protocol, 'route-map', f'route-map-{protocol}']) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ip protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ip protocol', end='', daemon=mgmt_daemon) for protocol in protocols: self.assertIn(f'ip protocol {protocol} route-map route-map-{protocol}', frrconfig) # Delete route-maps self.cli_delete(['policy', 'route-map']) self.cli_delete(base_path + ['protocol']) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ip protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ip protocol', end='', daemon=mgmt_daemon) self.assertNotIn(f'ip protocol', frrconfig) def test_system_ip_protocol_non_existing_route_map(self): non_existing = 'non-existing' self.cli_set(base_path + ['protocol', 'static', 'route-map', non_existing]) # VRF does yet not exist - an error must be thrown with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', non_existing, 'rule', '10', 'action', 'deny']) # Commit again self.cli_commit() def test_system_ip_nht(self): self.cli_set(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config applied to FRR - frrconfig = self.getFRRconfig('', end='', daemon='zebra') + frrconfig = self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertIn(f'no ip nht 
resolve-via-default', frrconfig) self.cli_delete(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config removed to FRR - frrconfig = self.getFRRconfig('', end='', daemon='zebra') + frrconfig = self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertNotIn(f'no ip nht resolve-via-default', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_system_ipv6.py b/smoketest/scripts/cli/test_system_ipv6.py index 0c77c1dd4..ebf620204 100755 --- a/smoketest/scripts/cli/test_system_ipv6.py +++ b/smoketest/scripts/cli/test_system_ipv6.py @@ -1,145 +1,155 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError -from vyos.utils.file import read_file +from vyos.utils.system import sysctl_read +from vyos.xml_ref import default_value +from vyos.frrender import mgmt_daemon +from vyos.frrender import zebra_daemon base_path = ['system', 'ipv6'] -file_forwarding = '/proc/sys/net/ipv6/conf/all/forwarding' -file_disable = '/proc/sys/net/ipv6/conf/all/disable_ipv6' -file_dad = '/proc/sys/net/ipv6/conf/all/accept_dad' -file_multipath = '/proc/sys/net/ipv6/fib_multipath_hash_policy' - class TestSystemIPv6(VyOSUnitTestSHIM.TestCase): + @classmethod + def setUpClass(cls): + super(TestSystemIPv6, cls).setUpClass() + # ensure we can also run this test on a live system - so lets clean + # out the current configuration :) + cls.cli_delete(cls, base_path) + def tearDown(self): self.cli_delete(base_path) self.cli_commit() def test_system_ipv6_forwarding(self): # Test if IPv6 forwarding can be disabled globally, default is '1' # which means forwearding enabled - self.assertEqual(read_file(file_forwarding), '1') + self.assertEqual(sysctl_read('net.ipv6.conf.all.forwarding'), '1') self.cli_set(base_path + ['disable-forwarding']) self.cli_commit() + self.assertEqual(sysctl_read('net.ipv6.conf.all.forwarding'), '0') + frrconfig = self.getFRRconfig('', end='', daemon=zebra_daemon) + self.assertIn('no ipv6 forwarding', frrconfig) - self.assertEqual(read_file(file_forwarding), '0') + self.cli_delete(base_path + ['disable-forwarding']) + self.cli_commit() + self.assertEqual(sysctl_read('net.ipv6.conf.all.forwarding'), '1') + frrconfig = self.getFRRconfig('', end='', daemon=zebra_daemon) + self.assertNotIn('no ipv6 forwarding', frrconfig) def test_system_ipv6_strict_dad(self): # This defaults to 1 - self.assertEqual(read_file(file_dad), '1') + self.assertEqual(sysctl_read('net.ipv6.conf.all.accept_dad'), '1') # Do not assign any IPv6 address on interfaces, this requires a reboot # which can not be tested, but we can read the config file :) self.cli_set(base_path + ['strict-dad']) self.cli_commit() # Verify configuration file - self.assertEqual(read_file(file_dad), '2') + self.assertEqual(sysctl_read('net.ipv6.conf.all.accept_dad'), '2') 
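# Editorial aside: both the IPv4 'arp table-size' test above and the IPv6 'neighbor table-size'
# test below assert the same split of the configured size across the kernel gc_thresh sysctls.
# A small, self-contained sketch of that relationship (function name invented for illustration):
def expected_gc_thresholds(table_size: int) -> dict:
    return {
        'gc_thresh3': table_size,        # hard upper limit, equals the configured table size
        'gc_thresh2': table_size // 2,   # soft limit, half of the configured size
        'gc_thresh1': table_size // 8,   # minimum below which no garbage collection runs
    }
# The CLI default of 8192 therefore yields 8192/4096/1024, matching the values the tests
# read back via sysctl_read().
assert expected_gc_thresholds(8192) == {'gc_thresh3': 8192, 'gc_thresh2': 4096, 'gc_thresh1': 1024}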
def test_system_ipv6_multipath(self): # This defaults to 0 - self.assertEqual(read_file(file_multipath), '0') + self.assertEqual(sysctl_read('net.ipv6.fib_multipath_hash_policy'), '0') # Do not assign any IPv6 address on interfaces, this requires a reboot # which can not be tested, but we can read the config file :) self.cli_set(base_path + ['multipath', 'layer4-hashing']) self.cli_commit() # Verify configuration file - self.assertEqual(read_file(file_multipath), '1') + self.assertEqual(sysctl_read('net.ipv6.fib_multipath_hash_policy'), '1') def test_system_ipv6_neighbor_table_size(self): # Maximum number of entries to keep in the ARP cache, the # default is 8192 + cli_default = int(default_value(base_path + ['neighbor', 'table-size'])) - gc_thresh3 = '/proc/sys/net/ipv6/neigh/default/gc_thresh3' - gc_thresh2 = '/proc/sys/net/ipv6/neigh/default/gc_thresh2' - gc_thresh1 = '/proc/sys/net/ipv6/neigh/default/gc_thresh1' - self.assertEqual(read_file(gc_thresh3), '8192') - self.assertEqual(read_file(gc_thresh2), '4096') - self.assertEqual(read_file(gc_thresh1), '1024') + def _verify_gc_thres(table_size): + self.assertEqual(sysctl_read('net.ipv6.neigh.default.gc_thresh3'), str(table_size)) + self.assertEqual(sysctl_read('net.ipv6.neigh.default.gc_thresh2'), str(table_size // 2)) + self.assertEqual(sysctl_read('net.ipv6.neigh.default.gc_thresh1'), str(table_size // 8)) + + _verify_gc_thres(cli_default) for size in [1024, 2048, 4096, 8192, 16384, 32768]: self.cli_set(base_path + ['neighbor', 'table-size', str(size)]) self.cli_commit() - - self.assertEqual(read_file(gc_thresh3), str(size)) - self.assertEqual(read_file(gc_thresh2), str(size // 2)) - self.assertEqual(read_file(gc_thresh1), str(size // 8)) + _verify_gc_thres(size) def test_system_ipv6_protocol_route_map(self): protocols = ['any', 'babel', 'bgp', 'connected', 'isis', 'kernel', 'ospfv3', 'ripng', 'static', 'table'] for protocol in protocols: route_map = 'route-map-' + protocol.replace('ospfv3', 'ospf6') self.cli_set(['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) self.cli_set(base_path + ['protocol', protocol, 'route-map', route_map]) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon=mgmt_daemon) for protocol in protocols: # VyOS and FRR use a different name for OSPFv3 (IPv6) if protocol == 'ospfv3': protocol = 'ospf6' self.assertIn(f'ipv6 protocol {protocol} route-map route-map-{protocol}', frrconfig) # Delete route-maps self.cli_delete(['policy', 'route-map']) self.cli_delete(base_path + ['protocol']) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon=mgmt_daemon) self.assertNotIn(f'ipv6 protocol', frrconfig) def test_system_ipv6_protocol_non_existing_route_map(self): non_existing = 'non-existing6' self.cli_set(base_path + ['protocol', 'static', 'route-map', non_existing]) # VRF does yet not exist - an error must be thrown with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', non_existing, 'rule', '10', 'action', 'deny']) # Commit again self.cli_commit() def test_system_ipv6_nht(self): self.cli_set(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config applied to FRR - frrconfig = self.getFRRconfig('', end='', daemon='zebra') + frrconfig = 
self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertIn(f'no ipv6 nht resolve-via-default', frrconfig) self.cli_delete(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config removed to FRR - frrconfig = self.getFRRconfig('', end='', daemon='zebra') + frrconfig = self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertNotIn(f'no ipv6 nht resolve-via-default', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_vrf.py b/smoketest/scripts/cli/test_vrf.py index 2bb6c91c1..f4ed1a61f 100755 --- a/smoketest/scripts/cli/test_vrf.py +++ b/smoketest/scripts/cli/test_vrf.py @@ -1,599 +1,600 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import re import os import unittest from base_vyostest_shim import VyOSUnitTestSHIM from json import loads from jmespath import search from vyos.configsession import ConfigSessionError +from vyos.frrender import mgmt_daemon from vyos.ifconfig import Interface from vyos.ifconfig import Section from vyos.utils.file import read_file from vyos.utils.network import get_interface_config from vyos.utils.network import get_vrf_tableid from vyos.utils.network import is_intf_addr_assigned from vyos.utils.network import interface_exists from vyos.utils.process import cmd from vyos.utils.system import sysctl_read base_path = ['vrf'] vrfs = ['red', 'green', 'blue', 'foo-bar', 'baz_foo'] v4_protocols = ['any', 'babel', 'bgp', 'connected', 'eigrp', 'isis', 'kernel', 'ospf', 'rip', 'static', 'table'] v6_protocols = ['any', 'babel', 'bgp', 'connected', 'isis', 'kernel', 'ospfv3', 'ripng', 'static', 'table'] class VRFTest(VyOSUnitTestSHIM.TestCase): _interfaces = [] @classmethod def setUpClass(cls): # we need to filter out VLAN interfaces identified by a dot (.) # in their name - just in case! 
if 'TEST_ETH' in os.environ: tmp = os.environ['TEST_ETH'].split() cls._interfaces = tmp else: for tmp in Section.interfaces('ethernet', vlan=False): cls._interfaces.append(tmp) # call base-classes classmethod super(VRFTest, cls).setUpClass() def setUp(self): # VRF strict_most ist always enabled tmp = read_file('/proc/sys/net/vrf/strict_mode') self.assertEqual(tmp, '1') def tearDown(self): # delete all VRFs self.cli_delete(base_path) self.cli_commit() for vrf in vrfs: self.assertFalse(interface_exists(vrf)) def test_vrf_vni_and_table_id(self): base_table = '1000' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] description = f'VyOS-VRF-{vrf}' self.cli_set(base + ['description', description]) # check validate() - a table ID is mandatory with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base + ['table', table]) self.cli_set(base + ['vni', table]) if vrf == 'green': self.cli_set(base + ['disable']) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table iproute2_config = read_file('/etc/iproute2/rt_tables.d/vyos-vrf.conf') for vrf in vrfs: description = f'VyOS-VRF-{vrf}' self.assertTrue(interface_exists(vrf)) vrf_if = Interface(vrf) # validate proper interface description self.assertEqual(vrf_if.get_alias(), description) # validate admin up/down state of VRF state = 'up' if vrf == 'green': state = 'down' self.assertEqual(vrf_if.get_admin_state(), state) # Test the iproute2 lookup file, syntax is as follows: # # # id vrf name comment # 1000 red # VyOS-VRF-red # 1001 green # VyOS-VRF-green # ... regex = f'{table}\s+{vrf}\s+#\s+{description}' self.assertTrue(re.findall(regex, iproute2_config)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) self.assertEqual(int(table), get_vrf_tableid(vrf)) # Increment table ID for the next run table = str(int(table) + 1) def test_vrf_loopbacks_ips(self): table = '2000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration loopbacks = ['127.0.0.1', '::1'] for vrf in vrfs: # Ensure VRF was created self.assertTrue(interface_exists(vrf)) # Verify IP forwarding is 1 (enabled) self.assertEqual(sysctl_read(f'net.ipv4.conf.{vrf}.forwarding'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{vrf}.forwarding'), '1') # Test for proper loopback IP assignment for addr in loopbacks: self.assertTrue(is_intf_addr_assigned(vrf, addr)) def test_vrf_bind_all(self): table = '2000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) table = str(int(table) + 1) self.cli_set(base_path + ['bind-to-all']) # commit changes self.cli_commit() # Verify VRF configuration self.assertEqual(sysctl_read('net.ipv4.tcp_l3mdev_accept'), '1') self.assertEqual(sysctl_read('net.ipv4.udp_l3mdev_accept'), '1') # If there is any VRF defined, strict_mode should be on self.assertEqual(sysctl_read('net.vrf.strict_mode'), '1') def test_vrf_table_id_is_unalterable(self): # Linux Kernel prohibits the change of a VRF table on the fly. # VRF must be deleted and recreated! 
table = '1000' vrf = vrfs[0] base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) # commit changes self.cli_commit() # Check if VRF has been created self.assertTrue(interface_exists(vrf)) table = str(int(table) + 1) self.cli_set(base + ['table', table]) # check validate() - table ID can not be altered! with self.assertRaises(ConfigSessionError): self.cli_commit() def test_vrf_assign_interface(self): vrf = vrfs[0] table = '5000' self.cli_set(['vrf', 'name', vrf, 'table', table]) for interface in self._interfaces: section = Section.section(interface) self.cli_set(['interfaces', section, interface, 'vrf', vrf]) # commit changes self.cli_commit() # Verify VRF assignmant for interface in self._interfaces: tmp = get_interface_config(interface) self.assertEqual(vrf, tmp['master']) # cleanup section = Section.section(interface) self.cli_delete(['interfaces', section, interface, 'vrf']) def test_vrf_static_route(self): base_table = '100' table = base_table for vrf in vrfs: next_hop = f'192.0.{table}.1' prefix = f'10.0.{table}.0/24' base = base_path + ['name', vrf] self.cli_set(base + ['vni', table]) # check validate() - a table ID is mandatory with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base + ['table', table]) self.cli_set(base + ['protocols', 'static', 'route', prefix, 'next-hop', next_hop]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: next_hop = f'192.0.{table}.1' prefix = f'10.0.{table}.0/24' self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) self.assertIn(f' ip route {prefix} {next_hop}', frrconfig) # Increment table ID for the next run table = str(int(table) + 1) def test_vrf_link_local_ip_addresses(self): # Testcase for issue T4331 table = '100' vrf = 'orange' interface = 'dum9998' addresses = ['192.0.2.1/26', '2001:db8:9998::1/64', 'fe80::1/64'] for address in addresses: self.cli_set(['interfaces', 'dummy', interface, 'address', address]) # Create dummy interfaces self.cli_commit() # ... and verify IP addresses got assigned for address in addresses: self.assertTrue(is_intf_addr_assigned(interface, address)) # Move interface to VRF self.cli_set(base_path + ['name', vrf, 'table', table]) self.cli_set(['interfaces', 'dummy', interface, 'vrf', vrf]) # Apply VRF config self.cli_commit() # Ensure VRF got created self.assertTrue(interface_exists(vrf)) # ... 
and IP addresses are still assigned for address in addresses: self.assertTrue(is_intf_addr_assigned(interface, address)) # Verify VRF table ID self.assertEqual(int(table), get_vrf_tableid(vrf)) # Verify interface is assigned to VRF tmp = get_interface_config(interface) self.assertEqual(vrf, tmp['master']) # Delete Interface self.cli_delete(['interfaces', 'dummy', interface]) self.cli_commit() def test_vrf_disable_forwarding(self): table = '2000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) self.cli_set(base + ['ip', 'disable-forwarding']) self.cli_set(base + ['ipv6', 'disable-forwarding']) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration loopbacks = ['127.0.0.1', '::1'] for vrf in vrfs: # Ensure VRF was created self.assertTrue(interface_exists(vrf)) # Verify IP forwarding is 0 (disabled) self.assertEqual(sysctl_read(f'net.ipv4.conf.{vrf}.forwarding'), '0') self.assertEqual(sysctl_read(f'net.ipv6.conf.{vrf}.forwarding'), '0') def test_vrf_ip_protocol_route_map(self): table = '6000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) for protocol in v4_protocols: self.cli_set(['policy', 'route-map', f'route-map-{vrf}-{protocol}', 'rule', '10', 'action', 'permit']) self.cli_set(base + ['ip', 'protocol', protocol, 'route-map', f'route-map-{vrf}-{protocol}']) table = str(int(table) + 1) self.cli_commit() # Verify route-map properly applied to FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f'vrf {vrf}', frrconfig) for protocol in v4_protocols: self.assertIn(f' ip protocol {protocol} route-map route-map-{vrf}-{protocol}', frrconfig) # Delete route-maps for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(['policy', 'route-map', f'route-map-{vrf}-{protocol}']) self.cli_delete(base + ['ip', 'protocol']) self.cli_commit() # Verify route-map properly is removed from FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') - self.assertNotIn(f'vrf {vrf}', frrconfig) + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) + self.assertNotIn(f' ip protocol', frrconfig) def test_vrf_ip_ipv6_protocol_non_existing_route_map(self): table = '6100' non_existing = 'non-existing' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) for protocol in v4_protocols: self.cli_set(base + ['ip', 'protocol', protocol, 'route-map', f'v4-{non_existing}']) for protocol in v6_protocols: self.cli_set(base + ['ipv6', 'protocol', protocol, 'route-map', f'v6-{non_existing}']) table = str(int(table) + 1) # Both v4 and v6 route-maps do not exist yet with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', f'v4-{non_existing}', 'rule', '10', 'action', 'deny']) # v6 route-map does not exist yet with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', f'v6-{non_existing}', 'rule', '10', 'action', 'deny']) # Commit again self.cli_commit() def test_vrf_ipv6_protocol_route_map(self): table = '6200' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) for protocol in v6_protocols: route_map = f'route-map-{vrf}-{protocol.replace("ospfv3", "ospf6")}' self.cli_set(['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) self.cli_set(base + ['ipv6', 'protocol', protocol, 'route-map', route_map]) 
table = str(int(table) + 1) self.cli_commit() # Verify route-map properly applied to FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f'vrf {vrf}', frrconfig) for protocol in v6_protocols: # VyOS and FRR use a different name for OSPFv3 (IPv6) if protocol == 'ospfv3': protocol = 'ospf6' route_map = f'route-map-{vrf}-{protocol}' self.assertIn(f' ipv6 protocol {protocol} route-map {route_map}', frrconfig) # Delete route-maps for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(['policy', 'route-map', f'route-map-{vrf}-{protocol}']) self.cli_delete(base + ['ipv6', 'protocol']) self.cli_commit() # Verify route-map properly is removed from FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') - self.assertNotIn(f'vrf {vrf}', frrconfig) + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) + self.assertNotIn(f' ipv6 protocol', frrconfig) def test_vrf_vni_duplicates(self): base_table = '6300' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) self.cli_set(base + ['vni', '100']) table = str(int(table) + 1) # L3VNIs can only be used once with self.assertRaises(ConfigSessionError): self.cli_commit() table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['vni', str(table)]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 1) def test_vrf_vni_add_change_remove(self): base_table = '6300' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) self.cli_set(base + ['vni', str(table)]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 1) # Now change all L3VNIs (increment 2) # We must also change the base_table number as we probably could get # duplicate VNI's during the test as VNIs are applied 1:1 to FRR base_table = '5000' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['vni', str(table)]) table = str(int(table) + 2) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 2) # add a new VRF with VNI - this must not delete any existing VRF/VNI purple = 'purple' table = str(int(table) + 10) self.cli_set(base_path + ['name', purple, 'table', table]) self.cli_set(base_path + ['name', purple, 'vni', table]) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = 
self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 2) # Verify purple VRF/VNI self.assertTrue(interface_exists(purple)) table = str(int(table) + 10) - frrconfig = self.getFRRconfig(f'vrf {purple}') + frrconfig = self.getFRRconfig(f'vrf {purple}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Now delete all the VNIs for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(base + ['vni']) # commit changes self.cli_commit() # Verify no VNI is defined for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertNotIn('vni', frrconfig) # Verify purple VNI remains self.assertTrue(interface_exists(purple)) - frrconfig = self.getFRRconfig(f'vrf {purple}') + frrconfig = self.getFRRconfig(f'vrf {purple}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) def test_vrf_ip_ipv6_nht(self): table = '6910' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) self.cli_set(base + ['ip', 'nht', 'no-resolve-via-default']) self.cli_set(base + ['ipv6', 'nht', 'no-resolve-via-default']) table = str(int(table) + 1) self.cli_commit() # Verify route-map properly applied to FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f'vrf {vrf}', frrconfig) self.assertIn(f' no ip nht resolve-via-default', frrconfig) self.assertIn(f' no ipv6 nht resolve-via-default', frrconfig) # Delete route-maps for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(base + ['ip']) self.cli_delete(base + ['ipv6']) self.cli_commit() # Verify route-map properly is removed from FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertNotIn(f' no ip nht resolve-via-default', frrconfig) self.assertNotIn(f' no ipv6 nht resolve-via-default', frrconfig) def test_vrf_conntrack(self): table = '8710' nftables_rules = { 'vrf_zones_ct_in': ['ct original zone set iifname map @ct_iface_map'], 'vrf_zones_ct_out': ['ct original zone set oifname map @ct_iface_map'] } self.cli_set(base_path + ['name', 'randomVRF', 'table', '1000']) self.cli_commit() # Conntrack rules should not be present for chain, rule in nftables_rules.items(): self.verify_nftables_chain(rule, 'inet vrf_zones', chain, inverse=True) # conntrack is only enabled once NAT, NAT66 or firewalling is enabled self.cli_set(['nat']) for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) table = str(int(table) + 1) # We need the commit inside the loop to trigger the bug in T6603 self.cli_commit() # Conntrack rules should now be present for chain, rule in nftables_rules.items(): self.verify_nftables_chain(rule, 'inet vrf_zones', chain, inverse=False) # T6603: there should be only ONE entry for the iifname/oifname in the chains tmp = loads(cmd('sudo nft -j list table inet vrf_zones')) num_rules = len(search("nftables[].rule[].chain", tmp)) # ['vrf_zones_ct_in', 'vrf_zones_ct_out'] self.assertEqual(num_rules, 2) self.cli_delete(['nat']) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/conf_mode/interfaces_bonding.py b/src/conf_mode/interfaces_bonding.py index bbbfb0385..0844d2913 100755 --- a/src/conf_mode/interfaces_bonding.py +++ 
b/src/conf_mode/interfaces_bonding.py @@ -1,305 +1,299 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config +from vyos.configdict import get_frrender_dict from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed from vyos.configdict import leaf_node_changed from vyos.configdict import is_member from vyos.configdict import is_source_interface from vyos.configverify import verify_address from vyos.configverify import verify_bridge_delete from vyos.configverify import verify_dhcpv6 from vyos.configverify import verify_eapol from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf +from vyos.frrender import FRRender from vyos.ifconfig import BondIf from vyos.ifconfig.ethernet import EthernetIf from vyos.ifconfig import Section -from vyos.template import render_to_string from vyos.utils.assertion import assert_mac from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.network import interface_exists +from vyos.utils.process import is_systemd_service_running from vyos.configdict import has_address_configured from vyos.configdict import has_vrf_configured -from vyos.configdep import set_dependents, call_dependents +from vyos.configdep import set_dependents +from vyos.configdep import call_dependents from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_bond_mode(mode): if mode == 'round-robin': return 'balance-rr' elif mode == 'active-backup': return 'active-backup' elif mode == 'xor-hash': return 'balance-xor' elif mode == 'broadcast': return 'broadcast' elif mode == '802.3ad': return '802.3ad' elif mode == 'transmit-load-balance': return 'balance-tlb' elif mode == 'adaptive-load-balance': return 'balance-alb' else: raise ConfigError(f'invalid bond mode "{mode}"') def get_config(config=None): """ Retrive CLI config as dictionary. 
Dictionary can never be empty, as at least the interface name will be added or a deleted flag """ if config: conf = config else: conf = Config() base = ['interfaces', 'bonding'] ifname, bond = get_interface_dict(conf, base, with_pki=True) # To make our own life easier transfor the list of member interfaces # into a dictionary - we will use this to add additional information # later on for each member if 'member' in bond and 'interface' in bond['member']: # convert list of member interfaces to a dictionary bond['member']['interface'] = {k: {} for k in bond['member']['interface']} if 'mode' in bond: bond['mode'] = get_bond_mode(bond['mode']) tmp = is_node_changed(conf, base + [ifname, 'mode']) - if tmp: bond['shutdown_required'] = {} + if tmp: bond.update({'shutdown_required' : {}}) tmp = is_node_changed(conf, base + [ifname, 'lacp-rate']) - if tmp: bond['shutdown_required'] = {} + if tmp: bond.update({'shutdown_required' : {}}) + + tmp = is_node_changed(conf, base + [ifname, 'evpn']) + if tmp: bond.update({'frr_dict' : get_frrender_dict(conf)}) # determine which members have been removed interfaces_removed = leaf_node_changed(conf, base + [ifname, 'member', 'interface']) # Reset config level to interfaces old_level = conf.get_level() conf.set_level(['interfaces']) if interfaces_removed: bond['shutdown_required'] = {} if 'member' not in bond: bond['member'] = {} tmp = {} for interface in interfaces_removed: # if member is deleted from bond, add dependencies to call # ethernet commit again in apply function # to apply options under ethernet section set_dependents('ethernet', conf, interface) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): tmp[interface] = {'disable': ''} else: tmp[interface] = {} # also present the interfaces to be removed from the bond as dictionary bond['member']['interface_remove'] = tmp # Restore existing config level conf.set_level(old_level) if dict_search('member.interface', bond): for interface, interface_config in bond['member']['interface'].items(): interface_ethernet_config = conf.get_config_dict( ['interfaces', 'ethernet', interface], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=False, with_recursive_defaults=False) interface_config['config_paths'] = dict_to_paths_values(interface_ethernet_config) # Check if member interface is a new member if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]): bond['shutdown_required'] = {} interface_config['new_added'] = {} # Check if member interface is disabled conf.set_level(['interfaces']) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): interface_config['disable'] = '' conf.set_level(old_level) # Check if member interface is already member of another bridge tmp = is_member(conf, interface, 'bridge') if tmp: interface_config['is_bridge_member'] = tmp # Check if member interface is already member of a bond tmp = is_member(conf, interface, 'bonding') for tmp in is_member(conf, interface, 'bonding'): if bond['ifname'] == tmp: continue interface_config['is_bond_member'] = tmp # Check if member interface is used as source-interface on another interface tmp = is_source_interface(conf, interface) if tmp: interface_config['is_source_interface'] = tmp # bond members must not have an assigned address tmp = has_address_configured(conf, interface) if tmp: interface_config['has_address'] = {} # bond members must not have 
a VRF attached tmp = has_vrf_configured(conf, interface) if tmp: interface_config['has_vrf'] = {} return bond def verify(bond): if 'deleted' in bond: verify_bridge_delete(bond) return None if 'arp_monitor' in bond: if 'target' in bond['arp_monitor'] and len(bond['arp_monitor']['target']) > 16: raise ConfigError('The maximum number of arp-monitor targets is 16') if 'interval' in bond['arp_monitor'] and int(bond['arp_monitor']['interval']) > 0: if bond['mode'] in ['802.3ad', 'balance-tlb', 'balance-alb']: raise ConfigError('ARP link monitoring does not work for mode 802.3ad, ' \ 'transmit-load-balance or adaptive-load-balance') if 'primary' in bond: if bond['mode'] not in ['active-backup', 'balance-tlb', 'balance-alb']: raise ConfigError('Option primary - mode dependency failed, not' 'supported in mode {mode}!'.format(**bond)) verify_mtu_ipv6(bond) verify_address(bond) verify_dhcpv6(bond) verify_vrf(bond) verify_mirror_redirect(bond) verify_eapol(bond) # use common function to verify VLAN configuration verify_vlan_config(bond) bond_name = bond['ifname'] if dict_search('member.interface', bond): for interface, interface_config in bond['member']['interface'].items(): error_msg = f'Can not add interface "{interface}" to bond, ' if interface == 'lo': raise ConfigError('Loopback interface "lo" can not be added to a bond') if not interface_exists(interface): raise ConfigError(error_msg + 'it does not exist!') if 'is_bridge_member' in interface_config: tmp = next(iter(interface_config['is_bridge_member'])) raise ConfigError(error_msg + f'it is already a member of bridge "{tmp}"!') if 'is_bond_member' in interface_config: tmp = next(iter(interface_config['is_bond_member'])) raise ConfigError(error_msg + f'it is already a member of bond "{tmp}"!') if 'is_source_interface' in interface_config: tmp = interface_config['is_source_interface'] raise ConfigError(error_msg + f'it is the source-interface of "{tmp}"!') if 'has_address' in interface_config: raise ConfigError(error_msg + 'it has an address assigned!') if 'has_vrf' in interface_config: raise ConfigError(error_msg + 'it has a VRF assigned!') if 'new_added' in interface_config and 'config_paths' in interface_config: for option_path, option_value in interface_config['config_paths'].items(): if option_path in EthernetIf.get_bond_member_allowed_options() : continue if option_path in BondIf.get_inherit_bond_options(): continue raise ConfigError(error_msg + f'it has a "{option_path.replace(".", " ")}" assigned!') if 'primary' in bond: if bond['primary'] not in bond['member']['interface']: raise ConfigError(f'Primary interface of bond "{bond_name}" must be a member interface') if bond['mode'] not in ['active-backup', 'balance-tlb', 'balance-alb']: raise ConfigError('primary interface only works for mode active-backup, ' \ 'transmit-load-balance or adaptive-load-balance') if 'system_mac' in bond: if bond['mode'] != '802.3ad': raise ConfigError('Actor MAC address only available in 802.3ad mode!') system_mac = bond['system_mac'] try: assert_mac(system_mac, test_all_zero=False) except: raise ConfigError(f'Cannot use a multicast MAC address "{system_mac}" as system-mac!') return None def generate(bond): - bond['frr_zebra_config'] = '' - if 'deleted' not in bond: - bond['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', bond) + if 'frr_dict' in bond and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(bond['frr_dict']) return None def apply(bond): - ifname = bond['ifname'] - b = BondIf(ifname) + if 'frr_dict' in bond and 
not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() + + b = BondIf(bond['ifname']) if 'deleted' in bond: - # delete interface b.remove() else: b.update(bond) if dict_search('member.interface_remove', bond): try: call_dependents() except ConfigError: raise ConfigError('Error in updating ethernet interface ' 'after deleting it from bond') - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_zebra_config' in bond: - frr_cfg.add_before(frr.default_add_before, bond['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) - return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/interfaces_ethernet.py b/src/conf_mode/interfaces_ethernet.py index 34ce7bc47..5024e6982 100755 --- a/src/conf_mode/interfaces_ethernet.py +++ b/src/conf_mode/interfaces_ethernet.py @@ -1,360 +1,347 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
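The interfaces_bonding.py hunk above replaces the per-script frr.FRRConfig() handling with the FRRender pattern that recurs throughout this patch: render the combined dictionary in generate(), commit it in apply(), and skip both steps when vyos-configd drives the commit. A condensed sketch of that control flow follows; the function and import names are taken from the diff, while the standalone-versus-configd rationale is an assumption.

# Sketch of the recurring FRRender pattern, not a drop-in replacement:
# when vyos-configd.service is active it is assumed to invoke FRRender
# centrally, so the conf_mode script only acts when run standalone.
from vyos.frrender import FRRender
from vyos.utils.process import is_systemd_service_running

def _standalone() -> bool:
    return not is_systemd_service_running('vyos-configd.service')

def generate(config: dict) -> None:
    if 'frr_dict' in config and _standalone():
        FRRender().generate(config['frr_dict'])

def apply(config: dict) -> None:
    if 'frr_dict' in config and _standalone():
        FRRender().apply()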
import os from sys import exit from vyos.base import Warning from vyos.config import Config +from vyos.configdict import get_frrender_dict from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed from vyos.configverify import verify_address from vyos.configverify import verify_dhcpv6 from vyos.configverify import verify_interface_exists from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_mtu from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf from vyos.configverify import verify_bond_bridge_member from vyos.configverify import verify_eapol from vyos.ethtool import Ethtool +from vyos.frrender import FRRender from vyos.ifconfig import EthernetIf from vyos.ifconfig import BondIf -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.dict import dict_set from vyos.utils.dict import dict_delete +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def update_bond_options(conf: Config, eth_conf: dict) -> list: """ Return list of blocked options if interface is a bond member :param conf: Config object :type conf: Config :param eth_conf: Ethernet config dictionary :type eth_conf: dict :return: List of blocked options :rtype: list """ blocked_list = [] bond_name = list(eth_conf['is_bond_member'].keys())[0] config_without_defaults = conf.get_config_dict( ['interfaces', 'ethernet', eth_conf['ifname']], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=False, with_recursive_defaults=False) config_with_defaults = conf.get_config_dict( ['interfaces', 'ethernet', eth_conf['ifname']], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=True) bond_config_with_defaults = conf.get_config_dict( ['interfaces', 'bonding', bond_name], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=True) eth_dict_paths = dict_to_paths_values(config_without_defaults) eth_path_base = ['interfaces', 'ethernet', eth_conf['ifname']] #if option is configured under ethernet section for option_path, option_value in eth_dict_paths.items(): bond_option_value = dict_search(option_path, bond_config_with_defaults) #If option is allowed for changing then continue if option_path in EthernetIf.get_bond_member_allowed_options(): continue # if option is inherited from bond then set valued from bond interface if option_path in BondIf.get_inherit_bond_options(): # If option equals to bond option then do nothing if option_value == bond_option_value: continue else: # if ethernet has option and bond interface has # then copy it from bond if bond_option_value is not None: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}" to "{option_value}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member.' \ f' Option is inherited from bond "{bond_name}"') dict_set(option_path, bond_option_value, eth_conf) continue # if ethernet has option and bond interface does not have # then delete it form dict and do not apply it else: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}".' 
\ f' Interface "{eth_conf["ifname"]}" is a bond member.' \ f' Option is inherited from bond "{bond_name}"') dict_delete(option_path, eth_conf) blocked_list.append(option_path) # if inherited option is not configured under ethernet section but configured under bond section for option_path in BondIf.get_inherit_bond_options(): bond_option_value = dict_search(option_path, bond_config_with_defaults) if bond_option_value is not None: if option_path not in eth_dict_paths: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}" to "{dict_search(option_path, config_with_defaults)}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member. ' \ f'Option is inherited from bond "{bond_name}"') dict_set(option_path, bond_option_value, eth_conf) eth_conf['bond_blocked_changes'] = blocked_list return None def get_config(config=None): """ Retrive CLI config as dictionary. Dictionary can never be empty, as at least the interface name will be added or a deleted flag """ if config: conf = config else: conf = Config() base = ['interfaces', 'ethernet'] ifname, ethernet = get_interface_dict(conf, base, with_pki=True) # T5862 - default MTU is not acceptable in some environments # There are cloud environments available where the maximum supported # ethernet MTU is e.g. 1450 bytes, thus we clamp this to the adapters # maximum MTU value or 1500 bytes - whatever is lower if 'mtu' not in ethernet: try: ethernet['mtu'] = '1500' max_mtu = EthernetIf(ifname).get_max_mtu() if max_mtu < int(ethernet['mtu']): ethernet['mtu'] = str(max_mtu) except: pass if 'is_bond_member' in ethernet: update_bond_options(conf, ethernet) tmp = is_node_changed(conf, base + [ifname, 'speed']) if tmp: ethernet.update({'speed_duplex_changed': {}}) tmp = is_node_changed(conf, base + [ifname, 'duplex']) if tmp: ethernet.update({'speed_duplex_changed': {}}) + tmp = is_node_changed(conf, base + [ifname, 'evpn']) + if tmp: ethernet.update({'frr_dict' : get_frrender_dict(conf)}) + return ethernet def verify_speed_duplex(ethernet: dict, ethtool: Ethtool): """ Verify speed and duplex :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')): raise ConfigError( 'Speed/Duplex missmatch. Must be both auto or manually configured') if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto': # We need to verify if the requested speed and duplex setting is # supported by the underlaying NIC. 
speed = ethernet['speed'] duplex = ethernet['duplex'] if not ethtool.check_speed_duplex(speed, duplex): raise ConfigError( f'Adapter does not support changing speed ' \ f'and duplex settings to: {speed}/{duplex}!') def verify_flow_control(ethernet: dict, ethtool: Ethtool): """ Verify flow control :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if 'disable_flow_control' in ethernet: if not ethtool.check_flow_control(): raise ConfigError( 'Adapter does not support changing flow-control settings!') def verify_ring_buffer(ethernet: dict, ethtool: Ethtool): """ Verify ring buffer :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if 'ring_buffer' in ethernet: max_rx = ethtool.get_ring_buffer_max('rx') if not max_rx: raise ConfigError( 'Driver does not support RX ring-buffer configuration!') max_tx = ethtool.get_ring_buffer_max('tx') if not max_tx: raise ConfigError( 'Driver does not support TX ring-buffer configuration!') rx = dict_search('ring_buffer.rx', ethernet) if rx and int(rx) > int(max_rx): raise ConfigError(f'Driver only supports a maximum RX ring-buffer ' \ f'size of "{max_rx}" bytes!') tx = dict_search('ring_buffer.tx', ethernet) if tx and int(tx) > int(max_tx): raise ConfigError(f'Driver only supports a maximum TX ring-buffer ' \ f'size of "{max_tx}" bytes!') def verify_offload(ethernet: dict, ethtool: Ethtool): """ Verify offloading capabilities :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if dict_search('offload.rps', ethernet) != None: if not os.path.exists(f'/sys/class/net/{ethernet["ifname"]}/queues/rx-0/rps_cpus'): raise ConfigError('Interface does not suport RPS!') driver = ethtool.get_driver_name() # T3342 - Xen driver requires special treatment if driver == 'vif': if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None: raise ConfigError('Xen netback drivers requires scatter-gatter offloading '\ 'for MTU size larger then 1500 bytes') def verify_allowedbond_changes(ethernet: dict): """ Verify changed options if interface is in bonding :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ if 'bond_blocked_changes' in ethernet: for option in ethernet['bond_blocked_changes']: raise ConfigError(f'Cannot configure "{option.replace(".", " ")}"' \ f' on interface "{ethernet["ifname"]}".' 
\ f' Interface is a bond member') def verify(ethernet): if 'deleted' in ethernet: return None if 'is_bond_member' in ethernet: verify_bond_member(ethernet) else: verify_ethernet(ethernet) def verify_bond_member(ethernet): """ Verification function for ethernet interface which is in bonding :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ ifname = ethernet['ifname'] verify_interface_exists(ethernet, ifname) verify_eapol(ethernet) verify_mirror_redirect(ethernet) ethtool = Ethtool(ifname) verify_speed_duplex(ethernet, ethtool) verify_flow_control(ethernet, ethtool) verify_ring_buffer(ethernet, ethtool) verify_offload(ethernet, ethtool) verify_allowedbond_changes(ethernet) def verify_ethernet(ethernet): """ Verification function for simple ethernet interface :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ ifname = ethernet['ifname'] verify_interface_exists(ethernet, ifname) verify_mtu(ethernet) verify_mtu_ipv6(ethernet) verify_dhcpv6(ethernet) verify_address(ethernet) verify_vrf(ethernet) verify_bond_bridge_member(ethernet) verify_eapol(ethernet) verify_mirror_redirect(ethernet) ethtool = Ethtool(ifname) # No need to check speed and duplex keys as both have default values. verify_speed_duplex(ethernet, ethtool) verify_flow_control(ethernet, ethtool) verify_ring_buffer(ethernet, ethtool) verify_offload(ethernet, ethtool) # use common function to verify VLAN configuration verify_vlan_config(ethernet) return None def generate(ethernet): - if 'deleted' in ethernet: - return None - - ethernet['frr_zebra_config'] = '' - if 'deleted' not in ethernet: - ethernet['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', ethernet) - + if 'frr_dict' in ethernet and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(ethernet['frr_dict']) return None def apply(ethernet): - ifname = ethernet['ifname'] - - e = EthernetIf(ifname) + if 'frr_dict' in ethernet and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() + e = EthernetIf(ethernet['ifname']) if 'deleted' in ethernet: - # delete interface e.remove() else: e.update(ethernet) - - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_zebra_config' in ethernet: - frr_cfg.add_before(frr.default_add_before, ethernet['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) - apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py index a5963e72c..5e71a612d 100755 --- a/src/conf_mode/policy.py +++ b/src/conf_mode/policy.py @@ -1,323 +1,283 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2022 VyOS maintainers and contributors +# Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.template import render_to_string +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import frr_protocols from vyos.utils.dict import dict_search +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag - airbag.enable() - def community_action_compatibility(actions: dict) -> bool: """ Check compatibility of values in community and large community sections :param actions: dictionary with community :type actions: dict :return: true if compatible, false if not :rtype: bool """ if ('none' in actions) and ('replace' in actions or 'add' in actions): return False if 'replace' in actions and 'add' in actions: return False if ('delete' in actions) and ('none' in actions or 'replace' in actions): return False return True def extcommunity_action_compatibility(actions: dict) -> bool: """ Check compatibility of values in extended community sections :param actions: dictionary with community :type actions: dict :return: true if compatible, false if not :rtype: bool """ if ('none' in actions) and ( 'rt' in actions or 'soo' in actions or 'bandwidth' in actions or 'bandwidth_non_transitive' in actions): return False if ('bandwidth_non_transitive' in actions) and ('bandwidth' not in actions): return False return True def routing_policy_find(key, dictionary): # Recursively traverse a dictionary and extract the value assigned to # a given key as generator object. This is made for routing policies, # thus also import/export is checked for k, v in dictionary.items(): if k == key: if isinstance(v, dict): for a, b in v.items(): if a in ['import', 'export']: yield b else: yield v elif isinstance(v, dict): for result in routing_policy_find(key, v): yield result elif isinstance(v, list): for d in v: if isinstance(d, dict): for result in routing_policy_find(key, d): yield result def get_config(config=None): if config: conf = config else: conf = Config() - base = ['policy'] - policy = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
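In the policy.py hunk above, routing_policy_find() is a recursive generator: it yields every value bound to a given key anywhere in a nested dictionary and, when that value is itself a dict, descends into its import/export entries. A small usage sketch follows; the sample dictionary is invented for illustration, the real input being the protocol portion of get_frrender_dict(conf).

# Hypothetical data shaped like a 'protocol' sub-tree, for illustration only
sample = {
    'bgp': {
        'neighbor': {
            '192.0.2.1': {
                'address_family': {
                    'ipv4_unicast': {
                        'route_map': {'import': 'RM-IN', 'export': 'RM-OUT'}
                    }
                }
            }
        }
    }
}
# list(routing_policy_find('route_map', sample)) yields ['RM-IN', 'RM-OUT'];
# the "still in use" check in verify() compares these names against the
# policy dictionary before allowing a delete.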
- tmp = conf.get_config_dict(['protocols'], key_mangling=('-', '_'), - no_tag_node_value_mangle=True) - # Merge policy dict into "regular" config dict - policy = dict_merge(tmp, policy) - return policy - - -def verify(policy): - if not policy: + return get_frrender_dict(conf) + + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'policy'): return None - for policy_type in ['access_list', 'access_list6', 'as_path_list', - 'community_list', 'extcommunity_list', - 'large_community_list', - 'prefix_list', 'prefix_list6', 'route_map']: + policy_types = ['access_list', 'access_list6', 'as_path_list', + 'community_list', 'extcommunity_list', + 'large_community_list', 'prefix_list', + 'prefix_list6', 'route_map'] + + policy = config_dict['policy'] + for protocol in frr_protocols: + if protocol not in config_dict: + continue + if 'protocol' not in policy: + policy.update({'protocol': {}}) + policy['protocol'].update({protocol : config_dict[protocol]}) + + for policy_type in policy_types: # Bail out early and continue with next policy type if policy_type not in policy: continue # instance can be an ACL name/number, prefix-list name or route-map name for instance, instance_config in policy[policy_type].items(): # If no rule was found within the instance ... sad, but we can leave # early as nothing needs to be verified if 'rule' not in instance_config: continue # human readable instance name (hypen instead of underscore) policy_hr = policy_type.replace('_', '-') entries = [] for rule, rule_config in instance_config['rule'].items(): mandatory_error = f'must be specified for "{policy_hr} {instance} rule {rule}"!' if 'action' not in rule_config: raise ConfigError(f'Action {mandatory_error}') if policy_type == 'access_list': if 'source' not in rule_config: raise ConfigError(f'A source {mandatory_error}') if int(instance) in range(100, 200) or int( instance) in range(2000, 2700): if 'destination' not in rule_config: raise ConfigError( f'A destination {mandatory_error}') if policy_type == 'access_list6': if 'source' not in rule_config: raise ConfigError(f'A source {mandatory_error}') if policy_type in ['as_path_list', 'community_list', 'extcommunity_list', 'large_community_list']: if 'regex' not in rule_config: raise ConfigError(f'A regex {mandatory_error}') if policy_type in ['prefix_list', 'prefix_list6']: if 'prefix' not in rule_config: raise ConfigError(f'A prefix {mandatory_error}') if rule_config in entries: raise ConfigError( f'Rule "{rule}" contains a duplicate prefix definition!') entries.append(rule_config) # route-maps tend to be a bit more complex so they get their own verify() section if 'route_map' in policy: for route_map, route_map_config in policy['route_map'].items(): if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): # Action 'deny' cannot be used with "continue" or "on-match" # FRR does not validate it T4827, T6676 if rule_config['action'] == 'deny' and ('continue' in rule_config or 'on_match' in rule_config): raise ConfigError(f'rule {rule} "continue" or "on-match" cannot be used with action deny!') # Specified community-list must exist tmp = dict_search('match.community.community_list', rule_config) if tmp and tmp not in policy.get('community_list', []): raise ConfigError(f'community-list {tmp} does not exist!') # Specified extended community-list must exist tmp = dict_search('match.extcommunity', rule_config) if tmp and tmp not in policy.get('extcommunity_list', []): raise ConfigError( f'extcommunity-list {tmp} 
does not exist!') # Specified large-community-list must exist tmp = dict_search('match.large_community.large_community_list', rule_config) if tmp and tmp not in policy.get('large_community_list', []): raise ConfigError( f'large-community-list {tmp} does not exist!') # Specified prefix-list must exist tmp = dict_search('match.ip.address.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list', []): raise ConfigError(f'prefix-list {tmp} does not exist!') # Specified prefix-list must exist tmp = dict_search('match.ipv6.address.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list6', []): raise ConfigError(f'prefix-list6 {tmp} does not exist!') # Specified access_list6 in nexthop must exist tmp = dict_search('match.ipv6.nexthop.access_list', rule_config) if tmp and tmp not in policy.get('access_list6', []): raise ConfigError(f'access_list6 {tmp} does not exist!') # Specified prefix-list6 in nexthop must exist tmp = dict_search('match.ipv6.nexthop.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list6', []): raise ConfigError(f'prefix-list6 {tmp} does not exist!') tmp = dict_search('set.community.delete', rule_config) if tmp and tmp not in policy.get('community_list', []): raise ConfigError(f'community-list {tmp} does not exist!') tmp = dict_search('set.large_community.delete', rule_config) if tmp and tmp not in policy.get('large_community_list', []): raise ConfigError( f'large-community-list {tmp} does not exist!') if 'set' in rule_config: rule_action = rule_config['set'] if 'community' in rule_action: if not community_action_compatibility( rule_action['community']): raise ConfigError( f'Unexpected combination between action replace, add, delete or none in community') if 'large_community' in rule_action: if not community_action_compatibility( rule_action['large_community']): raise ConfigError( f'Unexpected combination between action replace, add, delete or none in large-community') if 'extcommunity' in rule_action: if not extcommunity_action_compatibility( rule_action['extcommunity']): raise ConfigError( f'Unexpected combination between none, rt, soo, bandwidth, bandwidth-non-transitive in extended-community') # When routing protocols are active some use prefix-lists, route-maps etc. # to apply the systems routing policy to the learned or redistributed routes. # When the "routing policy" changes and policies, route-maps etc. 
are deleted, # it is our responsibility to verify that the policy can not be deleted if it # is used by any routing protocol - if 'protocols' in policy: - for policy_type in ['access_list', 'access_list6', 'as_path_list', - 'community_list', - 'extcommunity_list', 'large_community_list', - 'prefix_list', 'route_map']: - if policy_type in policy: - for policy_name in list(set(routing_policy_find(policy_type, - policy[ - 'protocols']))): - found = False - if policy_name in policy[policy_type]: - found = True - # BGP uses prefix-list for selecting both an IPv4 or IPv6 AFI related - # list - we need to go the extra mile here and check both prefix-lists - if policy_type == 'prefix_list' and 'prefix_list6' in policy and policy_name in \ - policy['prefix_list6']: - found = True - if not found: - tmp = policy_type.replace('_', '-') - raise ConfigError( - f'Can not delete {tmp} "{policy_name}", still in use!') + # Check if any routing protocol is activated + if 'protocol' in policy: + for policy_type in policy_types: + for policy_name in list(set(routing_policy_find(policy_type, policy['protocol']))): + found = False + if policy_type in policy and policy_name in policy[policy_type]: + found = True + # BGP uses prefix-list for selecting both an IPv4 or IPv6 AFI related + # list - we need to go the extra mile here and check both prefix-lists + if policy_type == 'prefix_list' and 'prefix_list6' in policy and policy_name in \ + policy['prefix_list6']: + found = True + if not found: + tmp = policy_type.replace('_', '-') + raise ConfigError( + f'Can not delete {tmp} "{policy_name}", still in use!') return None -def generate(policy): - if not policy: - return None - policy['new_frr_config'] = render_to_string('frr/policy.frr.j2', policy) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None - -def apply(policy): - bgp_daemon = 'bgpd' - zebra_daemon = 'zebra' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(bgp_daemon) - frr_cfg.modify_section(r'^bgp as-path access-list .*') - frr_cfg.modify_section(r'^bgp community-list .*') - frr_cfg.modify_section(r'^bgp extcommunity-list .*') - frr_cfg.modify_section(r'^bgp large-community-list .*') - frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', - remove_stop_mark=True) - if 'new_frr_config' in policy: - frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(bgp_daemon) - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'^access-list .*') - frr_cfg.modify_section(r'^ipv6 access-list .*') - frr_cfg.modify_section(r'^ip prefix-list .*') - frr_cfg.modify_section(r'^ipv6 prefix-list .*') - frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', - remove_stop_mark=True) - if 'new_frr_config' in policy: - frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(zebra_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None - if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_babel.py b/src/conf_mode/protocols_babel.py index 
90b6e4a31..48b7ae734 100755 --- a/src/conf_mode/protocols_babel.py +++ b/src/conf_mode/protocols_babel.py @@ -1,159 +1,110 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender from vyos.utils.dict import dict_search -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'babel'] - babel = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - babel['interface_removed'] = list(interfaces_removed) + return get_frrender_dict(conf) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - babel.update({'deleted' : ''}) - return babel - - # We have gathered the dict representation of the CLI, but there are default - # values which we need to update into the dictionary retrieved. - default_values = conf.get_config_defaults(base, key_mangling=('-', '_'), - get_first_key=True, - recursive=True) - - # merge in default values - babel = config_dict_merge(default_values, babel) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
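As with policy.py, the babel script above now returns one combined dictionary from get_frrender_dict(conf) instead of assembling its own per-protocol dict, and the protocol presence check moves into has_frr_protocol_in_dict(). The stand-in below only illustrates how a verify() consumer reads that dictionary; the early-return condition is an assumption, not the actual helper.

# Illustration only: a plausible stand-in for the has_frr_protocol_in_dict()
# guard plus the 'policy' lookup used by the rewritten verify() functions.
def babel_verify_sketch(config_dict: dict) -> None:
    if 'babel' not in config_dict:
        return None
    babel = config_dict['babel']
    # prefix-lists and access-lists travel in the same combined dictionary
    babel['policy'] = config_dict.get('policy', {})
    # ... protocol-specific distribute-list checks follow, as in the hunk above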
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - babel = dict_merge(tmp, babel) - return babel - -def verify(babel): - if not babel: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'babel'): return None + babel = config_dict['babel'] + babel['policy'] = config_dict['policy'] + # verify distribute_list if "distribute_list" in babel: acl_keys = { "ipv4": [ "distribute_list.ipv4.access_list.in", "distribute_list.ipv4.access_list.out", ], "ipv6": [ "distribute_list.ipv6.access_list.in", "distribute_list.ipv6.access_list.out", ] } prefix_list_keys = { "ipv4": [ "distribute_list.ipv4.prefix_list.in", "distribute_list.ipv4.prefix_list.out", ], "ipv6":[ "distribute_list.ipv6.prefix_list.in", "distribute_list.ipv6.prefix_list.out", ] } for address_family in ["ipv4", "ipv6"]: for iface_key in babel["distribute_list"].get(address_family, {}).get("interface", {}).keys(): acl_keys[address_family].extend([ f"distribute_list.{address_family}.interface.{iface_key}.access_list.in", f"distribute_list.{address_family}.interface.{iface_key}.access_list.out" ]) prefix_list_keys[address_family].extend([ f"distribute_list.{address_family}.interface.{iface_key}.prefix_list.in", f"distribute_list.{address_family}.interface.{iface_key}.prefix_list.out" ]) for address_family, keys in acl_keys.items(): for key in keys: acl = dict_search(key, babel) if acl: verify_access_list(acl, babel, version='6' if address_family == 'ipv6' else '') for address_family, keys in prefix_list_keys.items(): for key in keys: prefix_list = dict_search(key, babel) if prefix_list: verify_prefix_list(prefix_list, babel, version='6' if address_family == 'ipv6' else '') -def generate(babel): - if not babel or 'deleted' in babel: - return None - - babel['new_frr_config'] = render_to_string('frr/babeld.frr.j2', babel) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(babel): - babel_daemon = 'babeld' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(babel_daemon) - frr_cfg.modify_section('^router babel', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in babel: - continue - for interface in babel[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in babel: - frr_cfg.add_before(frr.default_add_before, babel['new_frr_config']) - frr_cfg.commit_configuration(babel_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py index 1361bb1a9..2e7d40676 100755 --- a/src/conf_mode/protocols_bfd.py +++ b/src/conf_mode/protocols_bfd.py @@ -1,112 +1,97 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
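For context on how the dotted keys assembled above are consumed: each entry such as distribute_list.ipv4.access_list.in is resolved against the nested babel dictionary with dict_search() and only handed to verify_access_list() or verify_prefix_list() when a value is actually present. A minimal sketch of that lookup, using a simplified stand-in for vyos.utils.dict.dict_search (the real helper may differ in detail):

    # Simplified stand-in for vyos.utils.dict.dict_search, illustration only.
    def dotted_search(path: str, data: dict):
        """Walk 'a.b.c' through nested dicts, returning None when any key is missing."""
        node = data
        for key in path.split('.'):
            if not isinstance(node, dict) or key not in node:
                return None
            node = node[key]
        return node

    babel = {'distribute_list': {'ipv4': {'access_list': {'in': '10'}}}}
    assert dotted_search('distribute_list.ipv4.access_list.in', babel) == '10'
    assert dotted_search('distribute_list.ipv6.prefix_list.out', babel) is None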
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from vyos.config import Config +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_vrf +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.template import is_ipv6 -from vyos.template import render_to_string from vyos.utils.network import is_ipv6_link_local +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'bfd'] - bfd = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - return bfd - bfd = conf.merge_defaults(bfd, recursive=True) + return get_frrender_dict(conf) - return bfd - -def verify(bfd): - if not bfd: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'bfd'): return None + bfd = config_dict['bfd'] if 'peer' in bfd: for peer, peer_config in bfd['peer'].items(): # IPv6 link local peers require an explicit local address/interface if is_ipv6_link_local(peer): if 'source' not in peer_config or len(peer_config['source']) < 2: raise ConfigError('BFD IPv6 link-local peers require explicit local address and interface setting') # IPv6 peers require an explicit local address if is_ipv6(peer): if 'source' not in peer_config or 'address' not in peer_config['source']: raise ConfigError('BFD IPv6 peers require explicit local address setting') if 'multihop' in peer_config: # multihop require source address if 'source' not in peer_config or 'address' not in peer_config['source']: raise ConfigError('BFD multihop require source address') # multihop and echo-mode cannot be used together if 'echo_mode' in peer_config: raise ConfigError('BFD multihop and echo-mode cannot be used together') # multihop doesn't accept interface names if 'source' in peer_config and 'interface' in peer_config['source']: raise ConfigError('BFD multihop and source interface cannot be used together') if 'minimum_ttl' in peer_config and 'multihop' not in peer_config: raise ConfigError('Minimum TTL is only available for multihop BFD sessions!') if 'profile' in peer_config: profile_name = peer_config['profile'] if 'profile' not in bfd or profile_name not in bfd['profile']: raise ConfigError(f'BFD profile "{profile_name}" does not exist!') if 'vrf' in peer_config: verify_vrf(peer_config) return None -def generate(bfd): - if not bfd: - return None - bfd['new_frr_config'] = render_to_string('frr/bfdd.frr.j2', bfd) - -def apply(bfd): - bfd_daemon = 'bfdd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(bfd_daemon) - frr_cfg.modify_section('^bfd', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in bfd: - frr_cfg.add_before(frr.default_add_before, bfd['new_frr_config']) - frr_cfg.commit_configuration(bfd_daemon) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + 
FRRender().generate(config_dict) +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py index 22f020099..ae32dd839 100755 --- a/src/conf_mode/protocols_bgp.py +++ b/src/conf_mode/protocols_bgp.py @@ -1,655 +1,575 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.base import Warning from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_prefix_list from vyos.configverify import verify_route_map from vyos.configverify import verify_vrf +from vyos.frrender import FRRender from vyos.template import is_ip from vyos.template import is_interface -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_vrf from vyos.utils.network import is_addr_assigned +from vyos.utils.process import is_systemd_service_running from vyos.utils.process import process_named_running -from vyos.utils.process import call from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] - - base_path = ['protocols', 'bgp'] - - # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path - bgp = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - - bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], - key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Remove per interface MPLS configuration - get a list if changed - # nodes under the interface tagNode - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - bgp['interface_removed'] = list(interfaces_removed) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. 
- if vrf: - bgp.update({'vrf' : vrf}) - # We can not delete the BGP VRF instance if there is a L3VNI configured - # FRR L3VNI must be deleted first otherwise we will see error: - # "FRR error: Please unconfigure l3vni 3000" - tmp = ['vrf', 'name', vrf, 'vni'] - if conf.exists_effective(tmp): - bgp.update({'vni' : conf.return_effective_value(tmp)}) - # We can safely delete ourself from the dependent vrf list - if vrf in bgp['dependent_vrfs']: - del bgp['dependent_vrfs'][vrf] - - bgp['dependent_vrfs'].update({'default': {'protocols': { - 'bgp': conf.get_config_dict(base_path, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True)}}}) - - if not conf.exists(base): - # If bgp instance is deleted then mark it - bgp.update({'deleted' : ''}) - return bgp - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - bgp = conf.merge_defaults(bgp, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - bgp = dict_merge(tmp, bgp) - - return bgp - + return get_frrender_dict(conf, argv) def verify_vrf_as_import(search_vrf_name: str, afi_name: str, vrfs_config: dict) -> bool: """ :param search_vrf_name: search vrf name in import list :type search_vrf_name: str :param afi_name: afi/safi name :type afi_name: str :param vrfs_config: configuration dependents vrfs :type vrfs_config: dict :return: if vrf in import list retrun true else false :rtype: bool """ for vrf_name, vrf_config in vrfs_config.items(): import_list = dict_search( f'protocols.bgp.address_family.{afi_name}.import.vrf', vrf_config) if import_list: if search_vrf_name in import_list: return True return False def verify_vrf_import_options(afi_config: dict) -> bool: """ Search if afi contains one of options :param afi_config: afi/safi :type afi_config: dict :return: if vrf contains rd and route-target options return true else false :rtype: bool """ options = [ f'rd.vpn.export', f'route_target.vpn.import', f'route_target.vpn.export', f'route_target.vpn.both' ] for option in options: if dict_search(option, afi_config): return True return False def verify_vrf_import(vrf_name: str, vrfs_config: dict, afi_name: str) -> bool: """ Verify if vrf exists and contain options :param vrf_name: name of VRF :type vrf_name: str :param vrfs_config: dependent vrfs config :type vrfs_config: dict :param afi_name: afi/safi name :type afi_name: str :return: if vrf contains rd and route-target options return true else false :rtype: bool """ if vrf_name != 'default': verify_vrf({'vrf': vrf_name}) if dict_search(f'{vrf_name}.protocols.bgp.address_family.{afi_name}', vrfs_config): afi_config = \ vrfs_config[vrf_name]['protocols']['bgp']['address_family'][ afi_name] if verify_vrf_import_options(afi_config): return True return False def verify_vrflist_import(afi_name: str, afi_config: dict, vrfs_config: dict) -> bool: """ Call function to verify if scpecific vrf contains rd and route-target options return true else false :param afi_name: afi/safi name :type afi_name: str :param afi_config: afi/safi configuration :type afi_config: dict :param vrfs_config: dependent vrfs config :type vrfs_config:dict :return: if vrf contains rd 
and route-target options return true else false
    :rtype: bool
    """
    for vrf_name in afi_config['import']['vrf']:
        if verify_vrf_import(vrf_name, vrfs_config, afi_name):
            return True
    return False

def verify_remote_as(peer_config, bgp_config):
    if 'remote_as' in peer_config:
        return peer_config['remote_as']
    if 'peer_group' in peer_config:
        peer_group_name = peer_config['peer_group']
        tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config)
        if tmp: return tmp
    if 'interface' in peer_config:
        if 'remote_as' in peer_config['interface']:
            return peer_config['interface']['remote_as']
        if 'peer_group' in peer_config['interface']:
            peer_group_name = peer_config['interface']['peer_group']
            tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config)
            if tmp: return tmp
        if 'v6only' in peer_config['interface']:
            if 'remote_as' in peer_config['interface']['v6only']:
                return peer_config['interface']['v6only']['remote_as']
            if 'peer_group' in peer_config['interface']['v6only']:
                peer_group_name = peer_config['interface']['v6only']['peer_group']
                tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config)
                if tmp: return tmp
    return None

def verify_afi(peer_config, bgp_config):
    # If address_family configured under neighboor
    if 'address_family' in peer_config:
        return True
    # If address_family configured under peer-group
    # if neighbor interface configured
    peer_group_name = None
    if dict_search('interface.peer_group', peer_config):
        peer_group_name = peer_config['interface']['peer_group']
    elif dict_search('interface.v6only.peer_group', peer_config):
        peer_group_name = peer_config['interface']['v6only']['peer_group']
    # if neighbor IP configured.
    if 'peer_group' in peer_config:
        peer_group_name = peer_config['peer_group']
    if peer_group_name:
        tmp = dict_search(f'peer_group.{peer_group_name}.address_family', bgp_config)
        if tmp: return True
    return False

-def verify(bgp):
+def verify(config_dict):
+    if not has_frr_protocol_in_dict(config_dict, 'bgp'):
+        return None
+
+    vrf = None
+    if 'vrf_context' in config_dict:
+        vrf = config_dict['vrf_context']
+
+    # equivalent of the C foo ? 'a' : 'b' statement
+    bgp = vrf and config_dict['vrf']['name'][vrf]['protocols']['bgp'] or config_dict['bgp']
+    bgp['policy'] = config_dict['policy']
+
    if 'deleted' in bgp:
-        if 'vrf' in bgp:
+        if vrf:
            # Cannot delete vrf if it exists in import vrf list in other vrfs
            for tmp_afi in ['ipv4_unicast', 'ipv6_unicast']:
-                if verify_vrf_as_import(bgp['vrf'], tmp_afi, bgp['dependent_vrfs']):
-                    raise ConfigError(f'Cannot delete VRF instance "{bgp["vrf"]}", ' \
+                if verify_vrf_as_import(vrf, tmp_afi, bgp['dependent_vrfs']):
+                    raise ConfigError(f'Cannot delete VRF instance "{vrf}", ' \
                                      'unconfigure "import vrf" commands!')
        else:
            # We are running in the default VRF context, thus we can not delete
            # our main BGP instance if there are dependent BGP VRF instances.
if 'dependent_vrfs' in bgp: for vrf, vrf_options in bgp['dependent_vrfs'].items(): if vrf != 'default': if dict_search('protocols.bgp', vrf_options): - raise ConfigError('Cannot delete default BGP instance, ' \ - 'dependent VRF instance(s) exist(s)!') + dependent_vrfs = ', '.join(bgp['dependent_vrfs'].keys()) + raise ConfigError(f'Cannot delete default BGP instance, ' \ + f'dependent VRF instance(s): {dependent_vrfs}') if 'vni' in vrf_options: raise ConfigError('Cannot delete default BGP instance, ' \ 'dependent L3VNI exists!') return None if 'system_as' not in bgp: raise ConfigError('BGP system-as number must be defined!') # Verify BMP if 'bmp' in bgp: # check bmp flag "bgpd -d -F traditional --daemon -A 127.0.0.1 -M rpki -M bmp" if not process_named_running('bgpd', 'bmp'): raise ConfigError( f'"bmp" flag is not found in bgpd. Configure "set system frr bmp" and restart bgp process' ) # check bmp target if 'target' in bgp['bmp']: for target, target_config in bgp['bmp']['target'].items(): if 'address' not in target_config: raise ConfigError(f'BMP target "{target}" address must be defined!') # Verify vrf on interface and bgp section if 'interface' in bgp: for interface in bgp['interface']: error_msg = f'Interface "{interface}" belongs to different VRF instance' tmp = get_interface_vrf(interface) - if 'vrf' in bgp: - if bgp['vrf'] != tmp: - vrf = bgp['vrf'] + if vrf: + if vrf != tmp: raise ConfigError(f'{error_msg} "{vrf}"!') elif tmp != 'default': raise ConfigError(f'{error_msg} "{tmp}"!') peer_groups_context = dict() # Common verification for both peer-group and neighbor statements for neighbor in ['neighbor', 'peer_group']: # bail out early if there is no neighbor or peer-group statement # this also saves one indention level if neighbor not in bgp: continue for peer, peer_config in bgp[neighbor].items(): # Only regular "neighbor" statement can have a peer-group set # Check if the configure peer-group exists if 'peer_group' in peer_config: peer_group = peer_config['peer_group'] if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: raise ConfigError(f'Specified peer-group "{peer_group}" for '\ f'neighbor "{neighbor}" does not exist!') if 'remote_as' in peer_config: is_ibgp = True if peer_config['remote_as'] != 'internal' and \ peer_config['remote_as'] != bgp['system_as']: is_ibgp = False if peer_group not in peer_groups_context: peer_groups_context[peer_group] = is_ibgp elif peer_groups_context[peer_group] != is_ibgp: raise ConfigError(f'Peer-group members must be ' f'all internal or all external') if 'local_role' in peer_config: #Ensure Local Role has only one value. if len(peer_config['local_role']) > 1: raise ConfigError(f'Only one local role can be specified for peer "{peer}"!') if 'local_as' in peer_config: if len(peer_config['local_as']) > 1: raise ConfigError(f'Only one local-as number can be specified for peer "{peer}"!') # Neighbor local-as override can not be the same as the local-as # we use for this BGP instane! 
asn = list(peer_config['local_as'].keys())[0] if asn == bgp['system_as']: raise ConfigError('Cannot have local-as same as system-as number') # Neighbor AS specified for local-as and remote-as can not be the same if dict_search('remote_as', peer_config) == asn and neighbor != 'peer_group': raise ConfigError(f'Neighbor "{peer}" has local-as specified which is '\ 'the same as remote-as, this is not allowed!') # ttl-security and ebgp-multihop can't be used in the same configration if 'ebgp_multihop' in peer_config and 'ttl_security' in peer_config: raise ConfigError('You can not set both ebgp-multihop and ttl-security hops') # interface and ebgp-multihop can't be used in the same configration if 'ebgp_multihop' in peer_config and 'interface' in peer_config: raise ConfigError(f'Ebgp-multihop can not be used with directly connected '\ f'neighbor "{peer}"') # Check if neighbor has both override capability and strict capability match # configured at the same time. if 'override_capability' in peer_config and 'strict_capability_match' in peer_config: raise ConfigError(f'Neighbor "{peer}" cannot have both override-capability and '\ 'strict-capability-match configured at the same time!') # Check spaces in the password if 'password' in peer_config and ' ' in peer_config['password']: raise ConfigError('Whitespace is not allowed in passwords!') # Some checks can/must only be done on a neighbor and not a peer-group if neighbor == 'neighbor': # remote-as must be either set explicitly for the neighbor # or for the entire peer-group if not verify_remote_as(peer_config, bgp): raise ConfigError(f'Neighbor "{peer}" remote-as must be set!') if not verify_afi(peer_config, bgp): Warning(f'BGP neighbor "{peer}" requires address-family!') # Peer-group member cannot override remote-as of peer-group if 'peer_group' in peer_config: peer_group = peer_config['peer_group'] if 'remote_as' in peer_config and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') if 'interface' in peer_config: if 'peer_group' in peer_config['interface']: peer_group = peer_config['interface']['peer_group'] if 'remote_as' in peer_config['interface'] and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') if 'v6only' in peer_config['interface']: if 'peer_group' in peer_config['interface']['v6only']: peer_group = peer_config['interface']['v6only']['peer_group'] if 'remote_as' in peer_config['interface']['v6only'] and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') # Only checks for ipv4 and ipv6 neighbors # Check if neighbor address is assigned as system interface address - vrf = None vrf_error_msg = f' in default VRF!' - if 'vrf' in bgp: - vrf = bgp['vrf'] + if vrf: vrf_error_msg = f' in VRF "{vrf}"!' 
if is_ip(peer) and is_addr_assigned(peer, vrf): raise ConfigError(f'Can not configure local address as neighbor "{peer}"{vrf_error_msg}') elif is_interface(peer): if 'peer_group' in peer_config: raise ConfigError(f'peer-group must be set under the interface node of "{peer}"') if 'remote_as' in peer_config: raise ConfigError(f'remote-as must be set under the interface node of "{peer}"') if 'source_interface' in peer_config['interface']: raise ConfigError(f'"source-interface" option not allowed for neighbor "{peer}"') # Local-AS allowed only for EBGP peers if 'local_as' in peer_config: remote_as = verify_remote_as(peer_config, bgp) if remote_as == bgp['system_as']: raise ConfigError(f'local-as configured for "{peer}", allowed only for eBGP peers!') for afi in ['ipv4_unicast', 'ipv4_multicast', 'ipv4_labeled_unicast', 'ipv4_flowspec', 'ipv6_unicast', 'ipv6_multicast', 'ipv6_labeled_unicast', 'ipv6_flowspec', 'l2vpn_evpn']: # Bail out early if address family is not configured if 'address_family' not in peer_config or afi not in peer_config['address_family']: continue # Check if neighbor has both ipv4 unicast and ipv4 labeled unicast configured at the same time. if 'ipv4_unicast' in peer_config['address_family'] and 'ipv4_labeled_unicast' in peer_config['address_family']: raise ConfigError(f'Neighbor "{peer}" cannot have both ipv4-unicast and ipv4-labeled-unicast configured at the same time!') # Check if neighbor has both ipv6 unicast and ipv6 labeled unicast configured at the same time. if 'ipv6_unicast' in peer_config['address_family'] and 'ipv6_labeled_unicast' in peer_config['address_family']: raise ConfigError(f'Neighbor "{peer}" cannot have both ipv6-unicast and ipv6-labeled-unicast configured at the same time!') afi_config = peer_config['address_family'][afi] if 'conditionally_advertise' in afi_config: if 'advertise_map' not in afi_config['conditionally_advertise']: raise ConfigError('Must speficy advertise-map when conditionally-advertise is in use!') # Verify advertise-map (which is a route-map) exists verify_route_map(afi_config['conditionally_advertise']['advertise_map'], bgp) if ('exist_map' not in afi_config['conditionally_advertise'] and 'non_exist_map' not in afi_config['conditionally_advertise']): raise ConfigError('Must either speficy exist-map or non-exist-map when ' \ 'conditionally-advertise is in use!') if {'exist_map', 'non_exist_map'} <= set(afi_config['conditionally_advertise']): raise ConfigError('Can not specify both exist-map and non-exist-map for ' \ 'conditionally-advertise!') if 'exist_map' in afi_config['conditionally_advertise']: verify_route_map(afi_config['conditionally_advertise']['exist_map'], bgp) if 'non_exist_map' in afi_config['conditionally_advertise']: verify_route_map(afi_config['conditionally_advertise']['non_exist_map'], bgp) # T4332: bgp deterministic-med cannot be disabled while addpath-tx-bestpath-per-AS is in use if 'addpath_tx_per_as' in afi_config: if dict_search('parameters.deterministic_med', bgp) == None: raise ConfigError('addpath-tx-per-as requires BGP deterministic-med paramtere to be set!') # Validate if configured Prefix list exists if 'prefix_list' in afi_config: for tmp in ['import', 'export']: if tmp not in afi_config['prefix_list']: # bail out early continue if afi == 'ipv4_unicast': verify_prefix_list(afi_config['prefix_list'][tmp], bgp) elif afi == 'ipv6_unicast': verify_prefix_list(afi_config['prefix_list'][tmp], bgp, version='6') if 'route_map' in afi_config: for tmp in ['import', 'export']: if tmp in afi_config['route_map']: 
verify_route_map(afi_config['route_map'][tmp], bgp) if 'route_reflector_client' in afi_config: peer_group_as = peer_config.get('remote_as') if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') else: if 'peer_group' in peer_config: peer_group_as = dict_search(f'peer_group.{peer_group}.remote_as', bgp) if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') # T5833 not all AFIs are supported for VRF if 'vrf' in bgp and 'address_family' in peer_config: unsupported_vrf_afi = { 'ipv4_flowspec', 'ipv6_flowspec', 'ipv4_labeled_unicast', 'ipv6_labeled_unicast', 'ipv4_vpn', 'ipv6_vpn', } for afi in peer_config['address_family']: if afi in unsupported_vrf_afi: raise ConfigError( f"VRF is not allowed for address-family '{afi.replace('_', '-')}'" ) # Throw an error if a peer group is not configured for allow range for prefix in dict_search('listen.range', bgp) or []: # we can not use dict_search() here as prefix contains dots ... if 'peer_group' not in bgp['listen']['range'][prefix]: raise ConfigError(f'Listen range for prefix "{prefix}" has no peer group configured.') peer_group = bgp['listen']['range'][prefix]['peer_group'] if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: raise ConfigError(f'Peer-group "{peer_group}" for listen range "{prefix}" does not exist!') if not verify_remote_as(bgp['listen']['range'][prefix], bgp): raise ConfigError(f'Peer-group "{peer_group}" requires remote-as to be set!') # Throw an error if the global administrative distance parameters aren't all filled out. if dict_search('parameters.distance.global', bgp) != None: for key in ['external', 'internal', 'local']: if dict_search(f'parameters.distance.global.{key}', bgp) == None: raise ConfigError('Missing mandatory configuration option for '\ f'global administrative distance {key}!') # TCP keepalive requires all three parameters to be set if dict_search('parameters.tcp_keepalive', bgp) != None: if not {'idle', 'interval', 'probes'} <= set(bgp['parameters']['tcp_keepalive']): raise ConfigError('TCP keepalive incomplete - idle, keepalive and probes must be set') # Address Family specific validation if 'address_family' in bgp: for afi, afi_config in bgp['address_family'].items(): if 'distance' in afi_config: # Throw an error if the address family specific administrative # distance parameters aren't all filled out. 
for key in ['external', 'internal', 'local']: if key not in afi_config['distance']: raise ConfigError('Missing mandatory configuration option for '\ f'{afi} administrative distance {key}!') if afi in ['ipv4_unicast', 'ipv6_unicast']: - vrf_name = bgp['vrf'] if dict_search('vrf', bgp) else 'default' + vrf_name = vrf if vrf else 'default' # Verify if currant VRF contains rd and route-target options # and does not exist in import list in other VRFs if dict_search(f'rd.vpn.export', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "rd vpn export" command!') if not dict_search('parameters.router_id', bgp): Warning(f'BGP "router-id" is required when using "rd" and "route-target"!') if dict_search('route_target.vpn.both', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "route-target vpn both" command!') if dict_search('route_target.vpn.export', afi_config): raise ConfigError( 'Command "route-target vpn export" conflicts '\ 'with "route-target vpn both" command!') if dict_search('route_target.vpn.import', afi_config): raise ConfigError( 'Command "route-target vpn import" conflicts '\ 'with "route-target vpn both" command!') if dict_search('route_target.vpn.import', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf conflicts" with "route-target vpn import" command!') if dict_search('route_target.vpn.export', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "route-target vpn export" command!') # Verify if VRFs in import do not contain rd # and route-target options if dict_search('import.vrf', afi_config) is not None: # Verify if VRF with import does not contain rd # and route-target options if verify_vrf_import_options(afi_config): raise ConfigError( 'Please unconfigure "import vrf" commands before using vpn commands in the same VRF!') # Verify if VRFs in import list do not contain rd # and route-target options if verify_vrflist_import(afi, afi_config, bgp['dependent_vrfs']): raise ConfigError( 'Please unconfigure import vrf commands before using vpn commands in dependent VRFs!') # FRR error: please unconfigure vpn to vrf commands before # using import vrf commands if 'vpn' in afi_config['import'] or dict_search('export.vpn', afi_config) != None: raise ConfigError('Please unconfigure VPN to VRF commands before '\ 'using "import vrf" commands!') # Verify that the export/import route-maps do exist for export_import in ['export', 'import']: tmp = dict_search(f'route_map.vpn.{export_import}', afi_config) if tmp: verify_route_map(tmp, bgp) # per-vrf sid and per-af sid are mutually exclusive if 'sid' in afi_config and 'sid' in bgp: raise ConfigError('SID per VRF and SID per address-family are mutually exclusive!') # Checks only required for L2VPN EVPN if afi in ['l2vpn_evpn']: if 'vni' in afi_config: for vni, vni_config in afi_config['vni'].items(): if 'rd' in vni_config and 'advertise_all_vni' not in afi_config: raise ConfigError('BGP EVPN "rd" requires "advertise-all-vni" to be set!') if 'route_target' in vni_config and 'advertise_all_vni' not in afi_config: raise ConfigError('BGP EVPN "route-target" requires "advertise-all-vni" to be set!') return None -def generate(bgp): - if not bgp or 'deleted' in bgp: - return None - - bgp['frr_bgpd_config'] = render_to_string('frr/bgpd.frr.j2', bgp) +def 
generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(bgp): - if 'deleted' in bgp: - # We need to ensure that the L3VNI is deleted first. - # This is not possible with old config backend - # priority bug - if {'vrf', 'vni'} <= set(bgp): - call('vtysh -c "conf t" -c "vrf {vrf}" -c "no vni {vni}"'.format(**bgp)) - - bgp_daemon = 'bgpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in bgp: - vrf = ' vrf ' + bgp['vrf'] - - frr_cfg.load_configuration(bgp_daemon) - - # Remove interface specific config - for key in ['interface', 'interface_removed']: - if key not in bgp: - continue - for interface in bgp[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - frr_cfg.modify_section(f'^router bgp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_bgpd_config' in bgp: - frr_cfg.add_before(frr.default_add_before, bgp['frr_bgpd_config']) - frr_cfg.commit_configuration(bgp_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_eigrp.py b/src/conf_mode/protocols_eigrp.py index c13e52a3d..8f49bb151 100755 --- a/src/conf_mode/protocols_eigrp.py +++ b/src/conf_mode/protocols_eigrp.py @@ -1,119 +1,74 @@ #!/usr/bin/env python3 # -# Copyright (C) 2022 VyOS maintainers and contributors +# Copyright (C) 2022-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_vrf -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running +from vyos.frrender import FRRender from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() - def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] - - base_path = ['protocols', 'eigrp'] - - # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'eigrp'] or base_path - eigrp = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) + return get_frrender_dict(conf, argv) - # Assign the name of our VRF context. 
This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: eigrp.update({'vrf' : vrf}) - - if not conf.exists(base): - eigrp.update({'deleted' : ''}) - if not vrf: - # We are running in the default VRF context, thus we can not delete - # our main EIGRP instance if there are dependent EIGRP VRF instances. - eigrp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], - key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - return eigrp - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - eigrp = dict_merge(tmp, eigrp) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'eigrp'): + return None - return eigrp + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] -def verify(eigrp): - if not eigrp or 'deleted' in eigrp: - return + # eqivalent of the C foo ? 'a' : 'b' statement + eigrp = vrf and config_dict['vrf']['name'][vrf]['protocols']['eigrp'] or config_dict['eigrp'] + eigrp['policy'] = config_dict['policy'] if 'system_as' not in eigrp: raise ConfigError('EIGRP system-as must be defined!') - if 'vrf' in eigrp: - verify_vrf(eigrp) - -def generate(eigrp): - if not eigrp or 'deleted' in eigrp: - return None - - eigrp['frr_eigrpd_config'] = render_to_string('frr/eigrpd.frr.j2', eigrp) + if vrf: + verify_vrf({'vrf': vrf}) -def apply(eigrp): - eigrp_daemon = 'eigrpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in eigrp: - vrf = ' vrf ' + eigrp['vrf'] - - frr_cfg.load_configuration(eigrp_daemon) - frr_cfg.modify_section(f'^router eigrp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_eigrpd_config' in eigrp: - frr_cfg.add_before(frr.default_add_before, eigrp['frr_eigrpd_config']) - frr_cfg.commit_configuration(eigrp_daemon) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py index ba2f3cf0d..1e5f0d6e8 100755 --- a/src/conf_mode/protocols_isis.py +++ b/src/conf_mode/protocols_isis.py @@ -1,312 +1,253 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_interface_exists +from vyos.frrender import FRRender from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf, argv) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'isis'): + return None - base_path = ['protocols', 'isis'] + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path - isis = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: isis['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - isis['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - isis.update({'deleted' : ''}) - return isis - - # merge in default values - isis = conf.merge_defaults(isis, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - isis = dict_merge(tmp, isis) - - return isis - -def verify(isis): - # bail out early - looks like removal from running config - if not isis or 'deleted' in isis: + isis = vrf and config_dict['vrf']['name'][vrf]['protocols']['isis'] or config_dict['isis'] + isis['policy'] = config_dict['policy'] + + if 'deleted' in isis: return None if 'net' not in isis: raise ConfigError('Network entity is mandatory!') # last byte in IS-IS area address must be 0 tmp = isis['net'].split('.') if int(tmp[-1]) != 0: raise ConfigError('Last byte of IS-IS network entity title must always be 0!') verify_common_route_maps(isis) # If interface not set if 'interface' not in isis: raise ConfigError('Interface used for routing updates is mandatory!') for interface in isis['interface']: verify_interface_exists(isis, interface) # Interface MTU must be >= configured lsp-mtu mtu = Interface(interface).get_mtu() area_mtu = isis['lsp_mtu'] # Recommended maximum PDU size = interface MTU - 3 bytes recom_area_mtu = mtu - 3 if mtu < int(area_mtu) or int(area_mtu) > recom_area_mtu: raise ConfigError(f'Interface {interface} has MTU {mtu}, ' \ f'current area MTU is {area_mtu}! \n' \ f'Recommended area lsp-mtu {recom_area_mtu} or less ' \ '(calculated on MTU size).') - if 'vrf' in isis: + if vrf: # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. - vrf = isis['vrf'] tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') # If md5 and plaintext-password set at the same time for password in ['area_password', 'domain_password']: if password in isis: if {'md5', 'plaintext_password'} <= set(isis[password]): tmp = password.replace('_', '-') raise ConfigError(f'Can use either md5 or plaintext-password for {tmp}!') # If one param from delay set, but not set others if 'spf_delay_ietf' in isis: required_timers = ['holddown', 'init_delay', 'long_delay', 'short_delay', 'time_to_learn'] exist_timers = [] for elm_timer in required_timers: if elm_timer in isis['spf_delay_ietf']: exist_timers.append(elm_timer) exist_timers = set(required_timers).difference(set(exist_timers)) if len(exist_timers) > 0: raise ConfigError('All types of spf-delay must be configured. 
Missing: ' + ', '.join(exist_timers).replace('_', '-')) # If Redistribute set, but level don't set if 'redistribute' in isis: proc_level = isis.get('level','').replace('-','_') for afi in ['ipv4', 'ipv6']: if afi not in isis['redistribute']: continue for proto, proto_config in isis['redistribute'][afi].items(): if 'level_1' not in proto_config and 'level_2' not in proto_config: raise ConfigError(f'Redistribute level-1 or level-2 should be specified in ' \ f'"protocols isis redistribute {afi} {proto}"!') for redistr_level, redistr_config in proto_config.items(): if proc_level and proc_level != 'level_1_2' and proc_level != redistr_level: raise ConfigError(f'"protocols isis redistribute {afi} {proto} {redistr_level}" ' \ f'can not be used with \"protocols isis level {proc_level}\"!') # Segment routing checks if dict_search('segment_routing.global_block', isis): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis) # If segment routing global block high or low value is blank, throw error if not (g_low_label_value or g_high_label_value): raise ConfigError('Segment routing global-block requires both low and high value!') # If segment routing global block low value is higher than the high value, throw error if int(g_low_label_value) > int(g_high_label_value): raise ConfigError('Segment routing global-block low value must be lower than high value') if dict_search('segment_routing.local_block', isis): if dict_search('segment_routing.global_block', isis) == None: raise ConfigError('Segment routing local-block requires global-block to be configured!') l_high_label_value = dict_search('segment_routing.local_block.high_label_value', isis) l_low_label_value = dict_search('segment_routing.local_block.low_label_value', isis) # If segment routing local-block high or low value is blank, throw error if not (l_low_label_value or l_high_label_value): raise ConfigError('Segment routing local-block requires both high and low value!') # If segment routing local-block low value is higher than the high value, throw error if int(l_low_label_value) > int(l_high_label_value): raise ConfigError('Segment routing local-block low value must be lower than high value') # local-block most live outside global block global_range = range(int(g_low_label_value), int(g_high_label_value) +1) local_range = range(int(l_low_label_value), int(l_high_label_value) +1) # Check for overlapping ranges if list(set(global_range) & set(local_range)): raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\ f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!') # Check for a blank or invalid value per prefix if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'absolute' in prefix_config: if prefix_config['absolute'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} absolute value cannot be blank.') elif 'index' in prefix_config: if prefix_config['index'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.') # Check for explicit-null and no-php-flag configured at the same time per prefix if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'absolute' in prefix_config: if ("explicit_null" in prefix_config['absolute']) and ("no_php_flag" in 
prefix_config['absolute']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') elif 'index' in prefix_config: if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') # Check for index ranges being larger than the segment routing global block if dict_search('segment_routing.global_block', isis): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis) g_label_difference = int(g_high_label_value) - int(g_low_label_value) if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'index' in prefix_config: index_size = isis['segment_routing']['prefix'][prefix]['index']['value'] if int(index_size) > int(g_label_difference): raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\ f'index base size larger than the SRGB label base.') # Check for LFA tiebreaker index duplication if dict_search('fast_reroute.lfa.local.tiebreaker', isis): comparison_dictionary = {} for item, item_options in isis['fast_reroute']['lfa']['local']['tiebreaker'].items(): for index, index_options in item_options.items(): for index_value, index_value_options in index_options.items(): if index_value not in comparison_dictionary.keys(): comparison_dictionary[index_value] = [item] else: comparison_dictionary[index_value].append(item) for index, index_length in comparison_dictionary.items(): if int(len(index_length)) > 1: raise ConfigError(f'LFA index {index} cannot have more than one tiebreaker configured.') # Check for LFA priority-limit configured multiple times per level if dict_search('fast_reroute.lfa.local.priority_limit', isis): comparison_dictionary = {} for priority, priority_options in isis['fast_reroute']['lfa']['local']['priority_limit'].items(): for level, level_options in priority_options.items(): if level not in comparison_dictionary.keys(): comparison_dictionary[level] = [priority] else: comparison_dictionary[level].append(priority) for level, level_length in comparison_dictionary.items(): if int(len(level_length)) > 1: raise ConfigError(f'LFA priority-limit on {level.replace("_", "-")} cannot have more than one priority configured.') # Check for LFA remote prefix list configured with more than one list if dict_search('fast_reroute.lfa.remote.prefix_list', isis): if int(len(isis['fast_reroute']['lfa']['remote']['prefix_list'].items())) > 1: raise ConfigError(f'LFA remote prefix-list has more than one configured. 
Cannot have more than one configured.') return None -def generate(isis): - if not isis or 'deleted' in isis: - return None - - isis['frr_isisd_config'] = render_to_string('frr/isisd.frr.j2', isis) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(isis): - isis_daemon = 'isisd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in isis: - vrf = ' vrf ' + isis['vrf'] - - frr_cfg.load_configuration(isis_daemon) - frr_cfg.modify_section(f'^router isis VyOS{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in isis: - continue - for interface in isis[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_isisd_config' in isis: - frr_cfg.add_before(frr.default_add_before, isis['frr_isisd_config']) - - frr_cfg.commit_configuration(isis_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py index ad164db9f..e8097b7ff 100755 --- a/src/conf_mode/protocols_mpls.py +++ b/src/conf_mode/protocols_mpls.py @@ -1,148 +1,140 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020-2022 VyOS maintainers and contributors +# Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from glob import glob from vyos.config import Config -from vyos.template import render_to_string +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.utils.dict import dict_search from vyos.utils.file import read_file +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write from vyos.configverify import verify_interface_exists from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() -config_file = r'/tmp/ldpd.frr' - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'mpls'] - mpls = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - return mpls + return get_frrender_dict(conf) -def verify(mpls): - # If no config, then just bail out early. 
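With protocols_mpls.py the patch arrives at the same shape already given to babel, bfd, bgp, eigrp and isis: get_config() returns one shared FRR dictionary, verify() picks its own protocol out of it, and rendering/applying is delegated to FRRender unless vyos-configd is active and performs that step centrally. A schematic composite of that per-script skeleton, stitched together from the hunks above purely for illustration (the protocol name passed to has_frr_protocol_in_dict() varies per script):

    from vyos.config import Config
    from vyos.configdict import get_frrender_dict
    from vyos.configverify import has_frr_protocol_in_dict
    from vyos.frrender import FRRender
    from vyos.utils.process import is_systemd_service_running

    def get_config(config=None):
        conf = config if config else Config()
        # One dictionary covering every FRR protocol instead of per-daemon lookups
        return get_frrender_dict(conf)

    def verify(config_dict):
        if not has_frr_protocol_in_dict(config_dict, 'mpls'):
            return None
        # ... protocol specific checks on config_dict['mpls'] ...
        return None

    def generate(config_dict):
        # vyos-configd renders the FRR configuration centrally when it is active
        if config_dict and not is_systemd_service_running('vyos-configd.service'):
            FRRender().generate(config_dict)
        return None

    def apply(config_dict):
        if config_dict and not is_systemd_service_running('vyos-configd.service'):
            FRRender().apply()
        return None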
- if not mpls: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'mpls'): return None + mpls = config_dict['mpls'] + if 'interface' in mpls: for interface in mpls['interface']: verify_interface_exists(mpls, interface) # Checks to see if LDP is properly configured if 'ldp' in mpls: # If router ID not defined if 'router_id' not in mpls['ldp']: raise ConfigError('Router ID missing. An LDP router id is mandatory!') # If interface not set if 'interface' not in mpls['ldp']: raise ConfigError('LDP interfaces are missing. An LDP interface is mandatory!') # If transport addresses are not set if not dict_search('ldp.discovery.transport_ipv4_address', mpls) and \ not dict_search('ldp.discovery.transport_ipv6_address', mpls): raise ConfigError('LDP transport address missing!') return None -def generate(mpls): - # If there's no MPLS config generated, create dictionary key with no value. - if not mpls or 'deleted' in mpls: - return None - - mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.j2', mpls) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(mpls): - ldpd_damon = 'ldpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() - frr_cfg.load_configuration(ldpd_damon) - frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True) + if not has_frr_protocol_in_dict(config_dict, 'mpls'): + return None - if 'frr_ldpd_config' in mpls: - frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config']) - frr_cfg.commit_configuration(ldpd_damon) + mpls = config_dict['mpls'] # Set number of entries in the platform label tables labels = '0' if 'interface' in mpls: labels = '1048575' sysctl_write('net.mpls.platform_labels', labels) # Check for changes in global MPLS options if 'parameters' in mpls: # Choose whether to copy IP TTL to MPLS header TTL if 'no_propagate_ttl' in mpls['parameters']: sysctl_write('net.mpls.ip_ttl_propagate', 0) # Choose whether to limit maximum MPLS header TTL if 'maximum_ttl' in mpls['parameters']: ttl = mpls['parameters']['maximum_ttl'] sysctl_write('net.mpls.default_ttl', ttl) else: # Set default global MPLS options if not defined. sysctl_write('net.mpls.ip_ttl_propagate', 1) sysctl_write('net.mpls.default_ttl', 255) # Enable and disable MPLS processing on interfaces per configuration if 'interface' in mpls: system_interfaces = [] # Populate system interfaces list with local MPLS capable interfaces for interface in glob('/proc/sys/net/mpls/conf/*'): system_interfaces.append(os.path.basename(interface)) # This is where the comparison is done on if an interface needs to be enabled/disabled. 
for system_interface in system_interfaces: interface_state = read_file(f'/proc/sys/net/mpls/conf/{system_interface}/input') if '1' in interface_state: if system_interface not in mpls['interface']: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 0) elif '0' in interface_state: if system_interface in mpls['interface']: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 1) else: system_interfaces = [] # If MPLS interfaces are not configured, set MPLS processing disabled for interface in glob('/proc/sys/net/mpls/conf/*'): system_interfaces.append(os.path.basename(interface)) for system_interface in system_interfaces: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 0) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_openfabric.py b/src/conf_mode/protocols_openfabric.py index 8e8c50c06..41c5d9544 100644 --- a/src/conf_mode/protocols_openfabric.py +++ b/src/conf_mode/protocols_openfabric.py @@ -1,145 +1,110 @@ #!/usr/bin/env python3 # # Copyright (C) 2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.base import Warning from vyos.config import Config -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.utils.process import is_systemd_service_running +from vyos.frrender import FRRender from vyos import ConfigError -from vyos import frr from vyos import airbag - airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base_path = ['protocols', 'openfabric'] - - openfabric = conf.get_config_dict(base_path, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Remove per domain MPLS configuration - get a list of all changed Openfabric domains - # (removed and added) so that they will be properly rendered for the FRR config. 
- openfabric['domains_all'] = list(conf.list_nodes(' '.join(base_path) + f' domain') + - node_changed(conf, base_path + ['domain'])) - - # Get a list of all interfaces - openfabric['interfaces_all'] = [] - for domain in openfabric['domains_all']: - interfaces_modified = list(node_changed(conf, base_path + ['domain', domain, 'interface']) + - conf.list_nodes(' '.join(base_path) + f' domain {domain} interface')) - openfabric['interfaces_all'].extend(interfaces_modified) + return get_frrender_dict(conf) - if not conf.exists(base_path): - openfabric.update({'deleted': ''}) - - return openfabric +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'openfabric'): + return None -def verify(openfabric): - # bail out early - looks like removal from running config - if not openfabric or 'deleted' in openfabric: + openfabric = config_dict['openfabric'] + if 'deleted' in openfabric: return None if 'net' not in openfabric: raise ConfigError('Network entity is mandatory!') # last byte in OpenFabric area address must be 0 tmp = openfabric['net'].split('.') if int(tmp[-1]) != 0: raise ConfigError('Last byte of OpenFabric network entity title must always be 0!') if 'domain' not in openfabric: raise ConfigError('OpenFabric domain name is mandatory!') interfaces_used = [] for domain, domain_config in openfabric['domain'].items(): # If interface not set if 'interface' not in domain_config: raise ConfigError(f'Interface used for routing updates in OpenFabric "{domain}" is mandatory!') for iface, iface_config in domain_config['interface'].items(): verify_interface_exists(openfabric, iface) # interface can be activated only on one OpenFabric instance if iface in interfaces_used: raise ConfigError(f'Interface {iface} is already used in different OpenFabric instance!') if 'address_family' not in iface_config or len(iface_config['address_family']) < 1: raise ConfigError(f'Need to specify address family for the interface "{iface}"!') # If md5 and plaintext-password set at the same time if 'password' in iface_config: if {'md5', 'plaintext_password'} <= set(iface_config['password']): raise ConfigError(f'Can use either md5 or plaintext-password for password for the interface!') if iface == 'lo' and 'passive' not in iface_config: Warning('For loopback interface passive mode is implied!') interfaces_used.append(iface) # If md5 and plaintext-password set at the same time password = 'domain_password' if password in domain_config: if {'md5', 'plaintext_password'} <= set(domain_config[password]): raise ConfigError(f'Can use either md5 or plaintext-password for domain-password!') return None -def generate(openfabric): - if not openfabric or 'deleted' in openfabric: - return None - - openfabric['frr_fabricd_config'] = render_to_string('frr/fabricd.frr.j2', openfabric) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(openfabric): - openfabric_daemon = 'fabricd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(openfabric_daemon) - for domain in openfabric['domains_all']: - frr_cfg.modify_section(f'^router openfabric {domain}', stop_pattern='^exit', remove_stop_mark=True) - - for interface in openfabric['interfaces_all']: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_fabricd_config' in openfabric: - frr_cfg.add_before(frr.default_add_before, 
openfabric['frr_fabricd_config']) - - frr_cfg.commit_configuration(openfabric_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py index 7347c4faa..f2c95a63c 100755 --- a/src/conf_mode/protocols_ospf.py +++ b/src/conf_mode/protocols_ospf.py @@ -1,290 +1,197 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists from vyos.configverify import verify_access_list -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] - - base_path = ['protocols', 'ospf'] - - # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospf'] or base_path - ospf = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: ospf['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - ospf['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - ospf.update({'deleted' : ''}) - return ospf + return get_frrender_dict(conf, argv) - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. 
- default_values = conf.get_config_defaults(**ospf.kwargs, recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. - if dict_search('default_information.originate', ospf) is None: - del default_values['default_information'] - if 'mpls_te' not in ospf: - del default_values['mpls_te'] - if 'graceful_restart' not in ospf: - del default_values['graceful_restart'] - for area_num in default_values.get('area', []): - if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None: - del default_values['area'][area_num]['area_type']['nssa'] - - for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']: - if dict_search(f'redistribute.{protocol}', ospf) is None: - del default_values['redistribute'][protocol] - if not bool(default_values['redistribute']): - del default_values['redistribute'] - - for interface in ospf.get('interface', []): - # We need to reload the defaults on every pass b/c of - # hello-multiplier dependency on dead-interval - # If hello-multiplier is set, we need to remove the default from - # dead-interval. - if 'hello_multiplier' in ospf['interface'][interface]: - del default_values['interface'][interface]['dead_interval'] - - ospf = config_dict_merge(default_values, ospf) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ospf = dict_merge(tmp, ospf) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ospf'): + return None - return ospf + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] -def verify(ospf): - if not ospf: - return None + # eqivalent of the C foo ? 'a' : 'b' statement + ospf = vrf and config_dict['vrf']['name'][vrf]['protocols']['ospf'] or config_dict['ospf'] + ospf['policy'] = config_dict['policy'] verify_common_route_maps(ospf) # As we can have a default-information route-map, we need to validate it! route_map_name = dict_search('default_information.originate.route_map', ospf) if route_map_name: verify_route_map(route_map_name, ospf) # Validate if configured Access-list exists if 'area' in ospf: networks = [] for area, area_config in ospf['area'].items(): if 'import_list' in area_config: acl_import = area_config['import_list'] if acl_import: verify_access_list(acl_import, ospf) if 'export_list' in area_config: acl_export = area_config['export_list'] if acl_export: verify_access_list(acl_export, ospf) if 'network' in area_config: for network in area_config['network']: if network in networks: raise ConfigError(f'Network "{network}" already defined in different area!') networks.append(network) if 'interface' in ospf: for interface, interface_config in ospf['interface'].items(): verify_interface_exists(ospf, interface) # One can not use dead-interval and hello-multiplier at the same # time. FRR will only activate the last option set via CLI. 
if {'hello_multiplier', 'dead_interval'} <= set(interface_config): raise ConfigError(f'Can not use hello-multiplier and dead-interval ' \ f'concurrently for {interface}!') # One can not use the "network <prefix> area <id>" command and an # per interface area assignment at the same time. FRR will error # out using: "Please remove all network commands first." if 'area' in ospf and 'area' in interface_config: for area, area_config in ospf['area'].items(): if 'network' in area_config: raise ConfigError('Can not use OSPF interface area and area ' \ 'network configuration at the same time!') # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. - if 'vrf' in ospf: - vrf = ospf['vrf'] + if vrf: tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') # Segment routing checks if dict_search('segment_routing.global_block', ospf): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf) # If segment routing global block high or low value is blank, throw error if not (g_low_label_value or g_high_label_value): raise ConfigError('Segment routing global-block requires both low and high value!') # If segment routing global block low value is higher than the high value, throw error if int(g_low_label_value) > int(g_high_label_value): raise ConfigError('Segment routing global-block low value must be lower than high value') if dict_search('segment_routing.local_block', ospf): if dict_search('segment_routing.global_block', ospf) == None: raise ConfigError('Segment routing local-block requires global-block to be configured!') l_high_label_value = dict_search('segment_routing.local_block.high_label_value', ospf) l_low_label_value = dict_search('segment_routing.local_block.low_label_value', ospf) # If segment routing local-block high or low value is blank, throw error if not (l_low_label_value or l_high_label_value): raise ConfigError('Segment routing local-block requires both high and low value!') # If segment routing local-block low value is higher than the high value, throw error if int(l_low_label_value) > int(l_high_label_value): raise ConfigError('Segment routing local-block low value must be lower than high value') # local-block most live outside global block global_range = range(int(g_low_label_value), int(g_high_label_value) +1) local_range = range(int(l_low_label_value), int(l_high_label_value) +1) # Check for overlapping ranges if list(set(global_range) & set(local_range)): raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\ f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!') # Check for a blank or invalid value per prefix if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: if prefix_config['index'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.') # Check for explicit-null and no-php-flag configured at the same time per prefix if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in 
prefix_config: if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') # Check for index ranges being larger than the segment routing global block if dict_search('segment_routing.global_block', ospf): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf) g_label_difference = int(g_high_label_value) - int(g_low_label_value) if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: index_size = ospf['segment_routing']['prefix'][prefix]['index']['value'] if int(index_size) > int(g_label_difference): raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\ f'index base size larger than the SRGB label base.') # Check route summarisation if 'summary_address' in ospf: for prefix, prefix_options in ospf['summary_address'].items(): if {'tag', 'no_advertise'} <= set(prefix_options): raise ConfigError(f'Can not set both "tag" and "no-advertise" for Type-5 '\ f'and Type-7 route summarisation of "{prefix}"!') return None -def generate(ospf): - if not ospf or 'deleted' in ospf: - return None - - ospf['frr_ospfd_config'] = render_to_string('frr/ospfd.frr.j2', ospf) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(ospf): - ospf_daemon = 'ospfd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in ospf: - vrf = ' vrf ' + ospf['vrf'] - - frr_cfg.load_configuration(ospf_daemon) - frr_cfg.modify_section(f'^router ospf{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in ospf: - continue - for interface in ospf[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_ospfd_config' in ospf: - frr_cfg.add_before(frr.default_add_before, ospf['frr_ospfd_config']) - - frr_cfg.commit_configuration(ospf_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py index 60c2a9b16..ac189c378 100755 --- a/src/conf_mode/protocols_ospfv3.py +++ b/src/conf_mode/protocols_ospfv3.py @@ -1,191 +1,108 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf, argv) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ospfv3'): + return None - base_path = ['protocols', 'ospfv3'] + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path - ospfv3 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: ospfv3['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - ospfv3['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - ospfv3.update({'deleted' : ''}) - return ospfv3 - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - default_values = conf.get_config_defaults(**ospfv3.kwargs, - recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. 
- if dict_search('default_information.originate', ospfv3) is None: - del default_values['default_information'] - if 'graceful_restart' not in ospfv3: - del default_values['graceful_restart'] - - for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']: - if dict_search(f'redistribute.{protocol}', ospfv3) is None: - del default_values['redistribute'][protocol] - if not bool(default_values['redistribute']): - del default_values['redistribute'] - - default_values.pop('interface', {}) - - # merge in remaining default values - ospfv3 = config_dict_merge(default_values, ospfv3) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ospfv3 = dict_merge(tmp, ospfv3) - - return ospfv3 - -def verify(ospfv3): - if not ospfv3: - return None + ospfv3 = vrf and config_dict['vrf']['name'][vrf]['protocols']['ospfv3'] or config_dict['ospfv3'] + ospfv3['policy'] = config_dict['policy'] verify_common_route_maps(ospfv3) # As we can have a default-information route-map, we need to validate it! route_map_name = dict_search('default_information.originate.route_map', ospfv3) if route_map_name: verify_route_map(route_map_name, ospfv3) if 'area' in ospfv3: for area, area_config in ospfv3['area'].items(): if 'area_type' in area_config: if len(area_config['area_type']) > 1: raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!') if 'range' in area_config: for range, range_config in area_config['range'].items(): if {'not_advertise', 'advertise'} <= range_config.keys(): raise ConfigError(f'"not-advertise" and "advertise" for "range {range}" cannot be both configured at the same time!') if 'interface' in ospfv3: for interface, interface_config in ospfv3['interface'].items(): verify_interface_exists(ospfv3, interface) if 'ifmtu' in interface_config: mtu = Interface(interface).get_mtu() if int(interface_config['ifmtu']) > int(mtu): raise ConfigError(f'OSPFv3 ifmtu can not exceed physical MTU of "{mtu}"') # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. 
- if 'vrf' in ospfv3: - vrf = ospfv3['vrf'] + if vrf: tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') return None -def generate(ospfv3): - if not ospfv3 or 'deleted' in ospfv3: - return None - - ospfv3['new_frr_config'] = render_to_string('frr/ospf6d.frr.j2', ospfv3) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(ospfv3): - ospf6_daemon = 'ospf6d' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in ospfv3: - vrf = ' vrf ' + ospfv3['vrf'] - - frr_cfg.load_configuration(ospf6_daemon) - frr_cfg.modify_section(f'^router ospf6{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in ospfv3: - continue - for interface in ospfv3[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in ospfv3: - frr_cfg.add_before(frr.default_add_before, ospfv3['new_frr_config']) - - frr_cfg.commit_configuration(ospf6_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_pim.py b/src/conf_mode/protocols_pim.py index 79294a1f0..477895b0b 100755 --- a/src/conf_mode/protocols_pim.py +++ b/src/conf_mode/protocols_pim.py @@ -1,172 +1,119 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import os from ipaddress import IPv4Address from ipaddress import IPv4Network from signal import SIGTERM from sys import exit from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_interface_exists +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import pim_daemon +from vyos.utils.process import is_systemd_service_running from vyos.utils.process import process_named_running from vyos.utils.process import call -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() -RESERVED_MC_NET = '224.0.0.0/24' - - def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'pim'] - - pim = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - - # We can not run both IGMP proxy and PIM at the same time - get IGMP - # proxy status - if conf.exists(['protocols', 'igmp-proxy']): - pim.update({'igmp_proxy_enabled' : {}}) - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - pim['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - pim.update({'deleted' : ''}) - return pim - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - default_values = conf.get_config_defaults(**pim.kwargs, recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. - for interface in pim.get('interface', []): - # We need to reload the defaults on every pass b/c of - # hello-multiplier dependency on dead-interval - # If hello-multiplier is set, we need to remove the default from - # dead-interval. 
- if 'igmp' not in pim['interface'][interface]: - del default_values['interface'][interface]['igmp'] - - pim = config_dict_merge(default_values, pim) - return pim - -def verify(pim): - if not pim or 'deleted' in pim: + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'pim'): + return None + + pim = config_dict['pim'] + + if 'deleted' in pim: return None if 'igmp_proxy_enabled' in pim: raise ConfigError('IGMP proxy and PIM cannot be configured at the same time!') if 'interface' not in pim: raise ConfigError('PIM require defined interfaces!') + RESERVED_MC_NET = '224.0.0.0/24' for interface, interface_config in pim['interface'].items(): verify_interface_exists(pim, interface) # Check join group in reserved net if 'igmp' in interface_config and 'join' in interface_config['igmp']: for join_addr in interface_config['igmp']['join']: if IPv4Address(join_addr) in IPv4Network(RESERVED_MC_NET): raise ConfigError(f'Groups within {RESERVED_MC_NET} are reserved and cannot be joined!') if 'rp' in pim: if 'address' not in pim['rp']: raise ConfigError('PIM rendezvous point needs to be defined!') # Check unique multicast groups unique = [] pim_base_error = 'PIM rendezvous point group' for address, address_config in pim['rp']['address'].items(): if 'group' not in address_config: raise ConfigError(f'{pim_base_error} should be defined for "{address}"!') # Check if it is a multicast group for gr_addr in address_config['group']: if not IPv4Network(gr_addr).is_multicast: raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!') if gr_addr in unique: raise ConfigError(f'{pim_base_error} must be unique!') unique.append(gr_addr) -def generate(pim): - if not pim or 'deleted' in pim: - return None - pim['frr_pimd_config'] = render_to_string('frr/pimd.frr.j2', pim) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(pim): - pim_daemon = 'pimd' - pim_pid = process_named_running(pim_daemon) - - if not pim or 'deleted' in pim: - if 'deleted' in pim: - os.kill(int(pim_pid), SIGTERM) +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'pim'): + return None + pim_pid = process_named_running(pim_daemon) + pim = config_dict['pim'] + if 'deleted' in pim: + os.kill(int(pim_pid), SIGTERM) return None if not pim_pid: call('/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1') - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(pim_daemon) - frr_cfg.modify_section(f'^ip pim') - frr_cfg.modify_section(f'^ip igmp') - - for key in ['interface', 'interface_removed']: - if key not in pim: - continue - for interface in pim[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_pimd_config' in pim: - frr_cfg.add_before(frr.default_add_before, pim['frr_pimd_config']) - frr_cfg.commit_configuration(pim_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_pim6.py b/src/conf_mode/protocols_pim6.py index 581ffe238..3a9b876cc 100755 --- a/src/conf_mode/protocols_pim6.py +++ b/src/conf_mode/protocols_pim6.py @@ -1,133 +1,96 @@ #!/usr/bin/env python3 # # Copyright (C) 2023 VyOS 
maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from ipaddress import IPv6Address from ipaddress import IPv6Network from sys import exit from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running +from vyos.frrender import FRRender from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'pim6'] - pim6 = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, with_recursive_defaults=True) + return get_frrender_dict(conf) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - pim6['interface_removed'] = list(interfaces_removed) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'pim6'): + return None - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - pim6.update({'deleted' : ''}) - return pim6 - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. 
- default_values = conf.get_config_defaults(**pim6.kwargs, recursive=True) - - pim6 = config_dict_merge(default_values, pim6) - return pim6 - -def verify(pim6): - if not pim6 or 'deleted' in pim6: - return + pim6 = config_dict['pim6'] + if 'deleted' in pim6: + return None for interface, interface_config in pim6.get('interface', {}).items(): verify_interface_exists(pim6, interface) if 'mld' in interface_config: mld = interface_config['mld'] for group in mld.get('join', {}).keys(): # Validate multicast group address if not IPv6Address(group).is_multicast: raise ConfigError(f"{group} is not a multicast group") if 'rp' in pim6: if 'address' not in pim6['rp']: raise ConfigError('PIM6 rendezvous point needs to be defined!') # Check unique multicast groups unique = [] pim_base_error = 'PIM6 rendezvous point group' if {'address', 'prefix-list6'} <= set(pim6['rp']): raise ConfigError(f'{pim_base_error} supports either address or a prefix-list!') for address, address_config in pim6['rp']['address'].items(): if 'group' not in address_config: raise ConfigError(f'{pim_base_error} should be defined for "{address}"!') # Check if it is a multicast group for gr_addr in address_config['group']: if not IPv6Network(gr_addr).is_multicast: raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!') if gr_addr in unique: raise ConfigError(f'{pim_base_error} must be unique!') unique.append(gr_addr) -def generate(pim6): - if not pim6 or 'deleted' in pim6: - return - pim6['new_frr_config'] = render_to_string('frr/pim6d.frr.j2', pim6) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(pim6): - if pim6 is None: - return - - pim6_daemon = 'pim6d' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(pim6_daemon) - - for key in ['interface', 'interface_removed']: - if key not in pim6: - continue - for interface in pim6[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in pim6: - frr_cfg.add_before(frr.default_add_before, pim6['new_frr_config']) - frr_cfg.commit_configuration(pim6_daemon) +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_rip.py b/src/conf_mode/protocols_rip.py index 9afac544d..39743f965 100755 --- a/src/conf_mode/protocols_rip.py +++ b/src/conf_mode/protocols_rip.py @@ -1,139 +1,89 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender from vyos.utils.dict import dict_search -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'rip'] - rip = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - rip['interface_removed'] = list(interfaces_removed) + return get_frrender_dict(conf) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - rip.update({'deleted' : ''}) - return rip - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - rip = conf.merge_defaults(rip, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - rip = dict_merge(tmp, rip) - - return rip - -def verify(rip): - if not rip: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'rip'): return None + rip = config_dict['rip'] + rip['policy'] = config_dict['policy'] + verify_common_route_maps(rip) acl_in = dict_search('distribute_list.access_list.in', rip) if acl_in: verify_access_list(acl_in, rip) acl_out = dict_search('distribute_list.access_list.out', rip) if acl_out: verify_access_list(acl_out, rip) prefix_list_in = dict_search('distribute_list.prefix-list.in', rip) if prefix_list_in: verify_prefix_list(prefix_list_in, rip) prefix_list_out = dict_search('distribute_list.prefix_list.out', rip) if prefix_list_out: verify_prefix_list(prefix_list_out, rip) if 'interface' in rip: for interface, interface_options in rip['interface'].items(): if 'authentication' in interface_options: if {'md5', 'plaintext_password'} <= set(interface_options['authentication']): raise ConfigError('Can not use both md5 and plaintext-password at the same time!') if 'split_horizon' in interface_options: if {'disable', 'poison_reverse'} <= set(interface_options['split_horizon']): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') -def generate(rip): - if not rip or 'deleted' in rip: - return None - - rip['new_frr_config'] = render_to_string('frr/ripd.frr.j2', rip) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(rip): - rip_daemon = 'ripd' - zebra_daemon = 'zebra' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section('^ip protocol rip route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.commit_configuration(zebra_daemon) - - frr_cfg.load_configuration(rip_daemon) - frr_cfg.modify_section('^key chain \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('^router rip', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in rip: - continue - for interface in rip[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in rip: - frr_cfg.add_before(frr.default_add_before, rip['new_frr_config']) - frr_cfg.commit_configuration(rip_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ripng.py b/src/conf_mode/protocols_ripng.py index 23416ff96..14f038444 100755 --- a/src/conf_mode/protocols_ripng.py +++ b/src/conf_mode/protocols_ripng.py @@ -1,124 +1,89 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender from vyos.utils.dict import dict_search -from vyos.template import render_to_string +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'ripng'] - ripng = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - return ripng + return get_frrender_dict(conf) - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - ripng = conf.merge_defaults(ripng, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ripng = dict_merge(tmp, ripng) - - return ripng - -def verify(ripng): - if not ripng: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ripng'): return None + ripng = config_dict['ripng'] + ripng['policy'] = config_dict['policy'] + verify_common_route_maps(ripng) acl_in = dict_search('distribute_list.access_list.in', ripng) if acl_in: verify_access_list(acl_in, ripng, version='6') acl_out = dict_search('distribute_list.access_list.out', ripng) if acl_out: verify_access_list(acl_out, ripng, version='6') prefix_list_in = dict_search('distribute_list.prefix_list.in', ripng) if prefix_list_in: verify_prefix_list(prefix_list_in, ripng, version='6') prefix_list_out = dict_search('distribute_list.prefix_list.out', ripng) if prefix_list_out: verify_prefix_list(prefix_list_out, ripng, version='6') if 'interface' in ripng: for interface, interface_options in ripng['interface'].items(): if 'authentication' in interface_options: if {'md5', 'plaintext_password'} <= set(interface_options['authentication']): raise ConfigError('Can not use both md5 and plaintext-password at the same time!') if 'split_horizon' in interface_options: if {'disable', 'poison_reverse'} <= set(interface_options['split_horizon']): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') -def generate(ripng): - if not ripng: - ripng['new_frr_config'] = '' - return None - - ripng['new_frr_config'] = render_to_string('frr/ripngd.frr.j2', ripng) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(ripng): - ripng_daemon = 'ripngd' - zebra_daemon = 'zebra' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section('^ipv6 protocol ripng route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.commit_configuration(zebra_daemon) - - frr_cfg.load_configuration(ripng_daemon) - frr_cfg.modify_section('key chain \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('interface \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('^router ripng', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in ripng: - frr_cfg.add_before(frr.default_add_before, ripng['new_frr_config']) - frr_cfg.commit_configuration(ripng_daemon) - +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py index a59ecf3e4..5ad656586 100755 --- a/src/conf_mode/protocols_rpki.py +++ b/src/conf_mode/protocols_rpki.py @@ -1,130 +1,115 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from glob import glob from sys import exit from vyos.config import Config +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.pki import wrap_openssh_public_key from vyos.pki import wrap_openssh_private_key -from vyos.template import render_to_string from vyos.utils.dict import dict_search_args from vyos.utils.file import write_file +from vyos.utils.process import is_systemd_service_running from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() rpki_ssh_key_base = '/run/frr/id_rpki' def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'rpki'] + return get_frrender_dict(conf) - rpki = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, with_pki=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - rpki.update({'deleted' : ''}) - return rpki - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - rpki = conf.merge_defaults(rpki, recursive=True) - - return rpki - -def verify(rpki): - if not rpki: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'rpki'): return None + rpki = config_dict['rpki'] + if 'cache' in rpki: preferences = [] for peer, peer_config in rpki['cache'].items(): for mandatory in ['port', 'preference']: if mandatory not in peer_config: raise ConfigError(f'RPKI cache "{peer}" {mandatory} must be defined!') if 'preference' in peer_config: preference = peer_config['preference'] if preference in preferences: raise ConfigError(f'RPKI cache with preference {preference} already configured!') preferences.append(preference) if 'ssh' in peer_config: if 'username' not in peer_config['ssh']: raise ConfigError('RPKI+SSH requires username to be defined!') if 'key' not in peer_config['ssh'] or 'openssh' not in rpki['pki']: raise ConfigError('RPKI+SSH requires key to be defined!') if peer_config['ssh']['key'] not in rpki['pki']['openssh']: raise ConfigError('RPKI+SSH key not found on PKI subsystem!') return None -def generate(rpki): +def generate(config_dict): for key in glob(f'{rpki_ssh_key_base}*'): os.unlink(key) - if not rpki: - return + if not has_frr_protocol_in_dict(config_dict, 'rpki'): + return None + + rpki = config_dict['rpki'] if 'cache' in rpki: for cache, cache_config in rpki['cache'].items(): if 'ssh' in cache_config: key_name = cache_config['ssh']['key'] public_key_data = dict_search_args(rpki['pki'], 'openssh', key_name, 'public', 'key') public_key_type = dict_search_args(rpki['pki'], 'openssh', key_name, 'public', 'type') private_key_data = dict_search_args(rpki['pki'], 'openssh', key_name, 'private', 'key') cache_config['ssh']['public_key_file'] = f'{rpki_ssh_key_base}_{cache}.pub' cache_config['ssh']['private_key_file'] = f'{rpki_ssh_key_base}_{cache}' write_file(cache_config['ssh']['public_key_file'], wrap_openssh_public_key(public_key_data, public_key_type)) write_file(cache_config['ssh']['private_key_file'], 
wrap_openssh_private_key(private_key_data)) - rpki['new_frr_config'] = render_to_string('frr/rpki.frr.j2', rpki) - + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(rpki): - bgp_daemon = 'bgpd' - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(bgp_daemon) - frr_cfg.modify_section('^rpki', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in rpki: - frr_cfg.add_before(frr.default_add_before, rpki['new_frr_config']) - - frr_cfg.commit_configuration(bgp_daemon) +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_segment-routing.py b/src/conf_mode/protocols_segment-routing.py index b36c2ca11..99cf87556 100755 --- a/src/conf_mode/protocols_segment-routing.py +++ b/src/conf_mode/protocols_segment-routing.py @@ -1,116 +1,104 @@ #!/usr/bin/env python3 # # Copyright (C) 2023-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.configdict import node_changed -from vyos.template import render_to_string +from vyos.configdict import get_frrender_dict +from vyos.configdict import list_diff +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.ifconfig import Section from vyos.utils.dict import dict_search +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'segment-routing'] - sr = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True, - with_recursive_defaults=True) + return get_frrender_dict(conf) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. 
- interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - sr['interface_removed'] = list(interfaces_removed) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'segment_routing'): + return None - import pprint - pprint.pprint(sr) - return sr + sr = config_dict['segment_routing'] -def verify(sr): if 'srv6' in sr: srv6_enable = False if 'interface' in sr: for interface, interface_config in sr['interface'].items(): if 'srv6' in interface_config: srv6_enable = True break if not srv6_enable: raise ConfigError('SRv6 should be enabled on at least one interface!') return None -def generate(sr): - if not sr: - return None - - sr['new_frr_config'] = render_to_string('frr/zebra.segment_routing.frr.j2', sr) +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) return None -def apply(sr): - zebra_daemon = 'zebra' +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'segment_routing'): + return None - if 'interface_removed' in sr: - for interface in sr['interface_removed']: - # Disable processing of IPv6-SR packets - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + sr = config_dict['segment_routing'] + + current_interfaces = Section.interfaces() + sr_interfaces = list(sr.get('interface', {}).keys()) - if 'interface' in sr: - for interface, interface_config in sr['interface'].items(): - # Accept or drop SR-enabled IPv6 packets on this interface - if 'srv6' in interface_config: - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1') - # Define HMAC policy for ingress SR-enabled packets on this interface - # It's a redundant check as HMAC has a default value - but better safe - # then sorry - tmp = dict_search('srv6.hmac', interface_config) - if tmp == 'accept': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0') - elif tmp == 'drop': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1') - elif tmp == 'ignore': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1') - else: - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + for interface in list_diff(current_interfaces, sr_interfaces): + # Disable processing of IPv6-SR packets + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'^segment-routing') - if 'new_frr_config' in sr: - frr_cfg.add_before(frr.default_add_before, sr['new_frr_config']) - frr_cfg.commit_configuration(zebra_daemon) + for interface, interface_config in sr.get('interface', {}).items(): + # Accept or drop SR-enabled IPv6 packets on this interface + if 'srv6' in interface_config: + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1') + # Define HMAC policy for ingress SR-enabled packets on this interface + # It's a redundant check as HMAC has a default value - but better safe + # then sorry + tmp = dict_search('srv6.hmac', interface_config) + if tmp == 'accept': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0') + elif tmp == 'drop': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1') + elif tmp == 'ignore': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1') + else: + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() 
return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py index 430cc69d4..9d02db6dd 100755 --- a/src/conf_mode/protocols_static.py +++ b/src/conf_mode/protocols_static.py @@ -1,132 +1,115 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +from ipaddress import IPv4Network from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import get_dhcp_interfaces -from vyos.configdict import get_pppoe_interfaces +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_vrf +from vyos.frrender import FRRender +from vyos.utils.process import is_systemd_service_running from vyos.template import render -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() config_file = '/etc/iproute2/rt_tables.d/vyos-static.conf' def get_config(config=None): if config: conf = config else: conf = Config() + return get_frrender_dict(conf, argv) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'static'): + return None + vrf = None - if len(argv) > 1: - vrf = argv[1] + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] - base_path = ['protocols', 'static'] # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'static'] or base_path - static = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) - - # Assign the name of our VRF context - if vrf: static['vrf'] = vrf - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - static = dict_merge(tmp, static) - - # T3680 - get a list of all interfaces currently configured to use DHCP - tmp = get_dhcp_interfaces(conf, vrf) - if tmp: static.update({'dhcp' : tmp}) - tmp = get_pppoe_interfaces(conf, vrf) - if tmp: static.update({'pppoe' : tmp}) - - return static - -def verify(static): + static = vrf and config_dict['vrf']['name'][vrf]['protocols']['static'] or config_dict['static'] + static['policy'] = config_dict['policy'] + verify_common_route_maps(static) for route in ['route', 'route6']: # if there is no route(6) key in the dictionary we can immediately # bail out early if route not in static: continue # When leaking routes to other VRFs we must ensure that the destination # VRF exists for prefix, prefix_options in static[route].items(): # both the interface and next-hop CLI node can have a VRF subnode, # thus we check this using a for loop for type in ['interface', 'next_hop']: if type in prefix_options: for interface, interface_config in prefix_options[type].items(): verify_vrf(interface_config) if {'blackhole', 'reject'} <= set(prefix_options): raise ConfigError(f'Can not use both blackhole and reject for '\ f'prefix "{prefix}"!') + if 'multicast' in static and 'route' in static['multicast']: + for prefix, prefix_options in static['multicast']['route'].items(): + if not IPv4Network(prefix).is_multicast: + raise ConfigError(f'{prefix} is not a multicast network!') + return None -def generate(static): - if not static: +def generate(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'static'): return None - # Put routing table names in /etc/iproute2/rt_tables - render(config_file, 'iproute2/static.conf.j2', static) - static['new_frr_config'] = render_to_string('frr/staticd.frr.j2', static) - return None - -def apply(static): - static_daemon = 'staticd' + vrf = None + if 'vrf_context' in config_dict: + vrf = config_dict['vrf_context'] - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(static_daemon) + # eqivalent of the C foo ? 
'a' : 'b' statement + static = vrf and config_dict['vrf']['name'][vrf]['protocols']['static'] or config_dict['static'] - if 'vrf' in static: - vrf = static['vrf'] - frr_cfg.modify_section(f'^vrf {vrf}', stop_pattern='^exit-vrf', remove_stop_mark=True) - else: - frr_cfg.modify_section(r'^ip route .*') - frr_cfg.modify_section(r'^ipv6 route .*') + # Put routing table names in /etc/iproute2/rt_tables + render(config_file, 'iproute2/static.conf.j2', static) - if 'new_frr_config' in static: - frr_cfg.add_before(frr.default_add_before, static['new_frr_config']) - frr_cfg.commit_configuration(static_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None +def apply(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_static_multicast.py b/src/conf_mode/protocols_static_multicast.py deleted file mode 100755 index c8894fd41..000000000 --- a/src/conf_mode/protocols_static_multicast.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2020-2024 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- - -from ipaddress import IPv4Address -from sys import exit - -from vyos import ConfigError -from vyos import frr -from vyos.config import Config -from vyos.template import render_to_string - -from vyos import airbag -airbag.enable() - -config_file = r'/tmp/static_mcast.frr' - -# Get configuration for static multicast route -def get_config(config=None): - if config: - conf = config - else: - conf = Config() - mroute = { - 'old_mroute' : {}, - 'mroute' : {} - } - - base_path = "protocols static multicast" - - if not (conf.exists(base_path) or conf.exists_effective(base_path)): - return None - - conf.set_level(base_path) - - # Get multicast effective routes - for route in conf.list_effective_nodes('route'): - mroute['old_mroute'][route] = {} - for next_hop in conf.list_effective_nodes('route {0} next-hop'.format(route)): - mroute['old_mroute'][route].update({ - next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) - }) - - # Get multicast effective interface-routes - for route in conf.list_effective_nodes('interface-route'): - if not route in mroute['old_mroute']: - mroute['old_mroute'][route] = {} - for next_hop in conf.list_effective_nodes('interface-route {0} next-hop-interface'.format(route)): - mroute['old_mroute'][route].update({ - next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) - }) - - # Get multicast routes - for route in conf.list_nodes('route'): - mroute['mroute'][route] = {} - for next_hop in conf.list_nodes('route {0} next-hop'.format(route)): - mroute['mroute'][route].update({ - next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) - }) - - # Get multicast interface-routes - for route in conf.list_nodes('interface-route'): - if not route in mroute['mroute']: - mroute['mroute'][route] = {} - for next_hop in conf.list_nodes('interface-route {0} next-hop-interface'.format(route)): - mroute['mroute'][route].update({ - next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) - }) - - return mroute - -def verify(mroute): - if mroute is None: - return None - - for mcast_route in mroute['mroute']: - route = mcast_route.split('/') - if IPv4Address(route[0]) < IPv4Address('224.0.0.0'): - raise ConfigError(f'{mcast_route} not a multicast network') - - -def generate(mroute): - if mroute is None: - return None - - mroute['new_frr_config'] = render_to_string('frr/static_mcast.frr.j2', mroute) - return None - - -def apply(mroute): - if mroute is None: - return None - static_daemon = 'staticd' - - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(static_daemon) - - if 'old_mroute' in mroute: - for route_gr in mroute['old_mroute']: - for nh in mroute['old_mroute'][route_gr]: - if mroute['old_mroute'][route_gr][nh]: - frr_cfg.modify_section(f'^ip mroute {route_gr} {nh} {mroute["old_mroute"][route_gr][nh]}') - else: - frr_cfg.modify_section(f'^ip mroute {route_gr} {nh}') - - if 'new_frr_config' in mroute: - frr_cfg.add_before(frr.default_add_before, mroute['new_frr_config']) - - frr_cfg.commit_configuration(static_daemon) - - return None - - -if __name__ == '__main__': - try: - c = get_config() - verify(c) - generate(c) - apply(c) - except ConfigError as e: - print(e) - exit(1) diff --git a/src/conf_mode/service_snmp.py b/src/conf_mode/service_snmp.py index c9c0ed9a0..1174b1238 100755 --- a/src/conf_mode/service_snmp.py +++ b/src/conf_mode/service_snmp.py @@ -1,282 +1,273 @@ #!/usr/bin/env python3 # # Copyright (C) 
2018-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from vyos.base import Warning from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_vrf from vyos.snmpv3_hashgen import plaintext_to_md5 from vyos.snmpv3_hashgen import plaintext_to_sha1 from vyos.snmpv3_hashgen import random from vyos.template import render from vyos.utils.configfs import delete_cli_node from vyos.utils.configfs import add_cli_node from vyos.utils.dict import dict_search from vyos.utils.network import is_addr_assigned from vyos.utils.process import call from vyos.utils.permission import chmod_755 from vyos.version import get_version_data from vyos import ConfigError from vyos import airbag airbag.enable() config_file_client = r'/etc/snmp/snmp.conf' config_file_daemon = r'/etc/snmp/snmpd.conf' config_file_access = r'/usr/share/snmp/snmpd.conf' config_file_user = r'/var/lib/snmp/snmpd.conf' default_script_dir = r'/config/user-data/' systemd_override = r'/run/systemd/system/snmpd.service.d/override.conf' systemd_service = 'snmpd.service' def get_config(config=None): if config: conf = config else: conf = Config() base = ['service', 'snmp'] snmp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) if not conf.exists(base): snmp.update({'deleted' : ''}) if conf.exists(['service', 'lldp', 'snmp']): snmp.update({'lldp_snmp' : ''}) if 'deleted' in snmp: return snmp version_data = get_version_data() snmp['version'] = version_data['version'] # create an internal snmpv3 user of the form 'vyosxxxxxxxxxxxxxxxx' snmp['vyos_user'] = 'vyos' + random(8) snmp['vyos_user_pass'] = random(16) # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. snmp = conf.merge_defaults(snmp, recursive=True) if 'listen_address' in snmp: # Always listen on localhost if an explicit address has been configured # This is a safety measure to not end up with invalid listen addresses # that are not configured on this system. 
See https://vyos.dev/T850 if '127.0.0.1' not in snmp['listen_address']: tmp = {'127.0.0.1': {'port': '161'}} snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) if '::1' not in snmp['listen_address']: tmp = {'::1': {'port': '161'}} snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']: for key, val in snmp['script_extensions']['extension_name'].items(): if 'script' not in val: continue script_path = val['script'] # if script has not absolute path, use pre configured path if not os.path.isabs(script_path): script_path = os.path.join(default_script_dir, script_path) snmp['script_extensions']['extension_name'][key]['script'] = script_path return snmp def verify(snmp): if 'deleted' in snmp: return None if {'deleted', 'lldp_snmp'} <= set(snmp): raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!') ### check if the configured script actually exist if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']: for extension, extension_opt in snmp['script_extensions']['extension_name'].items(): if 'script' not in extension_opt: raise ConfigError(f'Script extension "{extension}" requires an actual script to be configured!') tmp = extension_opt['script'] if not os.path.isfile(tmp): Warning(f'script "{tmp}" does not exist!') else: chmod_755(extension_opt['script']) if 'listen_address' in snmp: for address in snmp['listen_address']: # We only wan't to configure addresses that exist on the system. # Hint the user if they don't exist if 'vrf' in snmp: vrf_name = snmp['vrf'] if not is_addr_assigned(address, vrf_name) and address not in ['::1','127.0.0.1']: raise ConfigError(f'SNMP listen address "{address}" not configured in vrf "{vrf_name}"!') elif not is_addr_assigned(address): raise ConfigError(f'SNMP listen address "{address}" not configured in default vrf!') if 'trap_target' in snmp: for trap, trap_config in snmp['trap_target'].items(): if 'community' not in trap_config: raise ConfigError(f'Trap target "{trap}" requires a community to be set!') if 'oid_enable' in snmp: Warning(f'Custom OIDs are enabled and may lead to system instability and high resource consumption') verify_vrf(snmp) # bail out early if SNMP v3 is not configured if 'v3' not in snmp: return None if 'user' in snmp['v3']: for user, user_config in snmp['v3']['user'].items(): if 'group' not in user_config: raise ConfigError(f'Group membership required for user "{user}"!') if 'plaintext_password' not in user_config['auth'] and 'encrypted_password' not in user_config['auth']: raise ConfigError(f'Must specify authentication encrypted-password or plaintext-password for user "{user}"!') if 'plaintext_password' not in user_config['privacy'] and 'encrypted_password' not in user_config['privacy']: raise ConfigError(f'Must specify privacy encrypted-password or plaintext-password for user "{user}"!') if 'group' in snmp['v3']: for group, group_config in snmp['v3']['group'].items(): if 'seclevel' not in group_config: raise ConfigError(f'Must configure "seclevel" for group "{group}"!') if 'view' not in group_config: raise ConfigError(f'Must configure "view" for group "{group}"!') # Check if 'view' exists view = group_config['view'] if 'view' not in snmp['v3'] or view not in snmp['v3']['view']: raise ConfigError(f'You must create view "{view}" first!') if 'view' in snmp['v3']: for view, view_config in snmp['v3']['view'].items(): if 'oid' not in view_config: raise ConfigError(f'Must configure an "oid" for 
view "{view}"!') if 'trap_target' in snmp['v3']: for trap, trap_config in snmp['v3']['trap_target'].items(): if 'plaintext_password' not in trap_config['auth'] and 'encrypted_password' not in trap_config['auth']: raise ConfigError(f'Must specify one of authentication encrypted-password or plaintext-password for trap "{trap}"!') if {'plaintext_password', 'encrypted_password'} <= set(trap_config['auth']): raise ConfigError(f'Can not specify both authentication encrypted-password and plaintext-password for trap "{trap}"!') if 'plaintext_password' not in trap_config['privacy'] and 'encrypted_password' not in trap_config['privacy']: raise ConfigError(f'Must specify one of privacy encrypted-password or plaintext-password for trap "{trap}"!') if {'plaintext_password', 'encrypted_password'} <= set(trap_config['privacy']): raise ConfigError(f'Can not specify both privacy encrypted-password and plaintext-password for trap "{trap}"!') if 'type' not in trap_config: raise ConfigError('SNMP v3 trap "type" must be specified!') return None def generate(snmp): # As we are manipulating the snmpd user database we have to stop it first! # This is even save if service is going to be removed call(f'systemctl stop {systemd_service}') # Clean config files config_files = [config_file_client, config_file_daemon, config_file_access, config_file_user, systemd_override] for file in config_files: if os.path.isfile(file): os.unlink(file) if 'deleted' in snmp: return None if 'v3' in snmp: # SNMPv3 uses a hashed password. If CLI defines a plaintext password, # we will hash it in the background and replace the CLI node! if 'user' in snmp['v3']: for user, user_config in snmp['v3']['user'].items(): if dict_search('auth.type', user_config) == 'sha': hash = plaintext_to_sha1 else: hash = plaintext_to_md5 if dict_search('auth.plaintext_password', user_config) is not None: tmp = hash(dict_search('auth.plaintext_password', user_config), dict_search('v3.engineid', snmp)) snmp['v3']['user'][user]['auth']['encrypted_password'] = tmp del snmp['v3']['user'][user]['auth']['plaintext_password'] cli_base = ['service', 'snmp', 'v3', 'user', user, 'auth'] delete_cli_node(cli_base + ['plaintext-password']) add_cli_node(cli_base + ['encrypted-password'], value=tmp) if dict_search('privacy.plaintext_password', user_config) is not None: tmp = hash(dict_search('privacy.plaintext_password', user_config), dict_search('v3.engineid', snmp)) snmp['v3']['user'][user]['privacy']['encrypted_password'] = tmp del snmp['v3']['user'][user]['privacy']['plaintext_password'] cli_base = ['service', 'snmp', 'v3', 'user', user, 'privacy'] delete_cli_node(cli_base + ['plaintext-password']) add_cli_node(cli_base + ['encrypted-password'], value=tmp) # Write client config file render(config_file_client, 'snmp/etc.snmp.conf.j2', snmp) # Write server config file render(config_file_daemon, 'snmp/etc.snmpd.conf.j2', snmp) # Write access rights config file render(config_file_access, 'snmp/usr.snmpd.conf.j2', snmp) # Write access rights config file render(config_file_user, 'snmp/var.snmpd.conf.j2', snmp) # Write daemon configuration file render(systemd_override, 'snmp/override.conf.j2', snmp) return None def apply(snmp): # Always reload systemd manager configuration call('systemctl daemon-reload') if 'deleted' in snmp: return None # start SNMP daemon call(f'systemctl reload-or-restart {systemd_service}') - - # Enable AgentX in FRR - # This should be done for each daemon individually because common command - # works only if all the daemons started with SNMP support - # 
Following daemons from FRR 9.0/stable have SNMP module compiled in VyOS - frr_daemons_list = ['zebra', 'bgpd', 'ospf6d', 'ospfd', 'ripd', 'isisd', 'ldpd'] - for frr_daemon in frr_daemons_list: - call(f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null') - return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/system_ip.py b/src/conf_mode/system_ip.py index c8a91fd2f..86843eb78 100755 --- a/src/conf_mode/system_ip.py +++ b/src/conf_mode/system_ip.py @@ -1,146 +1,127 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdep import set_dependents +from vyos.configdep import call_dependents +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_route_map -from vyos.template import render_to_string +from vyos.frrender import FRRender from vyos.utils.dict import dict_search -from vyos.utils.file import write_file from vyos.utils.process import is_systemd_service_active +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write -from vyos.configdep import set_dependents -from vyos.configdep import call_dependents from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['system', 'ip'] - - opt = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - with_recursive_defaults=True) - - # When working with FRR we need to know the corresponding address-family - opt['afi'] = 'ip' - - # We also need the route-map information from the config - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], - get_first_key=True)}} - # Merge policy dict into "regular" config dict - opt = dict_merge(tmp, opt) # If IPv4 ARP table size is set here and also manually in sysctl, the more # fine grained value from sysctl must win set_dependents('sysctl', conf) + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ip'): + return None - return opt + opt = config_dict['ip'] + opt['policy'] = config_dict['policy'] -def verify(opt): if 'protocol' in opt: for protocol, protocol_options in opt['protocol'].items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], opt) return -def generate(opt): - opt['frr_zebra_config'] = render_to_string('frr/zebra.route-map.frr.j2', opt) - return +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None + +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ip'): + + return None + opt = config_dict['ip'] -def apply(opt): # Apply ARP threshold values # table_size has a default value - thus the key always exists size = int(dict_search('arp.table_size', opt)) # Amount upon reaching which the records begin to be cleared immediately sysctl_write('net.ipv4.neigh.default.gc_thresh3', size) # Amount after which the records begin to be cleaned after 5 seconds sysctl_write('net.ipv4.neigh.default.gc_thresh2', size // 2) # Minimum number of stored records is indicated which is not cleared sysctl_write('net.ipv4.neigh.default.gc_thresh1', size // 8) - # enable/disable IPv4 forwarding - tmp = dict_search('disable_forwarding', opt) - value = '0' if (tmp != None) else '1' - write_file('/proc/sys/net/ipv4/conf/all/forwarding', value) - # configure multipath tmp = dict_search('multipath.ignore_unreachable_nexthops', opt) value = '1' if (tmp != None) else '0' sysctl_write('net.ipv4.fib_multipath_use_neigh', value) tmp = dict_search('multipath.layer4_hashing', opt) value = '1' if (tmp != None) else '0' sysctl_write('net.ipv4.fib_multipath_hash_policy', value) # configure TCP options (defaults as of Linux 6.4) tmp = dict_search('tcp.mss.probing', opt) if tmp is None: value = 0 elif tmp == 'on-icmp-black-hole': value = 1 elif tmp == 'force': value = 2 else: # Shouldn't happen raise ValueError("TCP MSS probing is neither 'on-icmp-black-hole' nor 'force'!") sysctl_write('net.ipv4.tcp_mtu_probing', value) tmp = dict_search('tcp.mss.base', opt) value = '1024' if (tmp is None) else tmp sysctl_write('net.ipv4.tcp_base_mss', value) tmp = dict_search('tcp.mss.floor', opt) value = '48' if (tmp is None) else tmp sysctl_write('net.ipv4.tcp_mtu_probe_floor', value) # During startup of vyos-router that brings up FRR, the service is not yet # running when this script is called first. 
Skip this part and wait for initial # commit of the configuration to trigger this statement if is_systemd_service_active('frr.service'): - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'no ip nht resolve-via-default') - frr_cfg.modify_section(r'ip protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - if 'frr_zebra_config' in opt: - frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() call_dependents() + return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/system_ipv6.py b/src/conf_mode/system_ipv6.py index a2442d009..593b8f7f3 100755 --- a/src/conf_mode/system_ipv6.py +++ b/src/conf_mode/system_ipv6.py @@ -1,130 +1,111 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2023 VyOS maintainers and contributors +# Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdep import set_dependents +from vyos.configdep import call_dependents +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_route_map -from vyos.template import render_to_string +from vyos.frrender import FRRender from vyos.utils.dict import dict_search from vyos.utils.file import write_file from vyos.utils.process import is_systemd_service_active +from vyos.utils.process import is_systemd_service_running from vyos.utils.system import sysctl_write -from vyos.configdep import set_dependents -from vyos.configdep import call_dependents from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['system', 'ipv6'] - - opt = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - with_recursive_defaults=True) - - # When working with FRR we need to know the corresponding address-family - opt['afi'] = 'ipv6' - - # We also need the route-map information from the config - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], - get_first_key=True)}} - # Merge policy dict into "regular" config dict - opt = dict_merge(tmp, opt) # If IPv6 neighbor table size is set here and also manually in sysctl, the more # fine grained value from sysctl must win set_dependents('sysctl', conf) + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ipv6'): + return None - return opt + opt = config_dict['ipv6'] + opt['policy'] = config_dict['policy'] -def verify(opt): if 'protocol' in opt: for protocol, protocol_options in opt['protocol'].items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], opt) return -def generate(opt): - opt['frr_zebra_config'] = render_to_string('frr/zebra.route-map.frr.j2', opt) - return +def generate(config_dict): + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(config_dict) + return None + +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ipv6'): + return None + opt = config_dict['ipv6'] -def apply(opt): # configure multipath tmp = dict_search('multipath.layer4_hashing', opt) value = '1' if (tmp != None) else '0' sysctl_write('net.ipv6.fib_multipath_hash_policy', value) # Apply ND threshold values # table_size has a default value - thus the key always exists size = int(dict_search('neighbor.table_size', opt)) # Amount upon reaching which the records begin to be cleared immediately sysctl_write('net.ipv6.neigh.default.gc_thresh3', size) # Amount after which the records begin to be cleaned after 5 seconds sysctl_write('net.ipv6.neigh.default.gc_thresh2', size // 2) # Minimum number of stored records is indicated which is not cleared sysctl_write('net.ipv6.neigh.default.gc_thresh1', size // 8) - # enable/disable IPv6 forwarding - tmp = dict_search('disable_forwarding', opt) - value = '0' if (tmp != None) else '1' - write_file('/proc/sys/net/ipv6/conf/all/forwarding', value) - # configure IPv6 strict-dad tmp = dict_search('strict_dad', opt) value = '2' if (tmp != None) else '1' for root, dirs, files in os.walk('/proc/sys/net/ipv6/conf'): for name in files: if name == 'accept_dad': write_file(os.path.join(root, name), value) # During startup of vyos-router that brings up FRR, the service is not yet # running when this script is called first. 
Skip this part and wait for initial # commit of the configuration to trigger this statement if is_systemd_service_active('frr.service'): - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(r'no ipv6 nht resolve-via-default') - frr_cfg.modify_section(r'ipv6 protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - if 'frr_zebra_config' in opt: - frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + if config_dict and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() call_dependents() + return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py index 72b178c89..6533f493f 100755 --- a/src/conf_mode/vrf.py +++ b/src/conf_mode/vrf.py @@ -1,364 +1,358 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from jmespath import search from json import loads from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdict import get_frrender_dict from vyos.configdict import node_changed from vyos.configverify import verify_route_map from vyos.firewall import conntrack_required +from vyos.frrender import FRRender from vyos.ifconfig import Interface from vyos.template import render -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_vrf_tableid from vyos.utils.network import get_vrf_members from vyos.utils.network import interface_exists from vyos.utils.process import call from vyos.utils.process import cmd +from vyos.utils.process import is_systemd_service_running from vyos.utils.process import popen from vyos.utils.system import sysctl_write from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() config_file = '/etc/iproute2/rt_tables.d/vyos-vrf.conf' k_mod = ['vrf'] nftables_table = 'inet vrf_zones' nftables_rules = { 'vrf_zones_ct_in': 'counter ct original zone set iifname map @ct_iface_map', 'vrf_zones_ct_out': 'counter ct original zone set oifname map @ct_iface_map' } def has_rule(af : str, priority : int, table : str=None): """ Check if a given ip rule exists $ ip --json -4 rule show [{'l3mdev': None, 'priority': 1000, 'src': 'all'}, {'action': 'unreachable', 'l3mdev': None, 'priority': 2000, 'src': 'all'}, {'priority': 32765, 'src': 'all', 'table': 'local'}, {'priority': 32766, 'src': 'all', 'table': 'main'}, {'priority': 32767, 'src': 'all', 'table': 'default'}] """ if af not in ['-4', '-6']: raise ValueError() command = f'ip --detail --json {af} rule show' for tmp in loads(cmd(command)): if 'priority' in tmp and 
'table' in tmp: if tmp['priority'] == priority and tmp['table'] == table: return True elif 'priority' in tmp and table in tmp: # l3mdev table has a different layout if tmp['priority'] == priority: return True return False def is_nft_vrf_zone_rule_setup() -> bool: """ Check if an nftables connection tracking rule already exists """ tmp = loads(cmd('sudo nft -j list table inet vrf_zones')) num_rules = len(search("nftables[].rule[].chain", tmp)) return bool(num_rules) def vrf_interfaces(c, match): matched = [] old_level = c.get_level() c.set_level(['interfaces']) section = c.get_config_dict([], get_first_key=True) for type in section: interfaces = section[type] for name in interfaces: interface = interfaces[name] if 'vrf' in interface: v = interface.get('vrf', '') if v == match: matched.append(name) c.set_level(old_level) return matched def vrf_routing(c, match): matched = [] old_level = c.get_level() c.set_level(['protocols', 'vrf']) if match in c.list_nodes([]): matched.append(match) c.set_level(old_level) return matched def get_config(config=None): if config: conf = config else: conf = Config() base = ['vrf'] vrf = conf.get_config_dict(base, key_mangling=('-', '_'), no_tag_node_value_mangle=True, get_first_key=True) # determine which VRF has been removed for name in node_changed(conf, base + ['name']): if 'vrf_remove' not in vrf: vrf.update({'vrf_remove' : {}}) vrf['vrf_remove'][name] = {} # get VRF bound interfaces interfaces = vrf_interfaces(conf, name) if interfaces: vrf['vrf_remove'][name]['interface'] = interfaces # get VRF bound routing instances routes = vrf_routing(conf, name) if routes: vrf['vrf_remove'][name]['route'] = routes if 'name' in vrf: vrf['conntrack'] = conntrack_required(conf) - # We also need the route-map information from the config - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], - get_first_key=True)}} - - # Merge policy dict into "regular" config dict - vrf = dict_merge(tmp, vrf) + # We need to merge the FRR rendering dict into the VRF dict + # this is required to get the route-map information to FRR + vrf.update({'frr_dict' : get_frrender_dict(conf)}) return vrf def verify(vrf): # ensure VRF is not assigned to any interface if 'vrf_remove' in vrf: for name, config in vrf['vrf_remove'].items(): if 'interface' in config: raise ConfigError(f'Can not remove VRF "{name}", it still has '\ f'member interfaces!') if 'route' in config: raise ConfigError(f'Can not remove VRF "{name}", it still has '\ f'static routes installed!') if 'name' in vrf: reserved_names = ["add", "all", "broadcast", "default", "delete", "dev", "get", "inet", "mtu", "link", "type", "vrf"] table_ids = [] + vnis = [] for name, vrf_config in vrf['name'].items(): # Reserved VRF names if name in reserved_names: raise ConfigError(f'VRF name "{name}" is reserved and connot be used!') # table id is mandatory if 'table' not in vrf_config: raise ConfigError(f'VRF "{name}" table id is mandatory!') # routing table id can't be changed - OS restriction if interface_exists(name): tmp = get_vrf_tableid(name) if tmp and tmp != int(vrf_config['table']): raise ConfigError(f'VRF "{name}" table id modification not possible!') # VRF routing table ID must be unique on the system if 'table' in vrf_config and vrf_config['table'] in table_ids: raise ConfigError(f'VRF "{name}" table id is not unique!') table_ids.append(vrf_config['table']) + # VRF VNIs must be unique on the system + if 'vni' in vrf_config: + vni = vrf_config['vni'] + if vni in vnis: + raise ConfigError(f'VRF "{name}" VNI "{vni}" is not unique!') + vnis.append(vni) + tmp = dict_search('ip.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in protocol_options: - verify_route_map(protocol_options['route_map'], vrf) + verify_route_map(protocol_options['route_map'], vrf['frr_dict']) tmp = dict_search('ipv6.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in protocol_options: - verify_route_map(protocol_options['route_map'], vrf) + verify_route_map(protocol_options['route_map'], vrf['frr_dict']) return None def generate(vrf): # Render iproute2 VR helper names render(config_file, 'iproute2/vrf.conf.j2', vrf) - # Render VRF Kernel/Zebra route-map filters - vrf['frr_zebra_config'] = render_to_string('frr/zebra.vrf.route-map.frr.j2', vrf) + + if 'frr_dict' in vrf and not is_systemd_service_running('vyos-configd.service'): + FRRender().generate(vrf['frr_dict']) return None def apply(vrf): # Documentation # # - https://github.com/torvalds/linux/blob/master/Documentation/networking/vrf.txt # - https://github.com/Mellanox/mlxsw/wiki/Virtual-Routing-and-Forwarding-(VRF) # - https://github.com/Mellanox/mlxsw/wiki/L3-Tunneling # - https://netdevconf.info/1.1/proceedings/slides/ahern-vrf-tutorial.pdf # - https://netdevconf.info/1.2/slides/oct6/02_ahern_what_is_l3mdev_slides.pdf # set the default VRF global behaviour bind_all = '0' if 'bind_to_all' in vrf: bind_all = '1' sysctl_write('net.ipv4.tcp_l3mdev_accept', bind_all) sysctl_write('net.ipv4.udp_l3mdev_accept', bind_all) for tmp in (dict_search('vrf_remove', vrf) or []): if interface_exists(tmp): # T5492: deleting a VRF instance may leafe processes running # (e.g. dhclient) as there is a depedency ordering issue in the CLI. 
# We need to ensure that we stop the dhclient processes first so # a proper DHCLP RELEASE message is sent for interface in get_vrf_members(tmp): vrf_iface = Interface(interface) vrf_iface.set_dhcp(False) vrf_iface.set_dhcpv6(False) # Remove nftables conntrack zone map item nft_del_element = f'delete element inet vrf_zones ct_iface_map {{ "{tmp}" }}' # Check if deleting is possible first to avoid raising errors _, err = popen(f'nft --check {nft_del_element}') if not err: # Remove map element cmd(f'nft {nft_del_element}') # Delete the VRF Kernel interface call(f'ip link delete dev {tmp}') if 'name' in vrf: # Linux routing uses rules to find tables - routing targets are then # looked up in those tables. If the lookup got a matching route, the # process ends. # # TL;DR; first table with a matching entry wins! # # You can see your routing table lookup rules using "ip rule", sadly the # local lookup is hit before any VRF lookup. Pinging an addresses from the # VRF will usually find a hit in the local table, and never reach the VRF # routing table - this is usually not what you want. Thus we will # re-arrange the tables and move the local lookup further down once VRFs # are enabled. # # Thanks to https://stbuehler.de/blog/article/2020/02/29/using_vrf__virtual_routing_and_forwarding__on_linux.html for afi in ['-4', '-6']: # move lookup local to pref 32765 (from 0) if not has_rule(afi, 32765, 'local'): call(f'ip {afi} rule add pref 32765 table local') if has_rule(afi, 0, 'local'): call(f'ip {afi} rule del pref 0') # make sure that in VRFs after failed lookup in the VRF specific table # nothing else is reached if not has_rule(afi, 1000, 'l3mdev'): # this should be added by the kernel when a VRF is created # add it here for completeness call(f'ip {afi} rule add pref 1000 l3mdev protocol kernel') # add another rule with an unreachable target which only triggers in VRF context # if a route could not be reached if not has_rule(afi, 2000, 'l3mdev'): call(f'ip {afi} rule add pref 2000 l3mdev unreachable') nft_vrf_zone_rule_setup = False for name, config in vrf['name'].items(): table = config['table'] if not interface_exists(name): # For each VRF apart from your default context create a VRF # interface with a separate routing table call(f'ip link add {name} type vrf table {table}') # set VRF description for e.g. SNMP monitoring vrf_if = Interface(name) # We also should add proper loopback IP addresses to the newly added # VRF for services bound to the loopback address (SNMP, NTP) vrf_if.add_addr('127.0.0.1/8') vrf_if.add_addr('::1/128') # add VRF description if available vrf_if.set_alias(config.get('description', '')) # Enable/Disable IPv4 forwarding tmp = dict_search('ip.disable_forwarding', config) value = '0' if (tmp != None) else '1' vrf_if.set_ipv4_forwarding(value) # Enable/Disable IPv6 forwarding tmp = dict_search('ipv6.disable_forwarding', config) value = '0' if (tmp != None) else '1' vrf_if.set_ipv6_forwarding(value) # Enable/Disable of an interface must always be done at the end of the # derived class to make use of the ref-counting set_admin_state() # function. We will only enable the interface if 'up' was called as # often as 'down'. This is required by some interface implementations # as certain parameters can only be changed when the interface is # in admin-down state. This ensures the link does not flap during # reconfiguration. 
state = 'down' if 'disable' in config else 'up' vrf_if.set_admin_state(state) # Add nftables conntrack zone map item nft_add_element = f'add element inet vrf_zones ct_iface_map {{ "{name}" : {table} }}' cmd(f'nft {nft_add_element}') # Only call into nftables as long as there is nothing setup to avoid wasting # CPU time and thus lenghten the commit process if not nft_vrf_zone_rule_setup: nft_vrf_zone_rule_setup = is_nft_vrf_zone_rule_setup() # Install nftables conntrack rules only once if vrf['conntrack'] and not nft_vrf_zone_rule_setup: for chain, rule in nftables_rules.items(): cmd(f'nft add rule inet vrf_zones {chain} {rule}') if 'name' not in vrf or not vrf['conntrack']: for chain, rule in nftables_rules.items(): cmd(f'nft flush chain inet vrf_zones {chain}') # Return default ip rule values if 'name' not in vrf: for afi in ['-4', '-6']: # move lookup local to pref 0 (from 32765) if not has_rule(afi, 0, 'local'): call(f'ip {afi} rule add pref 0 from all lookup local') if has_rule(afi, 32765, 'local'): call(f'ip {afi} rule del pref 32765 table local') if has_rule(afi, 1000, 'l3mdev'): call(f'ip {afi} rule del pref 1000 l3mdev protocol kernel') if has_rule(afi, 2000, 'l3mdev'): call(f'ip {afi} rule del pref 2000 l3mdev unreachable') - # Apply FRR filters - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) - frr_cfg.modify_section(f'^vrf .+', stop_pattern='^exit-vrf', remove_stop_mark=True) - if 'frr_zebra_config' in vrf: - frr_cfg.add_before(frr.default_add_before, vrf['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + if 'frr_dict' in vrf and not is_systemd_service_running('vyos-configd.service'): + FRRender().apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/init/vyos-router b/src/init/vyos-router index e2e964656..00136309b 100755 --- a/src/init/vyos-router +++ b/src/init/vyos-router @@ -1,595 +1,596 @@ #!/bin/bash # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. . /lib/lsb/init-functions : ${vyatta_env:=/etc/default/vyatta} source $vyatta_env declare progname=${0##*/} declare action=$1; shift declare -x BOOTFILE=$vyatta_sysconfdir/config/config.boot declare -x DEFAULT_BOOTFILE=$vyatta_sysconfdir/config.boot.default declare -x VYCONF_CONFIG_DIR=/usr/libexec/vyos/vyconf/config # If vyos-config= boot option is present, use that file instead for x in $(cat /proc/cmdline); do [[ $x = vyos-config=* ]] || continue VYOS_CONFIG="${x#vyos-config=}" done if [ ! -z "$VYOS_CONFIG" ]; then if [ -r "$VYOS_CONFIG" ]; then echo "Config selected manually: $VYOS_CONFIG" declare -x BOOTFILE="$VYOS_CONFIG" else echo "WARNING: Could not read selected config file, using default!" 
fi fi declare -a subinit declare -a all_subinits=( firewall ) if [ $# -gt 0 ] ; then for s in $@ ; do [ -x ${vyatta_sbindir}/${s}.init ] && subinit[${#subinit}]=$s done else for s in ${all_subinits[@]} ; do [ -x ${vyatta_sbindir}/${s}.init ] && subinit[${#subinit}]=$s done fi GROUP=vyattacfg # easy way to make empty file without any command empty() { >$1 } # check if bootup of this portion is disabled disabled () { grep -q -w no-vyos-$1 /proc/cmdline } # Load encrypted config volume mount_encrypted_config() { persist_path=$(/opt/vyatta/sbin/vyos-persistpath) if [ $? == 0 ]; then if [ -e $persist_path/boot ]; then image_name=$(cat /proc/cmdline | sed -e s+^.*vyos-union=/boot/++ | sed -e 's/ .*$//') if [ -z "$image_name" ]; then return fi if [ ! -f $persist_path/luks/$image_name ]; then return fi vyos_tpm_key=$(python3 -c 'from vyos.tpm import read_tpm_key; print(read_tpm_key().decode())' 2>/dev/null) if [ $? -ne 0 ]; then echo "ERROR: Failed to fetch encryption key from TPM. Encrypted config volume has not been mounted" echo "Use 'encryption load' to load volume with recovery key" echo "or 'encryption disable' to decrypt volume with recovery key" return fi echo $vyos_tpm_key | tr -d '\r\n' | cryptsetup open $persist_path/luks/$image_name vyos_config --key-file=- if [ $? -ne 0 ]; then echo "ERROR: Failed to decrypt config volume. Encrypted config volume has not been mounted" echo "Use 'encryption load' to load volume with recovery key" echo "or 'encryption disable' to decrypt volume with recovery key" return fi mount /dev/mapper/vyos_config /config mount /dev/mapper/vyos_config $vyatta_sysconfdir/config echo "Mounted encrypted config volume" fi fi } unmount_encrypted_config() { persist_path=$(/opt/vyatta/sbin/vyos-persistpath) if [ $? == 0 ]; then if [ -e $persist_path/boot ]; then image_name=$(cat /proc/cmdline | sed -e s+^.*vyos-union=/boot/++ | sed -e 's/ .*$//') if [ -z "$image_name" ]; then return fi if [ ! -f $persist_path/luks/$image_name ]; then return fi umount /config umount $vyatta_sysconfdir/config cryptsetup close vyos_config fi fi } # if necessary, provide initial config init_bootfile () { # define and version default boot config if not present if [ ! -r $DEFAULT_BOOTFILE ]; then if [ -f $vyos_data_dir/config.boot.default ]; then cp $vyos_data_dir/config.boot.default $DEFAULT_BOOTFILE $vyos_libexec_dir/add-system-version.py >> $DEFAULT_BOOTFILE fi fi if [ ! 
-r $BOOTFILE ] ; then if [ -f $DEFAULT_BOOTFILE ]; then cp $DEFAULT_BOOTFILE $BOOTFILE else $vyos_libexec_dir/add-system-version.py > $BOOTFILE fi chgrp ${GROUP} $BOOTFILE chmod 660 $BOOTFILE fi if [ -d $VYCONF_CONFIG_DIR ] ; then cp -f $BOOTFILE $VYCONF_CONFIG_DIR/config.boot cp -f $DEFAULT_BOOTFILE $VYCONF_CONFIG_DIR/config.failsafe fi } # if necessary, migrate initial config migrate_bootfile () { if [ -x $vyos_libexec_dir/run-config-migration.py ]; then log_progress_msg migrate sg ${GROUP} -c "$vyos_libexec_dir/run-config-migration.py $BOOTFILE" # update vyconf copy after migration if [ -d $VYCONF_CONFIG_DIR ] ; then cp -f $BOOTFILE $VYCONF_CONFIG_DIR/config.boot fi fi } # configure system-specific settings system_config () { if [ -x $vyos_libexec_dir/run-config-activation.py ]; then log_progress_msg system sg ${GROUP} -c "$vyos_libexec_dir/run-config-activation.py $BOOTFILE" fi } # load the initial config load_bootfile () { log_progress_msg configure ( if [ -f /etc/default/vyatta-load-boot ]; then # build-specific environment for boot-time config loading source /etc/default/vyatta-load-boot fi if [ -x $vyos_libexec_dir/vyos-boot-config-loader.py ]; then sg ${GROUP} -c "$vyos_libexec_dir/vyos-boot-config-loader.py $BOOTFILE" fi ) } # restore if missing pre-config script restore_if_missing_preconfig_script () { if [ ! -x ${vyatta_sysconfdir}/config/scripts/vyos-preconfig-bootup.script ]; then mkdir -p ${vyatta_sysconfdir}/config/scripts chgrp ${GROUP} ${vyatta_sysconfdir}/config/scripts chmod 775 ${vyatta_sysconfdir}/config/scripts cp ${vyos_rootfs_dir}/opt/vyatta/etc/config/scripts/vyos-preconfig-bootup.script ${vyatta_sysconfdir}/config/scripts/ chgrp ${GROUP} ${vyatta_sysconfdir}/config/scripts/vyos-preconfig-bootup.script chmod 750 ${vyatta_sysconfdir}/config/scripts/vyos-preconfig-bootup.script fi } # execute the pre-config script run_preconfig_script () { if [ -x $vyatta_sysconfdir/config/scripts/vyos-preconfig-bootup.script ]; then $vyatta_sysconfdir/config/scripts/vyos-preconfig-bootup.script fi } # restore if missing post-config script restore_if_missing_postconfig_script () { if [ ! -x ${vyatta_sysconfdir}/config/scripts/vyos-postconfig-bootup.script ]; then mkdir -p ${vyatta_sysconfdir}/config/scripts chgrp ${GROUP} ${vyatta_sysconfdir}/config/scripts chmod 775 ${vyatta_sysconfdir}/config/scripts cp ${vyos_rootfs_dir}/opt/vyatta/etc/config/scripts/vyos-postconfig-bootup.script ${vyatta_sysconfdir}/config/scripts/ chgrp ${GROUP} ${vyatta_sysconfdir}/config/scripts/vyos-postconfig-bootup.script chmod 750 ${vyatta_sysconfdir}/config/scripts/vyos-postconfig-bootup.script fi } # execute the post-config scripts run_postconfig_scripts () { if [ -x $vyatta_sysconfdir/config/scripts/vyatta-postconfig-bootup.script ]; then $vyatta_sysconfdir/config/scripts/vyatta-postconfig-bootup.script fi if [ -x $vyatta_sysconfdir/config/scripts/vyos-postconfig-bootup.script ]; then $vyatta_sysconfdir/config/scripts/vyos-postconfig-bootup.script fi } run_postupgrade_script () { if [ -f $vyatta_sysconfdir/config/.upgraded ]; then # Run the system script /usr/libexec/vyos/system/post-upgrade # Run user scripts if [ -d $vyatta_sysconfdir/config/scripts/post-upgrade.d ]; then run-parts $vyatta_sysconfdir/config/scripts/post-upgrade.d fi rm -f $vyatta_sysconfdir/config/.upgraded fi } # # On image booted machines, we need to mount /boot from the image-specific # boot directory so that kernel package installation will put the # files in the right place. 
We also have to mount /boot/grub from the # system-wide grub directory so that tools that edit the grub.cfg # file will find it in the expected location. # bind_mount_boot () { persist_path=$(/opt/vyatta/sbin/vyos-persistpath) if [ $? == 0 ]; then if [ -e $persist_path/boot ]; then image_name=$(cat /proc/cmdline | sed -e s+^.*vyos-union=/boot/++ | sed -e 's/ .*$//') if [ -n "$image_name" ]; then mount --bind $persist_path/boot/$image_name /boot if [ $? -ne 0 ]; then echo "Couldn't bind mount /boot" fi if [ ! -d /boot/grub ]; then mkdir /boot/grub fi mount --bind $persist_path/boot/grub /boot/grub if [ $? -ne 0 ]; then echo "Couldn't bind mount /boot/grub" fi fi fi fi } clear_or_override_config_files () { for conf in snmp/snmpd.conf snmp/snmptrapd.conf snmp/snmp.conf \ keepalived/keepalived.conf cron.d/vyos-crontab \ ipvsadm.rules default/ipvsadm resolv.conf do if [ -s /etc/$conf ] ; then empty /etc/$conf chmod 0644 /etc/$conf fi done } update_interface_config () { if [ -d /run/udev/vyos ]; then $vyos_libexec_dir/vyos-interface-rescan.py $BOOTFILE fi } cleanup_post_commit_hooks () { # Remove links from the post-commit hooks directory. # note that this approach only supports hooks that are "configured", # i.e., it does not support hooks that need to always be present. cpostdir=$(cli-shell-api getPostCommitHookDir) # exclude commit hooks that need to always be present excluded="00vyos-sync 10vyatta-log-commit.pl 99vyos-user-postcommit-hooks" if [ -d "$cpostdir" ]; then for f in $cpostdir/*; do if [[ ! $excluded =~ $(basename $f) ]]; then rm -f $cpostdir/$(basename $f) fi done fi } # These are all the default security setting which are later # overridden when configuration is read. These are the values the # system defaults. security_reset () { # restore NSS cofniguration back to sane system defaults # will be overwritten later when configuration is loaded cat <<EOF >/etc/nsswitch.conf passwd: files group: files shadow: files gshadow: files # Per T2678, commenting out myhostname hosts: files dns #myhostname networks: files protocols: db files services: db files ethers: db files rpc: db files netgroup: nis EOF # restore PAM back to virgin state (no radius/tacacs services) pam-auth-update --disable radius-mandatory radius-optional rm -f /etc/pam_radius_auth.conf pam-auth-update --disable tacplus-mandatory tacplus-optional rm -f /etc/tacplus_nss.conf /etc/tacplus_servers # and no Google authenticator for 2FA/MFA pam-auth-update --disable mfa-google-authenticator # Certain configuration files are re-generated by the configuration # subsystem and must reside under /etc and can not easily be moved to /run. # So on every boot we simply delete any remaining files and let the CLI # regenearte them. # PPPoE rm -f /etc/ppp/peers/pppoe* /etc/ppp/peers/wlm* # IPSec rm -rf /etc/ipsec.conf /etc/ipsec.secrets find /etc/swanctl -type f | xargs rm -f # limit cleanup rm -f /etc/security/limits.d/10-vyos.conf # iproute2 cleanup rm -f /etc/iproute2/rt_tables.d/vyos-*.conf # Container rm -f /etc/containers/storage.conf /etc/containers/registries.conf /etc/containers/containers.conf # Clean all networks and re-create them from our CLI rm -f /etc/containers/networks/* # System Options (SSH/cURL) rm -f /etc/ssh/ssh_config.d/*vyos*.conf rm -f /etc/curlrc } # XXX: T3885 - generate persistend DHCPv6 DUID (Type4 - UUID based) gen_duid () { DUID_FILE="/var/lib/dhcpv6/dhcp6c_duid" UUID_FILE="/sys/class/dmi/id/product_uuid" UUID_FILE_ALT="/sys/class/dmi/id/product_serial" if [ ! -f ${UUID_FILE} ] && [ ! 
-f ${UUID_FILE_ALT} ]; then return 1 fi # DUID is based on the BIOS/EFI UUID. We omit additional - characters if [ -f ${UUID_FILE} ]; then UUID=$(cat ${UUID_FILE} | tr -d -) fi if [ -z ${UUID} ]; then UUID=$(uuidgen --sha1 --namespace @dns --name $(cat ${UUID_FILE_ALT}) | tr -d -) fi # Add DUID type4 (UUID) information DUID_TYPE="0004" # The length information (as per RFC 6355 a UUID is 128 bits long) is in big-endian # format - beware when porting to ARM64. The length field consists of the # UUID (128 bit + 16 bits DUID type) resulting in hex 12. DUID_LEN="0012" if [ "$(echo -n I | od -to2 | head -n1 | cut -f2 -d" " | cut -c6 )" -eq 1 ]; then # true on little-endian (x86) systems DUID_LEN="1200" fi for i in $(echo -n ${DUID_LEN}${DUID_TYPE}${UUID} | sed 's/../& /g'); do echo -ne "\x$i" done > ${DUID_FILE} } start () { # reset and clean config files security_reset || log_failure_msg "security reset failed" # some legacy directories migrated over from old rl-system.init mkdir -p /var/run/vyatta /var/log/vyatta chgrp vyattacfg /var/run/vyatta /var/log/vyatta chmod 775 /var/run/vyatta /var/log/vyatta log_daemon_msg "Waiting for NICs to settle down" # At boot time udev might take a long time to reorder NICs; this will ensure that # all udev activity is completed and all NICs presented at boot-time will have their # final name before continuing with vyos-router initialization. SECONDS=0 udevadm settle STATUS=$? log_progress_msg "settled in ${SECONDS}sec." log_end_msg ${STATUS} # mountpoint for bpf maps required by xdp mount -t bpf none /sys/fs/bpf # Clear out Debian APT source config file empty /etc/apt/sources.list # Generate DHCPv6 DUID gen_duid || log_failure_msg "could not generate DUID" # Mount a temporary filesystem for container networks. # Configuration should be loaded from the VyOS CLI. cni_dir="/etc/cni/net.d" [ !
-d ${cni_dir} ] && mkdir -p ${cni_dir} mount -t tmpfs none ${cni_dir} # Init firewall nfct helper add rpc inet tcp nfct helper add rpc inet udp nfct helper add tns inet tcp nfct helper add rpc inet6 tcp nfct helper add rpc inet6 udp nfct helper add tns inet6 tcp nft --file /usr/share/vyos/vyos-firewall-init.conf || log_failure_msg "could not initiate firewall rules" # As VyOS does not execute commands that are not present in the CLI we call # the script by hand to have a single source for the login banner and MOTD ${vyos_conf_scripts_dir}/system_console.py || log_failure_msg "could not reset serial console" ${vyos_conf_scripts_dir}/system_login_banner.py || log_failure_msg "could not reset motd and issue files" ${vyos_conf_scripts_dir}/system_option.py || log_failure_msg "could not reset system option files" ${vyos_conf_scripts_dir}/system_ip.py || log_failure_msg "could not reset system IPv4 options" ${vyos_conf_scripts_dir}/system_ipv6.py || log_failure_msg "could not reset system IPv6 options" ${vyos_conf_scripts_dir}/system_conntrack.py || log_failure_msg "could not reset conntrack subsystem" ${vyos_conf_scripts_dir}/container.py || log_failure_msg "could not reset container subsystem" clear_or_override_config_files || log_failure_msg "could not reset config files" # enable some debugging before loading the configuration if grep -q vyos-debug /proc/cmdline; then log_action_begin_msg "Enable runtime debugging options" + FRR_DEBUG=$(python3 -c "from vyos.defaults import frr_debug_enable; print(frr_debug_enable)") + touch $FRR_DEBUG touch /tmp/vyos.container.debug touch /tmp/vyos.ifconfig.debug - touch /tmp/vyos.frr.debug touch /tmp/vyos.container.debug touch /tmp/vyos.smoketest.debug fi # Cleanup PKI CAs if [ -d /usr/local/share/ca-certificates/vyos ]; then rm -f /usr/local/share/ca-certificates/vyos/*.crt update-ca-certificates >/dev/null 2>&1 fi log_action_begin_msg "Mounting VyOS Config" # ensure the vyatta_configdir supports a large number of inodes since # the config hierarchy is often inode-bound (instead of size). # impose a minimum and then scale up dynamically with the actual size # of the system memory. local tmem=$(sed -n 's/^MemTotal: \+\([0-9]\+\) kB$/\1/p' /proc/meminfo) local tpages local tmpfs_opts="nosuid,nodev,mode=775,nr_inodes=0" #automatically allocate inodes mount -o $tmpfs_opts -t tmpfs none ${vyatta_configdir} \ && chgrp ${GROUP} ${vyatta_configdir} log_action_end_msg $? mount_encrypted_config # T5239: early read of system hostname as this value is read-only once during # FRR initialisation tmp=$(${vyos_libexec_dir}/read-saved-value.py --path "system host-name") hostnamectl set-hostname --static "$tmp" ${vyos_conf_scripts_dir}/system_frr.py || log_failure_msg "could not reset FRR config" # If for any reason FRR was not started by system_frr.py - start it anyways. # This is a safety net! systemctl start frr.service disabled bootfile || init_bootfile cleanup_post_commit_hooks log_daemon_msg "Starting VyOS router" disabled migrate || migrate_bootfile restore_if_missing_preconfig_script run_preconfig_script run_postupgrade_script update_interface_config disabled system_config || system_config systemctl start vyconfd.service for s in ${subinit[@]} ; do if ! disabled $s; then log_progress_msg $s if ! ${vyatta_sbindir}/${s}.init start then log_failure_msg exit 1 fi fi done bind_mount_boot disabled configure || load_bootfile log_end_msg $? 
telinit q chmod g-w,o-w / restore_if_missing_postconfig_script run_postconfig_scripts tmp=$(${vyos_libexec_dir}/read-saved-value.py --path "protocols rpki cache") if [[ ! -z "$tmp" ]]; then vtysh -c "rpki start" fi } stop() { local -i status=0 log_daemon_msg "Stopping VyOS router" for ((i=${#sub_inits[@]} - 1; i >= 0; i--)) ; do s=${subinit[$i]} log_progress_msg $s ${vyatta_sbindir}/${s}.init stop let status\|=$? done log_end_msg $status log_action_begin_msg "Un-mounting VyOS Config" umount ${vyatta_configdir} log_action_end_msg $? systemctl stop vyconfd.service systemctl stop frr.service unmount_encrypted_config } case "$action" in start) start ;; stop) stop ;; restart|force-reload) stop && start ;; *) log_failure_msg "usage: $progname [ start|stop|restart ] [ subinit ... ]" ; false ;; esac exit $? # Local Variables: # mode: shell-script # sh-indentation: 4 # End: diff --git a/src/migration-scripts/quagga/11-to-12 b/src/migration-scripts/quagga/11-to-12 new file mode 100644 index 000000000..8ae2023a1 --- /dev/null +++ b/src/migration-scripts/quagga/11-to-12 @@ -0,0 +1,75 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. 
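To make the byte layout produced by gen_duid() in the vyos-router start script above easier to follow, here is a minimal Python re-creation of the same encoding. The UUID value is a made-up example (the script reads it from /sys/class/dmi/id/product_uuid), and nothing is written to the real DUID file.

# Sketch of the DUID that gen_duid() writes to /var/lib/dhcpv6/dhcp6c_duid:
# a host-byte-order length prefix, the DUID-UUID type (4, RFC 6355) and the
# 128-bit machine UUID. The UUID below is a hypothetical example value.
import struct
import uuid

machine_uuid = uuid.UUID('12345678-1234-5678-1234-567812345678')  # placeholder for the DMI product UUID

duid = struct.pack('!H', 0x0004) + machine_uuid.bytes  # 2-byte type + 16-byte UUID = 18 bytes
length = struct.pack('=H', len(duid))                  # host byte order: b'\x12\x00' on x86, hence "1200"

print((length + duid).hex())  # -> 1200000412345678123456781234567812345678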
+ +# T6747: +# - Migrate static BFD configuration to match FRR possibilities +# - Consolidate static multicast routing configuration under a new node + +from vyos.configtree import ConfigTree + +static_base = ['protocols', 'static'] + +def migrate(config: ConfigTree) -> None: + # Check for static route/route6 configuration + # Migrate static BFD configuration to match FRR possibilities + for route_route6 in ['route', 'route6']: + route_route6_base = static_base + [route_route6] + if not config.exists(route_route6_base): + continue + + for prefix in config.list_nodes(route_route6_base): + next_hop_base = route_route6_base + [prefix, 'next-hop'] + if not config.exists(next_hop_base): + continue + + for next_hop in config.list_nodes(next_hop_base): + multi_hop_base = next_hop_base + [next_hop, 'bfd', 'multi-hop'] + + if not config.exists(multi_hop_base): + continue + + mh_source_base = multi_hop_base + ['source'] + source = None + profile = None + for src_ip in config.list_nodes(mh_source_base): + source = src_ip + if config.exists(mh_source_base + [source, 'profile']): + profile = config.return_value(mh_source_base + [source, 'profile']) + # FRR only supports one source; we will use the first one + break + + config.delete(multi_hop_base) + config.set(multi_hop_base + ['source-address'], value=source) + config.set(next_hop_base + [next_hop, 'bfd', 'profile'], value=profile) + + # Consolidate static multicast routing configuration under a new node + if config.exists(static_base + ['multicast']): + for mroute in ['interface-route', 'route']: + mroute_base = static_base + ['multicast', mroute] + if not config.exists(mroute_base): + continue + config.set(static_base + ['mroute']) + config.set_tag(static_base + ['mroute']) + for route in config.list_nodes(mroute_base): + config.copy(mroute_base + [route], static_base + ['mroute', route]) + + mroute_base = static_base + ['mroute'] + if config.exists(mroute_base): + for mroute in config.list_nodes(mroute_base): + interface_path = mroute_base + [mroute, 'next-hop-interface'] + if config.exists(interface_path): + config.rename(interface_path, 'interface') + + config.delete(static_base + ['multicast']) diff --git a/src/services/vyos-configd b/src/services/vyos-configd index d977ba2cb..ecad85801 100755 --- a/src/services/vyos-configd +++ b/src/services/vyos-configd @@ -1,330 +1,338 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.
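To make the effect of the quagga 11-to-12 migration above concrete, the sketch below applies the same two transformations to a plain nested dict that mirrors the CLI paths. The real script works on a vyos.configtree.ConfigTree and also handles route6 identically; all prefixes, addresses and the profile name here are hypothetical.

# Self-contained illustration of the T6747 migration logic above (dict-based,
# not the real ConfigTree API); example addresses and names are made up.
before = {
    'protocols': {'static': {
        'route': {'192.0.2.0/24': {'next-hop': {'203.0.113.1': {
            'bfd': {'multi-hop': {'source': {'198.51.100.1': {'profile': 'LOW_LATENCY'}}}}}}}},
        'multicast': {
            'route': {'233.252.0.0/24': {'next-hop': {'203.0.113.1': {}}}},
            'interface-route': {'233.252.1.0/24': {'next-hop-interface': {'eth1': {}}}},
        },
    }}
}

def migrate(cfg: dict) -> dict:
    static = cfg['protocols']['static']
    # 1. BFD: "bfd multi-hop source <addr> [profile <name>]" becomes
    #    "bfd multi-hop source-address <addr>" plus "bfd profile <name>"
    for route in static.get('route', {}).values():
        for nh_cfg in route.get('next-hop', {}).values():
            multi_hop = nh_cfg.get('bfd', {}).pop('multi-hop', None)
            if not multi_hop:
                continue
            source, src_cfg = next(iter(multi_hop['source'].items()))  # FRR supports a single source
            nh_cfg['bfd']['multi-hop'] = {'source-address': source}
            if 'profile' in src_cfg:
                nh_cfg['bfd']['profile'] = src_cfg['profile']
    # 2. Multicast: "multicast route|interface-route" moves under "mroute",
    #    and "next-hop-interface" is renamed to "interface"
    multicast = static.pop('multicast', None)
    if multicast:
        mroute = static.setdefault('mroute', {})
        for kind in ('interface-route', 'route'):
            for prefix, entry in multicast.get(kind, {}).items():
                if 'next-hop-interface' in entry:
                    entry['interface'] = entry.pop('next-hop-interface')
                mroute[prefix] = entry
    return cfg

print(migrate(before)['protocols']['static'])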
# pylint: disable=redefined-outer-name import os import sys import grp import re import json import typing import logging import signal import traceback import importlib.util import io from contextlib import redirect_stdout import zmq from vyos.defaults import directories from vyos.utils.boot import boot_configuration_complete from vyos.configsource import ConfigSourceString from vyos.configsource import ConfigSourceError from vyos.configdiff import get_commit_scripts from vyos.config import Config +from vyos.frrender import FRRender from vyos import ConfigError CFG_GROUP = 'vyattacfg' script_stdout_log = '/tmp/vyos-configd-script-stdout' debug = True logger = logging.getLogger(__name__) logs_handler = logging.StreamHandler() logger.addHandler(logs_handler) if debug: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) SOCKET_PATH = 'ipc:///run/vyos-configd.sock' MAX_MSG_SIZE = 65535 PAD_MSG_SIZE = 6 # Response error codes R_SUCCESS = 1 R_ERROR_COMMIT = 2 R_ERROR_DAEMON = 4 R_PASS = 8 vyos_conf_scripts_dir = directories['conf_mode'] configd_include_file = os.path.join(directories['data'], 'configd-include.json') configd_env_set_file = os.path.join(directories['data'], 'vyos-configd-env-set') configd_env_unset_file = os.path.join(directories['data'], 'vyos-configd-env-unset') # sourced on entering config session configd_env_file = '/etc/default/vyos-configd-env' def key_name_from_file_name(f): return os.path.splitext(f)[0] def module_name_from_key(k): return k.replace('-', '_') def path_from_file_name(f): return os.path.join(vyos_conf_scripts_dir, f) # opt-in to be run by daemon with open(configd_include_file) as f: try: include = json.load(f) except OSError as e: logger.critical(f'configd include file error: {e}') sys.exit(1) except json.JSONDecodeError as e: logger.critical(f'JSON load error: {e}') sys.exit(1) # import conf_mode scripts (_, _, filenames) = next(iter(os.walk(vyos_conf_scripts_dir))) filenames.sort() load_filenames = [f for f in filenames if f in include] imports = [key_name_from_file_name(f) for f in load_filenames] module_names = [module_name_from_key(k) for k in imports] paths = [path_from_file_name(f) for f in load_filenames] to_load = list(zip(module_names, paths)) modules = [] for x in to_load: spec = importlib.util.spec_from_file_location(x[0], x[1]) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) modules.append(module) conf_mode_scripts = dict(zip(imports, modules)) exclude_set = {key_name_from_file_name(f) for f in filenames if f not in include} include_set = {key_name_from_file_name(f) for f in filenames if f in include} def write_stdout_log(file_name, msg): if boot_configuration_complete(): return with open(file_name, 'a') as f: f.write(msg) def run_script(script_name, config, args) -> tuple[int, str]: # pylint: disable=broad-exception-caught script = conf_mode_scripts[script_name] script.argv = args config.set_level([]) try: c = script.get_config(config) script.verify(c) script.generate(c) script.apply(c) except ConfigError as e: logger.error(e) return R_ERROR_COMMIT, str(e) except Exception: tb = traceback.format_exc() logger.error(tb) return R_ERROR_COMMIT, tb return R_SUCCESS, '' def initialization(socket): # pylint: disable=broad-exception-caught,too-many-locals # Reset config strings: active_string = '' session_string = '' # check first for resent init msg, in case of client timeout while True: msg = socket.recv().decode('utf-8', 'ignore') try: message = json.loads(msg) if message['type'] == 'init': resp = 'init' 
socket.send(resp.encode()) except Exception: break # zmq synchronous for ipc from single client: active_string = msg resp = 'active' socket.send(resp.encode()) session_string = socket.recv().decode('utf-8', 'ignore') resp = 'session' socket.send(resp.encode()) pid_string = socket.recv().decode('utf-8', 'ignore') resp = 'pid' socket.send(resp.encode()) sudo_user_string = socket.recv().decode('utf-8', 'ignore') resp = 'sudo_user' socket.send(resp.encode()) temp_config_dir_string = socket.recv().decode('utf-8', 'ignore') resp = 'temp_config_dir' socket.send(resp.encode()) changes_only_dir_string = socket.recv().decode('utf-8', 'ignore') resp = 'changes_only_dir' socket.send(resp.encode()) logger.debug(f'config session pid is {pid_string}') logger.debug(f'config session sudo_user is {sudo_user_string}') os.environ['SUDO_USER'] = sudo_user_string if temp_config_dir_string: os.environ['VYATTA_TEMP_CONFIG_DIR'] = temp_config_dir_string if changes_only_dir_string: os.environ['VYATTA_CHANGES_ONLY_DIR'] = changes_only_dir_string try: configsource = ConfigSourceString(running_config_text=active_string, session_config_text=session_string) except ConfigSourceError as e: logger.debug(e) return None config = Config(config_source=configsource) dependent_func: dict[str, list[typing.Callable]] = {} setattr(config, 'dependent_func', dependent_func) commit_scripts = get_commit_scripts(config) logger.debug(f'commit_scripts: {commit_scripts}') scripts_called = [] setattr(config, 'scripts_called', scripts_called) + if not hasattr(config, 'frrender_cls'): + setattr(config, 'frrender_cls', FRRender()) + return config def process_node_data(config, data, _last: bool = False) -> tuple[int, str]: if not config: out = 'Empty config' logger.critical(out) return R_ERROR_DAEMON, out script_name = None os.environ['VYOS_TAGNODE_VALUE'] = '' args = [] config.dependency_list.clear() res = re.match(r'^(VYOS_TAGNODE_VALUE=[^/]+)?.*\/([^/]+).py(.*)', data) if res.group(1): env = res.group(1).split('=') os.environ[env[0]] = env[1] if res.group(2): script_name = res.group(2) if not script_name: out = 'Missing script_name' logger.critical(out) return R_ERROR_DAEMON, out if res.group(3): args = res.group(3).split() args.insert(0, f'{script_name}.py') tag_value = os.getenv('VYOS_TAGNODE_VALUE', '') tag_ext = f'_{tag_value}' if tag_value else '' script_record = f'{script_name}{tag_ext}' scripts_called = getattr(config, 'scripts_called', []) scripts_called.append(script_record) if script_name not in include_set: return R_PASS, '' with redirect_stdout(io.StringIO()) as o: result, err_out = run_script(script_name, config, args) amb_out = o.getvalue() o.close() out = amb_out + err_out return result, out def send_result(sock, err, msg): msg = msg if msg else '' msg_size = min(MAX_MSG_SIZE, len(msg)) err_rep = err.to_bytes(1) msg_size_rep = f'{msg_size:#0{PAD_MSG_SIZE}x}' logger.debug(f'Sending reply: error_code {err} with output') sock.send_multipart([err_rep, msg_size_rep.encode(), msg.encode()]) write_stdout_log(script_stdout_log, msg) def remove_if_file(f: str): try: os.remove(f) except FileNotFoundError: pass def shutdown(): remove_if_file(configd_env_file) os.symlink(configd_env_unset_file, configd_env_file) sys.exit(0) if __name__ == '__main__': context = zmq.Context() socket = context.socket(zmq.REP) # Set the right permissions on the socket, then change it back o_mask = os.umask(0) socket.bind(SOCKET_PATH) os.umask(o_mask) cfg_group = grp.getgrnam(CFG_GROUP) os.setgid(cfg_group.gr_gid) os.environ['VYOS_CONFIGD'] = 't' def 
sig_handler(signum, frame): # pylint: disable=unused-argument shutdown() signal.signal(signal.SIGTERM, sig_handler) signal.signal(signal.SIGINT, sig_handler) # Define the vyshim environment variable remove_if_file(configd_env_file) os.symlink(configd_env_set_file, configd_env_file) config = None while True: # Wait for next request from client msg = socket.recv().decode() logger.debug(f'Received message: {msg}') message = json.loads(msg) if message['type'] == 'init': resp = 'init' socket.send(resp.encode()) config = initialization(socket) elif message['type'] == 'node': res, out = process_node_data(config, message['data'], message['last']) send_result(socket, res, out) if message['last'] and config: scripts_called = getattr(config, 'scripts_called', []) logger.debug(f'scripts_called: {scripts_called}') + + if hasattr(config, 'frrender_cls') and res == R_SUCCESS: + frrender_cls = getattr(config, 'frrender_cls') + frrender_cls.apply() else: logger.critical(f'Unexpected message: {message}') diff --git a/src/tests/test_initial_setup.py b/src/tests/test_initial_setup.py index 4cd5fb169..7737f9df5 100644 --- a/src/tests/test_initial_setup.py +++ b/src/tests/test_initial_setup.py @@ -1,99 +1,99 @@ # Copyright (C) 2018-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
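For context on the vyos-configd changes above, the daemon's ZeroMQ REQ/REP exchange can be exercised by a small client. The sketch below follows the handshake visible in initialization() and process_node_data(); it assumes the daemon is listening on its usual socket, and the config texts and the conf_mode script path are placeholders (in production the config-session shim drives this exchange, not a script).

# Hypothetical client for the vyos-configd socket protocol shown above.
import json
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect('ipc:///run/vyos-configd.sock')

def ask(payload: bytes) -> str:
    sock.send(payload)
    return sock.recv().decode()

# session setup: an 'init' message, then running config, session config, pid,
# sudo user and the two overlay directories; each is acknowledged by a keyword
assert ask(json.dumps({'type': 'init'}).encode()) == 'init'
for payload in (b'<active config text>', b'<session config text>',
                b'4242', b'vyos', b'', b''):
    ask(payload)  # replies: 'active', 'session', 'pid', 'sudo_user', ...

# ask the daemon to run one conf_mode script; the reply is a multipart message:
# error code byte, zero-padded output length and the captured script output
sock.send(json.dumps({'type': 'node',
                      'data': '/usr/libexec/vyos/conf_mode/system_option.py',
                      'last': True}).encode())
err, size, out = sock.recv_multipart()
print(err[0], out.decode())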
import unittest import vyos.configtree import vyos.initialsetup as vis from unittest import TestCase from vyos.xml_ref import definition from vyos.xml_ref.pkg_cache.vyos_1x_cache import reference class TestInitialSetup(TestCase): def setUp(self): with open('tests/data/config.boot.default', 'r') as f: config_string = f.read() self.config = vyos.configtree.ConfigTree(config_string) self.xml = definition.Xml() self.xml.define(reference) def test_set_user_password(self): vis.set_user_password(self.config, 'vyos', 'vyosvyos') # Old password hash from the default config old_pw = '$6$QxPS.uk6mfo$9QBSo8u1FkH16gMyAVhus6fU3LOzvLR9Z9.82m3tiHFAxTtIkhaZSWssSgzt4v4dGAL8rhVQxTg0oAG9/q11h/' new_pw = self.config.return_value(["system", "login", "user", "vyos", "authentication", "encrypted-password"]) # Just check it changed the hash, don't try to check if hash is good self.assertNotEqual(old_pw, new_pw) def test_disable_user_password(self): vis.disable_user_password(self.config, 'vyos') new_pw = self.config.return_value(["system", "login", "user", "vyos", "authentication", "encrypted-password"]) self.assertEqual(new_pw, '!') def test_set_ssh_key_with_name(self): test_ssh_key = " ssh-rsa fakedata vyos@vyos " vis.set_user_ssh_key(self.config, 'vyos', test_ssh_key) key_type = self.config.return_value(["system", "login", "user", "vyos", "authentication", "public-keys", "vyos@vyos", "type"]) key_data = self.config.return_value(["system", "login", "user", "vyos", "authentication", "public-keys", "vyos@vyos", "key"]) self.assertEqual(key_type, 'ssh-rsa') self.assertEqual(key_data, 'fakedata') self.assertTrue(self.xml.is_tag(["system", "login", "user", "vyos", "authentication", "public-keys"])) def test_set_ssh_key_without_name(self): # If key file doesn't include a name, the function will use user name for the key name test_ssh_key = " ssh-rsa fakedata " vis.set_user_ssh_key(self.config, 'vyos', test_ssh_key) key_type = self.config.return_value(["system", "login", "user", "vyos", "authentication", "public-keys", "vyos", "type"]) key_data = self.config.return_value(["system", "login", "user", "vyos", "authentication", "public-keys", "vyos", "key"]) self.assertEqual(key_type, 'ssh-rsa') self.assertEqual(key_data, 'fakedata') self.assertTrue(self.xml.is_tag(["system", "login", "user", "vyos", "authentication", "public-keys"])) def test_create_user(self): vis.create_user(self.config, 'jrandomhacker', password='qwerty', key=" ssh-rsa fakedata jrandomhacker@foovax ") self.assertTrue(self.config.exists(["system", "login", "user", "jrandomhacker"])) self.assertTrue(self.config.exists(["system", "login", "user", "jrandomhacker", "authentication", "public-keys", "jrandomhacker@foovax"])) self.assertTrue(self.config.exists(["system", "login", "user", "jrandomhacker", "authentication", "encrypted-password"])) self.assertEqual(self.config.return_value(["system", "login", "user", "jrandomhacker", "level"]), "admin") def test_set_hostname(self): vis.set_host_name(self.config, "vyos-test") self.assertEqual(self.config.return_value(["system", "host-name"]), "vyos-test") def test_set_name_servers(self): vis.set_name_servers(self.config, ["192.0.2.10", "203.0.113.20"]) servers = self.config.return_values(["system", "name-server"]) self.assertIn("192.0.2.10", servers) self.assertIn("203.0.113.20", servers) def test_set_gateway(self): vis.set_default_gateway(self.config, '192.0.2.1') self.assertTrue(self.config.exists(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop', '192.0.2.1'])) - 
self.assertTrue(self.xml.is_tag(['protocols', 'static', 'multicast', 'route', '0.0.0.0/0', 'next-hop'])) - self.assertTrue(self.xml.is_tag(['protocols', 'static', 'multicast', 'route'])) + self.assertTrue(self.xml.is_tag(['protocols', 'static', 'mroute', '0.0.0.0/0', 'next-hop'])) + self.assertTrue(self.xml.is_tag(['protocols', 'static', 'mroute'])) if __name__ == "__main__": unittest.main()
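Finally, the init-script hunk earlier in this diff replaces the hard-coded /tmp/vyos.frr.debug flag with a path taken from vyos.defaults.frr_debug_enable. A consumer would then only need an existence check along these lines; this is a hypothetical helper, not code from this changeset.

# Assumption: vyos.defaults.frr_debug_enable holds the path of the flag file
# touched by the init script when 'vyos-debug' is present on the kernel cmdline.
import os
from vyos.defaults import frr_debug_enable

def frr_debug_requested() -> bool:
    return os.path.exists(frr_debug_enable)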