diff --git a/doc/source/ovn/index.rst b/doc/source/ovn/index.rst index 9a4eeedc620..b2973e5e518 100644 --- a/doc/source/ovn/index.rst +++ b/doc/source/ovn/index.rst @@ -13,3 +13,4 @@ OVN Driver ml2ovn_trace.rst faq/index.rst ovn_agent.rst + virtual_ips.rst diff --git a/doc/source/ovn/virtual_ips.rst b/doc/source/ovn/virtual_ips.rst new file mode 100644 index 00000000000..f7255205258 --- /dev/null +++ b/doc/source/ovn/virtual_ips.rst @@ -0,0 +1,193 @@ +.. _ovn_virtual_ips:

Virtual IPs
===========

It is common practice to create an unbound port in a Neutron network to
allocate (reserve) an IP address that will be used as a Virtual IP (VIP)
by other ports in the same network. Such IP addresses are then added as
``allowed_address_pairs`` to the ports used by Virtual Machines.

Applications, such as keepalived, running inside these Virtual Machines can
then configure the VIP on one of the VMs and move it between VMs dynamically.

Implementation in OVN
~~~~~~~~~~~~~~~~~~~~~

For Virtual IP addresses to work properly in the OVN backend, Neutron needs to
mark the ``Logical Switch Port`` corresponding to the port with the Virtual IP
as ``virtual``. Neutron does this for ports that are unbound and have a fixed
IP address that is also configured in the ``allowed_address_pairs`` of any
other port in the same network.

Limitations
~~~~~~~~~~~

* When a Virtual IP address is going to be used by Virtual Machines and
  configured in their ``allowed_address_pairs``, it is necessary to also
  create such an unbound port in Neutron in order to:

  * reserve that IP address so that it will not later be allocated as the
    fixed IP of another port in the same network,
  * let OVN know that this IP and its ``Logical Switch Port`` are ``virtual``
    so that OVN can configure them accordingly.

* A port created in Neutron to allocate a Virtual IP address has to be
  ``unbound``; it cannot be attached directly to any Virtual Machine.

* Because of how Virtual IP addresses are implemented in the ML2/OVN backend,
  the Virtual IP address must be set in the ``allowed_address_pairs`` of the VM
  port as a single IP address (/32 for IPv4 or /128 for IPv6).
  Setting a larger CIDR as ``allowed_address_pairs``, even if it contains
  the Virtual IP address, will not mark the ``Logical Switch Port``
  corresponding to the port with that IP address as ``virtual``.

* Setting an IP address that belongs to the distributed metadata port of the
  same network in ``allowed_address_pairs`` is not allowed.


Usage example
~~~~~~~~~~~~~

To use a Virtual IP address in Neutron, you need to create an unbound port in a
Neutron network and add the Virtual IP address to the ``allowed_address_pairs``
of the port(s) that belong to the Virtual Machine(s).

* Create an unbound port in the Neutron network to allocate the Virtual IP
  address:

  .. 
code-block:: console + + $ openstack port create --network private virtual-ip-port + +-------------------------+-----------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------------------+-----------------------------------------------------------------------------------------------------+ + | admin_state_up | UP | + | allowed_address_pairs | | + | binding_host_id | | + | binding_profile | | + | binding_vif_details | | + | binding_vif_type | unbound | + | binding_vnic_type | normal | + | created_at | 2025-11-28T14:39:06Z | + | data_plane_status | None | + | description | | + | device_id | | + | device_owner | | + | device_profile | None | + | dns_assignment | | + | dns_domain | None | + | dns_name | None | + | extra_dhcp_opts | | + | fixed_ips | ip_address='10.0.0.20', subnet_id='866305cc-26db-48d7-8471-cbd267321b8b' | + | | ip_address='fde7:7c8e:8883:0:f816:3eff:feb6:559f', subnet_id='b8b0a413-6229-4c64-9d6e-65906a33b056' | + | hardware_offload_type | None | + | hints | | + | id | 3f078d1b-2f6e-41d8-99d7-70bc801f3979 | + | ip_allocation | None | + | mac_address | fa:16:3e:b6:55:9f | + | name | virtual-ip-port | + | network_id | c8e5e81c-d318-43f6-a45e-056f22a518e6 | + | numa_affinity_policy | None | + | port_security_enabled | True | + | project_id | b7907ac4c9794e5787a8d6bac0e5b80b | + | propagate_uplink_status | None | + | resource_request | None | + | revision_number | 1 | + | qos_network_policy_id | None | + | qos_policy_id | None | + | security_group_ids | 876d4c44-e2fd-48fc-bbd4-4bd295676a0e | + | status | DOWN | + | tags | | + | trunk_details | None | + | trusted | None | + | updated_at | 2025-11-28T14:39:06Z | + +-------------------------+-----------------------------------------------------------------------------------------------------+ + +* Create a Virtual Machine + + .. 
code-block:: console + + openstack server create --flavor m1.micro --image cirros-0.5.1-x86_64-disk --network private virtual-machine + +-------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | OS-DCF:diskConfig | MANUAL | + | OS-EXT-AZ:availability_zone | None | + | OS-EXT-SRV-ATTR:host | None | + | OS-EXT-SRV-ATTR:hostname | virtual-machine | + | OS-EXT-SRV-ATTR:hypervisor_hostname | None | + | OS-EXT-SRV-ATTR:instance_name | None | + | OS-EXT-SRV-ATTR:kernel_id | None | + | OS-EXT-SRV-ATTR:launch_index | None | + | OS-EXT-SRV-ATTR:ramdisk_id | None | + | OS-EXT-SRV-ATTR:reservation_id | None | + | OS-EXT-SRV-ATTR:root_device_name | None | + | OS-EXT-SRV-ATTR:user_data | None | + | OS-EXT-STS:power_state | N/A | + | OS-EXT-STS:task_state | scheduling | + | OS-EXT-STS:vm_state | building | + | OS-SRV-USG:launched_at | None | + | OS-SRV-USG:terminated_at | None | + | accessIPv4 | None | + | accessIPv6 | None | + | addresses | N/A | + | adminPass | QNkLbpeZ72LF | + | config_drive | None | + | created | 2025-11-28T14:41:22Z | + | description | None | + | flavor | description=, disk='1', ephemeral='0', extra_specs.hw_rng:allowed='True', id='m1.micro', is_disabled=, is_public='True', location=, name='m1.micro', | + | | original_name='m1.micro', ram='256', rxtx_factor=, swap='0', vcpus='1' | + | hostId | None | + | host_status | None | + | id | d2573702-b79c-46a3-bd7a-d8aa50341082 | + | image | cirros-0.5.1-x86_64-disk (7b920c82-0879-4526-9ee8-7e3b77e7fe28) | + | key_name | None | + | locked | None | + | locked_reason | None | + | name | virtual-machine | + | pinned_availability_zone | None | + | progress | None | + | project_id | b7907ac4c9794e5787a8d6bac0e5b80b | + | properties | None | + | scheduler_hints | | + | security_groups | name='default' | + | server_groups | None | + | status | BUILD | + | tags | | + | trusted_image_certificates | None | + | updated | 2025-11-28T14:41:22Z | + | user_id | d46c7955bea644c9a45e5d95bb462e29 | + | volumes_attached | | + +-------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + + +* List ports of the Virtual Machine + + .. 
code-block:: console

      $ openstack port list --device-id d2573702-b79c-46a3-bd7a-d8aa50341082
      +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+
      | ID                                   | Name | MAC Address       | Fixed IP Addresses                                                                                  | Status |
      +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+
      | 692c7f41-0497-4d4c-9766-3d71ffd229df |      | fa:16:3e:b6:44:9a | ip_address='10.0.0.30', subnet_id='866305cc-26db-48d7-8471-cbd267321b8b'                            | ACTIVE |
      |                                      |      |                   | ip_address='fde7:7c8e:8883:0:f816:3eff:feb6:449a', subnet_id='b8b0a413-6229-4c64-9d6e-65906a33b056' |        |
      +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+

* Set the Virtual IP address as an allowed address pair to the port of the
  Virtual Machine:

  .. code-block:: console

      $ openstack port set --allowed-address ip-address=10.0.0.20 692c7f41-0497-4d4c-9766-3d71ffd229df


After these steps, the Virtual IP address will be available on the port of the
Virtual Machine.

If a CIDR such as ``10.0.0.0/24`` is set in the ``allowed_address_pairs``
instead of the IP address ``10.0.0.20``, then the ``Logical Switch Port``
related to the port with IP address ``10.0.0.20`` would
not be marked as ``virtual`` due to the limitations mentioned above.

diff --git a/neutron/agent/ovn/agent/ovn_neutron_agent.py b/neutron/agent/ovn/agent/ovn_neutron_agent.py index ffebdc4ba91..9be7b0060c8 100644 --- a/neutron/agent/ovn/agent/ovn_neutron_agent.py +++ b/neutron/agent/ovn/agent/ovn_neutron_agent.py @@ -58,9 +58,8 @@ def __init__(self, conf): self._chassis = None self._chassis_id = None self._ovn_bridge = None - self.ext_manager_api = ext_mgr.OVNAgentExtensionAPI() - self.ext_manager = ext_mgr.OVNAgentExtensionManager(self._conf) - self.ext_manager.initialize(None, 'ovn', self) + self.ext_manager_api = None + self.ext_manager = None def __getitem__(self, name): """Return the named extension object from ``self.ext_manager``""" @@ -159,7 +158,22 @@ def update_neutron_sb_cfg_key(self): 'Chassis_Private', self.chassis, ('external_ids', external_ids)).execute(check_error=True) + def _initialize_ext_manager(self): + """Initialize the extension manager and the extension manager API. + + This method must be called once, outside the ``__init__`` method and + at the beginning of the ``start`` method. + """ + if not self.ext_manager: + self.ext_manager_api = ext_mgr.OVNAgentExtensionAPI() + self.ext_manager = ext_mgr.OVNAgentExtensionManager(self._conf) + self.ext_manager.initialize(None, 'ovn', self) + def start(self): + # This must be the first operation in the `start` method. + self._initialize_ext_manager() + + # Extension manager configuration. 
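+ # The extension manager has to exist before the OVS IDL is loaded:
+ # the agent extensions supply the OVS IDL events that get registered
+ # on the new connection (see the functional tests further below). 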
self.ext_manager_api.ovs_idl = self._load_ovs_idl() self.load_config() # Before executing "_load_sb_idl", it is needed to execute diff --git a/neutron/agent/ovn/metadata/agent.py b/neutron/agent/ovn/metadata/agent.py index b39ce8da26a..df58581d8d3 100644 --- a/neutron/agent/ovn/metadata/agent.py +++ b/neutron/agent/ovn/metadata/agent.py @@ -431,6 +431,21 @@ def _update_chassis_private_config(self): 'Chassis_Private', self.chassis, ('external_ids', external_ids)).execute(check_error=True) + def _update_metadata_sb_cfg_key(self): + """Update the Chassis_Private nb_cfg information in external_ids + + This method should be called once the Metadata Agent has been + registered (method ``register_metadata_agent`` has been called), the + corresponding Chassis_Private record has been created or updated and + the chassis private config has been updated. + """ + nb_cfg = self.sb_idl.db_get('Chassis_Private', + self.chassis, 'nb_cfg').execute() + external_ids = {ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY: str(nb_cfg)} + self.sb_idl.db_set( + 'Chassis_Private', self.chassis, + ('external_ids', external_ids)).execute(check_error=True) + @_sync_lock def resync(self): """Resync the agent. @@ -439,6 +454,7 @@ """ self._load_config() self._update_chassis_private_config() + self._update_metadata_sb_cfg_key() self.sync() def start(self): @@ -474,6 +490,7 @@ # Register the agent with its corresponding Chassis self.register_metadata_agent() self._update_chassis_private_config() + self._update_metadata_sb_cfg_key() self._proxy.wait() diff --git a/neutron/common/metadata.py b/neutron/common/metadata.py index b17765b8864..d3ade12235c 100644 --- a/neutron/common/metadata.py +++ b/neutron/common/metadata.py @@ -33,7 +33,10 @@ PROXY_SERVICE_NAME = 'haproxy' PROXY_SERVICE_CMD = 'haproxy' -CONTENT_ENCODERS = ('gzip', 'deflate') +CONTENT_ENCODERS = { + 'gzip': b'\x1f\x8b\x08', + 'deflate': b'\x1f\x8b\x08' +} class InvalidUserOrGroupException(Exception): @@ -159,15 +162,21 @@ class MetadataProxyHandlerBaseSocketServer( metaclass=abc.ABCMeta): @staticmethod def _http_response(http_response, request): + headerlist = list(http_response.headers.items()) + # We detect whether the content is compressed by its magic signature + # when the `content-encoding` header is not present. + if not http_response.headers.get('content-encoding'): + if http_response.content[:3] == CONTENT_ENCODERS['gzip']: + headerlist.append(('content-encoding', 'gzip')) + _res = webob.Response( body=http_response.content, status=http_response.status_code, - content_type=http_response.headers['content-type'], - charset=http_response.encoding) + headerlist=headerlist) # The content of the response is decoded depending on the # "Content-Encoding" header, if present. The operation is limited to # ("gzip", "deflate"), as in the ``webob.response.Response`` class. 
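# (``decode_content()`` decompresses the body and clears the # content-encoding header, so the payload is forwarded uncompressed.) 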
- if _res.content_encoding in CONTENT_ENCODERS: + if _res.content_encoding in CONTENT_ENCODERS.keys(): _res.decode_content() # NOTE(ralonsoh): there should be a better way to format the HTTP diff --git a/neutron/common/ovn/extensions.py b/neutron/common/ovn/extensions.py index afd6bf4f900..f2b15634096 100644 --- a/neutron/common/ovn/extensions.py +++ b/neutron/common/ovn/extensions.py @@ -70,6 +70,7 @@ from neutron_lib.api.definitions import qinq from neutron_lib.api.definitions import qos from neutron_lib.api.definitions import qos_bw_limit_direction +from neutron_lib.api.definitions import qos_bw_minimum_ingress from neutron_lib.api.definitions import qos_default from neutron_lib.api.definitions import qos_gateway_ip from neutron_lib.api.definitions import qos_rule_type_details @@ -171,6 +172,7 @@ qinq.ALIAS, qos.ALIAS, qos_bw_limit_direction.ALIAS, + qos_bw_minimum_ingress.ALIAS, qos_default.ALIAS, qos_rule_type_details.ALIAS, qos_rule_type_filter.ALIAS, diff --git a/neutron/pecan_wsgi/hooks/policy_enforcement.py b/neutron/pecan_wsgi/hooks/policy_enforcement.py index a6b469edc40..cba9d1573ad 100644 --- a/neutron/pecan_wsgi/hooks/policy_enforcement.py +++ b/neutron/pecan_wsgi/hooks/policy_enforcement.py @@ -16,6 +16,7 @@ from neutron_lib import constants as const from oslo_log import log as logging from oslo_policy import policy as oslo_policy +from oslo_serialization import jsonutils from oslo_utils import excutils from pecan import hooks import webob @@ -190,6 +191,14 @@ def after(self, state): # we have to set the status_code here to prevent the catch_errors # middleware from turning this into a 500. state.response.status_code = 404 + # replace the original body with a JSON NotFound body + error_message = { + 'type': 'HTTPNotFound', + 'message': 'The resource could not be found.', + 'detail': '' + } + state.response.text = jsonutils.dumps(error_message) + state.response.content_type = 'application/json' return if is_single: diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py index ec87494a1bc..52c0ddfcca6 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py @@ -655,6 +655,14 @@ def _get_common_ips(ip_addresses, ip_networks): common_ips.add(str(ip_address)) return common_ips + # NOTE(slaweq): We can safely ignore any CIDR larger than /32 (for + # IPv4) or /128 (for IPv6) in the allowed_address_pairs, since such + # CIDRs cannot be set as a Virtual IP in OVN: only /32 and /128 + # CIDRs are accepted as Virtual IPs there. 
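+ # (``netaddr.IPNetwork.size`` is the number of addresses in the + # network, so the ``size == 1`` filter below matches exactly the + # /32 and /128 prefixes.) 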
+ address_pairs_to_check = [ + ip_net for ip_net in port_allowed_address_pairs_ip_addresses + if ip_net.size == 1] + for distributed_port in distributed_ports: distributed_port_ip_addresses = [ netaddr.IPAddress(fixed_ip['ip_address']) for fixed_ip in @@ -662,7 +670,7 @@ def _get_common_ips(ip_addresses, ip_networks): common_ips = _get_common_ips( distributed_port_ip_addresses, - port_allowed_address_pairs_ip_addresses) + address_pairs_to_check) if common_ips: err_msg = ( diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py index 0265c9768d1..45f4c4a0493 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py @@ -267,7 +267,7 @@ def _get_port_dhcp_options(self, port, ip_version): wait=tenacity.wait_random(min=2, max=3), stop=tenacity.stop_after_attempt(3), reraise=True) - def _wait_for_port_bindings_host(self, context, port_id): + def _wait_for_active_port_bindings_host(self, context, port_id): db_port = ml2_db.get_port(context, port_id) # This is already checked previously but, just to stay on # the safe side in case the port is deleted mid-operation @@ -280,11 +280,18 @@ def _wait_for_port_bindings_host(self, context, port_id): _('No port bindings information found for ' 'port %s') % port_id) - if not db_port.port_bindings[0].host: + active_binding = p_utils.get_port_binding_by_status_and_host( + db_port.port_bindings, const.ACTIVE) + if not active_binding: + raise RuntimeError( + _('No active port bindings information found for ' + 'port %s') % port_id) + + if not active_binding.host: raise RuntimeError( _('No hosting information found for port %s') % port_id) - return db_port + return active_binding def update_lsp_host_info(self, context, db_port, up=True): """Update the binding hosting information for the LSP. 
@@ -307,24 +314,29 @@ def update_lsp_host_info(self, context, db_port, up=True): if up: if not port_up: LOG.warning('Logical_Switch_Port %s host information not ' - 'updated, the port state is down') + 'updated, the port state is down', db_port.id) return if not db_port.port_bindings: return - if not db_port.port_bindings[0].host: + # There could be more than one port binding present; we need + # to find the active one + active_binding = p_utils.get_port_binding_by_status_and_host( + db_port.port_bindings, const.ACTIVE) + + if not active_binding or not active_binding.host: # NOTE(lucasgomes): There might be a sync issue between # the moment that this port was fetched from the database # and the hosting information being set, retry a few times try: - db_port = self._wait_for_port_bindings_host( + active_binding = self._wait_for_active_port_bindings_host( context, db_port.id) except RuntimeError as e: LOG.warning(e) return - host = db_port.port_bindings[0].host + host = active_binding.host ext_ids = ('external_ids', {ovn_const.OVN_HOST_ID_EXT_ID_KEY: host}) cmd.append( @@ -333,7 +345,7 @@ else: if port_up: LOG.warning('Logical_Switch_Port %s host information not ' - 'removed, the port state is up') + 'removed, the port state is up', db_port.id) return cmd.append( diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py index 9745ed183bb..a88d1c76a06 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py @@ -354,6 +354,33 @@ def sync_acls(self, ctx): add_acls.append(na) n_index += 1 + # Check any ACLs we found to add against existing ACLs, ignoring the + # SG rule ID key. This eliminates any false positives where the + # normalized CIDR for two SG rules is the same value, since there + # will only be a single ACL that matches exactly with the SG rule ID. + if add_acls: + def copy_acl_rem_id_key(acl): + acl_copy = acl.copy() + del acl_copy[ovn_const.OVN_SG_RULE_EXT_ID_KEY] + return acl_copy + + add_rem_acls = [] + # Make a list of non-default rule ACLs (they have a security group + # rule id). See ovn_default_acls code/comment above for more info. + nd_ovn_acls = [copy_acl_rem_id_key(oa) for oa in ovn_acls + if ovn_const.OVN_SG_RULE_EXT_ID_KEY in oa] + # We must copy here since we need to keep the original + # 'add_acl' intact for removal + for add_acl in add_acls: + add_acl_copy = copy_acl_rem_id_key(add_acl) + if add_acl_copy in nd_ovn_acls: + add_rem_acls.append(add_acl) + + # Remove any of the false-positive ACLs + LOG.warning('False-positive ACLs to remove: (%s)', add_rem_acls) + for add_rem in add_rem_acls: + add_acls.remove(add_rem) + if n_index < neutron_num: # We didn't find the OVN ACLs matching the Neutron ACLs # in "ovn_acls" and we are just adding the pending Neutron ACLs. 
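
For context on the false-positive check above: two security group rules whose
``remote_ip_prefix`` values normalize to the same network produce identical
OVN ACLs, differing only in the SG rule ID stored in ``external_ids``. A
minimal sketch of that collision, using the ``netaddr`` library the driver
already imports (the prefixes are hypothetical examples, mirroring the
functional test further below):

.. code-block:: python

    import netaddr

    # Two distinct SG rules with different remote_ip_prefix values...
    rule_a = netaddr.IPNetwork('192.168.0.10/24')
    rule_b = netaddr.IPNetwork('192.168.0.11/24')

    # ...normalize to the same CIDR, and therefore to the same ACL match.
    assert rule_a.cidr == rule_b.cidr == netaddr.IPNetwork('192.168.0.0/24')
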
diff --git a/neutron/services/auto_allocate/db.py b/neutron/services/auto_allocate/db.py index 21d3ec824db..5ca657d6ae1 100644 --- a/neutron/services/auto_allocate/db.py +++ b/neutron/services/auto_allocate/db.py @@ -54,7 +54,11 @@ def _ensure_external_network_default_value_callback( def _do_ensure_external_network_default_value_callback( context, request, orig, network): is_default = request.get(api_const.IS_DEFAULT) - is_external = request.get(external_net_apidef.EXTERNAL) + # the update request might not have external set to True, so + # verify by looking at the network + req_external = request.get(external_net_apidef.EXTERNAL) + net_external = network[external_net_apidef.EXTERNAL] + is_external = req_external is not None or net_external if is_default is None or not is_external: return if is_default: diff --git a/neutron/services/logapi/drivers/ovn/driver.py b/neutron/services/logapi/drivers/ovn/driver.py index dc0a025b5ed..8871dda6c80 100644 --- a/neutron/services/logapi/drivers/ovn/driver.py +++ b/neutron/services/logapi/drivers/ovn/driver.py @@ -158,6 +158,11 @@ def _set_acls_log(self, pgs, context, ovn_txn, actions_enabled, log_name): for pg in pgs: meter_name = self.meter_name if pg["name"] != ovn_const.OVN_DROP_PORT_GROUP_NAME: + if ovn_const.OVN_SG_EXT_ID_KEY not in pg["external_ids"]: + LOG.info("Port Group %s is not part of any security " + "group, skipping its network log " + "setting...", pg["name"]) + continue sg = sg_obj.SecurityGroup.get_sg_by_id( context, pg["external_ids"][ovn_const.OVN_SG_EXT_ID_KEY]) if not sg: @@ -426,6 +431,11 @@ def _set_neutron_acls_log(self, pgs, context, actions_enabled, log_name, for pg in pgs: meter_name = self.meter_name if pg['name'] != ovn_const.OVN_DROP_PORT_GROUP_NAME: + if ovn_const.OVN_SG_EXT_ID_KEY not in pg["external_ids"]: + LOG.info("Port Group %s is not part of any security " + "group, skipping its network log " + "setting...", pg["name"]) + continue sg = sg_obj.SecurityGroup.get_sg_by_id(context, pg['external_ids'][ovn_const.OVN_SG_EXT_ID_KEY]) if not sg: diff --git a/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py b/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py index d25ebd57cf6..7211c8f9ee8 100644 --- a/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py +++ b/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py @@ -23,6 +23,7 @@ from neutron.agent.ovn.agent import ovsdb as agent_ovsdb from neutron.agent.ovn.metadata import agent as metadata_agent from neutron.agent.ovn.metadata import server_socket +from neutron.agent.ovsdb import impl_idl from neutron.common.ovn import constants as ovn_const from neutron.common import utils as n_utils from neutron.tests.common import net_helpers @@ -46,6 +47,7 @@ def setUp(self, extensions, **kwargs): self.mock_chassis_name = mock.patch.object( agent_ovsdb, 'get_own_chassis_name', return_value=self.chassis_name).start() + self.ovs_idl_events = [] with mock.patch.object(metadata_agent.MetadataAgent, '_get_own_chassis_name', return_value=self.chassis_name): @@ -57,6 +59,21 @@ def _check_loaded_and_started_extensions(self, ovn_agent): self.assertEqual(EXTENSION_NAMES.get(_ext), loaded_ext.name) self.assertTrue(loaded_ext.is_started) + def _create_ovs_idl(self, ovn_agent): + for extension in ovn_agent.ext_manager: + self.ovs_idl_events += extension.obj.ovs_idl_events + self.ovs_idl_events = [e(ovn_agent) for e in + set(self.ovs_idl_events)] + ovsdb = impl_idl.api_factory() + 
ovsdb.idl.notify_handler.watch_events(self.ovs_idl_events) + + ovn_agent.ext_manager_api.ovs_idl = ovsdb + return ovsdb + + def _clear_events_ovs_idl(self): + self.ovn_agent.ovs_idl.idl_monitor.notify_handler.unwatch_events( + self.ovs_idl_events) + def _start_ovn_neutron_agent(self): conf = self.useFixture(fixture_config.Config()).conf conf.set_override('extensions', ','.join(self.extensions), @@ -75,7 +92,11 @@ def _start_ovn_neutron_agent(self): # Once eventlet is completely removed, this mock can be deleted. with mock.patch.object(ovn_neutron_agent.OVNNeutronAgent, 'wait'), \ mock.patch.object(server_socket.UnixDomainMetadataProxy, - 'wait'): + 'wait'), \ + mock.patch.object(ovn_neutron_agent.OVNNeutronAgent, + '_load_ovs_idl') as mock_load_ovs_idl: + agt._initialize_ext_manager() + mock_load_ovs_idl.return_value = self._create_ovs_idl(agt) agt.start() external_ids = agt.sb_idl.db_get( 'Chassis_Private', agt.chassis, 'external_ids').execute( @@ -85,8 +106,7 @@ def _start_ovn_neutron_agent(self): '0') self._check_loaded_and_started_extensions(agt) - - self.addCleanup(agt.ext_manager_api.ovs_idl.ovsdb_connection.stop) + self.addCleanup(self._clear_events_ovs_idl) if agt.ext_manager_api.sb_idl: self.addCleanup(agt.ext_manager_api.sb_idl.ovsdb_connection.stop) if agt.ext_manager_api.nb_idl: diff --git a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py index 592df2f4610..a39570be8cd 100644 --- a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py +++ b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py @@ -120,6 +120,9 @@ def _start_metadata_agent(self): check_error=True) self.assertEqual(external_ids[ovn_const.OVN_AGENT_OVN_BRIDGE], self.OVN_BRIDGE) + self.assertEqual( + external_ids[ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY], + '0') # Metadata agent will open connections to OVS and SB databases. # Close connections to them when the test ends, @@ -129,16 +132,6 @@ def _start_metadata_agent(self): return agt def test_metadata_agent_healthcheck(self): - chassis_row = self.sb_api.db_find( - AGENT_CHASSIS_TABLE, - ('name', '=', self.chassis_name)).execute( - check_error=True)[0] - - # Assert that, prior to creating a resource the metadata agent - # didn't populate the external_ids from the Chassis - self.assertNotIn(ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, - chassis_row['external_ids']) - # Let's list the agents to force the nb_cfg to be bumped on NB # db, which will automatically increment the nb_cfg counter on # NB_Global and make ovn-controller copy it over to SB_Global. Upon diff --git a/neutron/tests/functional/agent/ovsdb/native/test_helpers.py b/neutron/tests/functional/agent/ovsdb/native/test_helpers.py index 3b365533df4..6bacde09282 100644 --- a/neutron/tests/functional/agent/ovsdb/native/test_helpers.py +++ b/neutron/tests/functional/agent/ovsdb/native/test_helpers.py @@ -14,7 +14,7 @@ # under the License. 
from neutron_lib import constants as const -from ovsdbapp.backend.ovs_idl import event +from ovsdbapp.backend.ovs_idl import event as idl_event from neutron.agent.common import ovs_lib from neutron.agent.ovsdb.native import helpers @@ -23,14 +23,21 @@ from neutron.tests.functional import base -class WaitOvsManagerEvent(event.WaitEvent): +class WaitOvsManagerEvent(idl_event.WaitEvent): event_name = 'WaitOvsManagerEvent' - def __init__(self, manager_target): + def __init__(self, manager_target, inactivity_probe=None, event=None): table = 'Manager' - events = (self.ROW_CREATE,) + events = (self.ROW_CREATE,) if not event else (event,) conditions = (('target', '=', manager_target),) super().__init__(events, table, conditions, timeout=10) + self.inactivity_probe = inactivity_probe + + def match_fn(self, event, row, old): + if (self.inactivity_probe is None or + self.inactivity_probe == row.inactivity_probe[0]): + return True + return False class EnableConnectionUriTestCase(base.BaseSudoTestCase): @@ -78,8 +85,13 @@ def test_add_manager_overwrites_existing_manager(self): ovsdb_cfg_connection = 'tcp:127.0.0.1:%s' % _port manager_connection = 'ptcp:%s:127.0.0.1' % _port + inactivity_probe = 10 + manager_event = WaitOvsManagerEvent( + manager_connection, inactivity_probe=inactivity_probe) + ovs.ovsdb.idl.notify_handler.watch_event(manager_event) helpers.enable_connection_uri(ovsdb_cfg_connection, - inactivity_probe=10) + inactivity_probe=inactivity_probe) + manager_event.wait() self.addCleanup(ovs.ovsdb.remove_manager(manager_connection).execute) # First call of enable_connection_uri creates the manager # and the list returned by get_manager contains it: @@ -89,7 +101,13 @@ # The 2nd call of enable_connection_uri with a new value of # inactivity_probe keeps the original manager; only the # inactivity_probe value is updated: + inactivity_probe = 100 + manager_event = WaitOvsManagerEvent( + manager_connection, inactivity_probe=inactivity_probe, + event='update') + ovs.ovsdb.idl.notify_handler.watch_event(manager_event) helpers.enable_connection_uri(ovsdb_cfg_connection, - inactivity_probe=100) + inactivity_probe=inactivity_probe) + manager_event.wait() my_mans = ovs.ovsdb.get_manager().execute() self.assertIn(manager_connection, my_mans) diff --git a/neutron/tests/functional/pecan_wsgi/test_hooks.py b/neutron/tests/functional/pecan_wsgi/test_hooks.py index 9f43659aaef..ec091e3c066 100644 --- a/neutron/tests/functional/pecan_wsgi/test_hooks.py +++ b/neutron/tests/functional/pecan_wsgi/test_hooks.py @@ -288,6 +288,12 @@ def test_after_on_get_not_found(self): headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(404, response.status_int) + self.assertEqual( + { + 'type': 'HTTPNotFound', + 'message': 'The resource could not be found.', + 'detail': '' + }, jsonutils.loads(response.body)) self.assertEqual(1, self.mock_plugin.get_meh.call_count) def test_after_on_get_excludes_admin_attribute(self): diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py index 5140361b099..6fb51367be3 100644 --- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py +++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py @@ -1146,7 +1146,7 @@ def _build_acl_to_compare(self, acl, extra_fields=None): pass return acl_utils.filter_acl_dict(acl_to_compare, 
extra_fields) - def _validate_acls(self, should_match=True): + def _validate_acls(self, should_match=True, db_duplicate_port=None): # Get the neutron DB ACLs. db_acls = [] @@ -1194,6 +1194,19 @@ # Values are taken out of lists for comparison, since ACLs from the # OVN DB keep certain values in single-element lists if should_match: + if db_duplicate_port: + # If we have a duplicate port, that indicates there are two + # DB entries that map to the same ACL. Remove the extra from + # our comparison. + dup_acl = None + for acl in db_acls: + if (str(db_duplicate_port) in acl['match'] and + acl not in plugin_acls): + dup_acl = acl + break + # There should have been a duplicate + self.assertIsNotNone(dup_acl) + db_acls.remove(dup_acl) for acl in plugin_acls: if isinstance(acl['severity'], list) and acl['severity']: acl['severity'] = acl['severity'][0] @@ -1786,13 +1799,16 @@ def test_sync_fip_qos_policies(self): nb_synchronizer.sync_fip_qos_policies(ctx) self._validate_qos_records() - def _create_security_group_rule(self, sg_id, direction, tcp_port): + def _create_security_group_rule(self, sg_id, direction, tcp_port, + remote_ip_prefix=None): data = {'security_group_rule': {'security_group_id': sg_id, 'direction': direction, 'protocol': constants.PROTO_NAME_TCP, 'ethertype': constants.IPv4, 'port_range_min': tcp_port, 'port_range_max': tcp_port}} + if remote_ip_prefix: + data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix req = self.new_create_request('security-group-rules', data, self.fmt) res = req.get_response(self.api) sgr = self.deserialize(self.fmt, res) @@ -1863,6 +1879,20 @@ def test_sync_acls_with_logging(self): self._test_sync_acls_helper(test_log=True, log_event=log_const.DROP_EVENT) + def test_sync_acls_overlapping_cidr(self): + data = {'security_group': {'name': 'sgdup'}} + sg_req = self.new_create_request('security-groups', data) + res = sg_req.get_response(self.api) + sg = self.deserialize(self.fmt, res)['security_group'] + + # Add SG rules that map to the same ACL due to normalizing the CIDR + for ip_suffix in range(10, 12): + remote_ip_prefix = '192.168.0.' + str(ip_suffix) + '/24' + self._create_security_group_rule( + sg['id'], 'ingress', 9000, remote_ip_prefix=remote_ip_prefix) + + self._validate_acls(db_duplicate_port=9000) class TestOvnSbSync(base.TestOVNFunctionalBase): diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py index 2e94d96ee44..c03c8ebf892 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py @@ -99,9 +99,13 @@ def setUp(self): super().setUp() self.get_plugin = mock.patch( 'neutron_lib.plugins.directory.get_plugin').start() + self.get_pb_bsah = mock.patch( + 'neutron_lib.plugins.utils.' 
+ 'get_port_binding_by_status_and_host').start() # Disable tenacity wait for UT - self.ovn_client._wait_for_port_bindings_host.retry.wait = wait_none() + self.ovn_client._wait_for_active_port_bindings_host.retry.wait = ( + wait_none()) def test__add_router_ext_gw_default_route(self): plugin = mock.MagicMock() @@ -246,8 +250,9 @@ def test_update_lsp_host_info_up(self): context = mock.MagicMock() host_id = 'fake-binding-host-id' port_id = 'fake-port-id' - db_port = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host=host_id)]) + port_binding = mock.Mock(host=host_id) + db_port = mock.Mock(id=port_id, port_bindings=[port_binding]) + self.get_pb_bsah.return_value = port_binding self.ovn_client.update_lsp_host_info(context, db_port) @@ -259,14 +264,16 @@ def test_update_lsp_host_info_up_retry(self): context = mock.MagicMock() host_id = 'fake-binding-host-id' port_id = 'fake-port-id' + port_binding = mock.Mock(host=host_id) + port_binding_no_host = mock.Mock(host="") db_port_no_host = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host="")]) - db_port = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host=host_id)]) + id=port_id, port_bindings=[port_binding_no_host]) + self.get_pb_bsah.return_value = None with mock.patch.object( - self.ovn_client, '_wait_for_port_bindings_host') as mock_wait: - mock_wait.return_value = db_port + self.ovn_client, + '_wait_for_active_port_bindings_host') as mock_wait: + mock_wait.return_value = port_binding self.ovn_client.update_lsp_host_info(context, db_port_no_host) # Assert _wait_for_port_bindings_host was called @@ -282,9 +289,11 @@ def test_update_lsp_host_info_up_retry_fail(self): port_id = 'fake-port-id' db_port_no_host = mock.Mock( id=port_id, port_bindings=[mock.Mock(host="")]) + self.get_pb_bsah.return_value = None with mock.patch.object( - self.ovn_client, '_wait_for_port_bindings_host') as mock_wait: + self.ovn_client, + '_wait_for_active_port_bindings_host') as mock_wait: mock_wait.side_effect = RuntimeError("boom") self.ovn_client.update_lsp_host_info(context, db_port_no_host) @@ -316,39 +325,45 @@ def test_update_lsp_host_info_trunk_subport(self): self.nb_idl.db_set.assert_not_called() @mock.patch.object(ml2_db, 'get_port') - def test__wait_for_port_bindings_host(self, mock_get_port): + def test__wait_for_active_port_bindings_host(self, mock_get_port): context = mock.MagicMock() host_id = 'fake-binding-host-id' port_id = 'fake-port-id' + port_binding = mock.Mock(host=host_id) + port_binding_no_host = mock.Mock(host="") db_port_no_host = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host="")]) + id=port_id, port_bindings=[port_binding_no_host]) db_port = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host=host_id)]) + id=port_id, port_bindings=[port_binding]) + # no active binding, no binding with host, binding with host + self.get_pb_bsah.side_effect = (None, port_binding_no_host, + port_binding) - mock_get_port.side_effect = (db_port_no_host, db_port) + mock_get_port.side_effect = (db_port_no_host, db_port_no_host, db_port) - ret = self.ovn_client._wait_for_port_bindings_host( + ret = self.ovn_client._wait_for_active_port_bindings_host( context, port_id) - self.assertEqual(ret, db_port) + self.assertEqual(ret, port_binding) expected_calls = [mock.call(context, port_id), mock.call(context, port_id)] mock_get_port.assert_has_calls(expected_calls) @mock.patch.object(ml2_db, 'get_port') - def test__wait_for_port_bindings_host_fail(self, mock_get_port): + def test__wait_for_active_port_bindings_host_fail(self, mock_get_port): context = 
mock.MagicMock() port_id = 'fake-port-id' db_port_no_pb = mock.Mock(id=port_id, port_bindings=[]) db_port_no_host = mock.Mock( id=port_id, port_bindings=[mock.Mock(host="")]) + self.get_pb_bsah.return_value = None mock_get_port.side_effect = ( db_port_no_pb, db_port_no_host, db_port_no_host) self.assertRaises( - RuntimeError, self.ovn_client._wait_for_port_bindings_host, + RuntimeError, self.ovn_client._wait_for_active_port_bindings_host, context, port_id) expected_calls = [mock.call(context, port_id), diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py index 7bd6e1bc386..27ac90d9d2b 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py @@ -3212,17 +3212,23 @@ def test_create_port_with_allowed_address_pairs(self): 'mac_address': port2['mac_address']}], port2['allowed_address_pairs']) - # Now test the same but giving a subnet as allowed address pair - self._make_port( + # Now test the same but giving a subnet as allowed address + # pair. This should be fine as we treat only /32 and /128 IPs + # in allowed_address_pairs as Virtual IPs; there is no need to + # block bigger CIDRs since they don't break the metadata service + new_port = self._make_port( self.fmt, network['network']['id'], allowed_address_pairs=[{'ip_address': '10.0.0.2/26'}], - expected_res_status=exc.HTTPBadRequest.code, - arg_list=('allowed_address_pairs',)) + arg_list=('allowed_address_pairs',))['port'] port3 = self._show('ports', port1['id'])['port'] self.assertEqual( [{'ip_address': '10.0.0.3', 'mac_address': port3['mac_address']}], port3['allowed_address_pairs']) + self.assertEqual( + [{'ip_address': '10.0.0.2/26', + 'mac_address': new_port['mac_address']}], + new_port['allowed_address_pairs']) class OVNMechanismDriverTestCase(MechDriverSetupBase, diff --git a/neutron/tests/unit/services/auto_allocate/test_db.py b/neutron/tests/unit/services/auto_allocate/test_db.py index 7eb505a41c0..e3c2c77021a 100644 --- a/neutron/tests/unit/services/auto_allocate/test_db.py +++ b/neutron/tests/unit/services/auto_allocate/test_db.py @@ -107,7 +107,7 @@ def test_ensure_external_network_default_value_no_default(self): "NETWORK", "precommit_update", "test_plugin", payload=events.DBEventPayload( self.ctx, request_body=kwargs['request'], - states=(kwargs['network'],))) + states=({}, kwargs['network']))) get_external_nets.assert_not_called() get_external_net.assert_not_called() network_mock.update.assert_not_called() @@ -190,6 +190,111 @@ def test_ensure_external_network_default_value_default_existed(self): get_external_net.assert_not_called() network_mock.update.assert_not_called() + def test_ensure_external_network_default_value_update(self): + network_id = uuidutils.generate_uuid() + kwargs = { + "context": self.ctx, + "request": { + "id": network_id, + api_const.IS_DEFAULT: True, + }, + "network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: True, + }, + "original_network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: True, + } + } + network_mock = mock.MagicMock(network_id=network_id, is_default=False) + with mock.patch( + 'neutron.objects.network.ExternalNetwork.get_objects', + return_value=[network_mock] + ) as get_external_nets, mock.patch( + 'neutron.objects.network.ExternalNetwork.get_object', + return_value=network_mock + ) as 
get_external_net: + db._ensure_external_network_default_value_callback( + "NETWORK", "precommit_update", "test_plugin", + payload=events.DBEventPayload( + self.ctx, request_body=kwargs['request'], + states=(kwargs['original_network'], kwargs['network']))) + get_external_nets.assert_called_once_with( + self.ctx, _pager=mock.ANY, is_default=True) + get_external_net.assert_called_once_with( + self.ctx, network_id=network_id) + network_mock.update.assert_called_once_with() + + def test_ensure_external_network_default_value_internal_update(self): + network_id = uuidutils.generate_uuid() + kwargs = { + "context": self.ctx, + "request": { + "id": network_id, + api_const.IS_DEFAULT: True, + }, + "network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: False, + }, + "original_network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: False, + } + } + network_mock = mock.MagicMock(network_id='fake_id', is_default=False) + with mock.patch( + 'neutron.objects.network.ExternalNetwork.get_objects', + return_value=[network_mock] + ) as get_external_nets, mock.patch( + 'neutron.objects.network.ExternalNetwork.get_object', + return_value=network_mock + ) as get_external_net: + db._ensure_external_network_default_value_callback( + "NETWORK", "precommit_update", "test_plugin", + payload=events.DBEventPayload( + self.ctx, request_body=kwargs['request'], + states=(kwargs['original_network'], kwargs['network']))) + get_external_nets.assert_not_called() + get_external_net.assert_not_called() + network_mock.update.assert_not_called() + + def test_ensure_external_network_default_value_internal_create(self): + network_id = uuidutils.generate_uuid() + kwargs = { + "context": self.ctx, + "request": { + "id": network_id, + api_const.IS_DEFAULT: True, + }, + "network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: False, + }, + } + network_mock = mock.MagicMock(network_id='fake_id', is_default=False) + with mock.patch( + 'neutron.objects.network.ExternalNetwork.get_objects', + return_value=[network_mock] + ) as get_external_nets, mock.patch( + 'neutron.objects.network.ExternalNetwork.get_object', + return_value=network_mock + ) as get_external_net: + db._ensure_external_network_default_value_callback( + "NETWORK", "precommit_update", "test_plugin", + payload=events.DBEventPayload( + self.ctx, request_body=kwargs['request'], + states=({}, kwargs['network']))) + get_external_nets.assert_not_called() + get_external_net.assert_not_called() + network_mock.update.assert_not_called() + def test__provision_external_connectivity_expected_cleanup(self): """Test that the right resources are cleaned up.""" subnets = [