From 4fb3d6f5a512eb6598dd282f7759d8066ac640f3 Mon Sep 17 00:00:00 2001
From: Rodolfo Alonso Hernandez
Date: Tue, 5 Aug 2025 19:27:21 +0000
Subject: [PATCH 01/14] [OVN] The OVS IDL connection is global for all tests

In the functional tests, the OVN databases are created per test and
removed at the end. That leaves no trace of the local OVN databases
when running on a system that is also running OpenStack. But the OVS
database is global and shared by all the tests and running processes.
It is necessary to use the OVS IDL ``api_factory`` method, which
returns a global object for all tests.

Closes-Bug: #2119453
Signed-off-by: Rodolfo Alonso Hernandez
Change-Id: I7b438ba76e6f95d533d8d7bc4aaeeba0007551e3
(cherry picked from commit 23994f6ee09d7a3d726bf7ba36d770dcdb85945c)
---
 .../agent/ovn/agent/test_ovn_neutron_agent.py | 25 ++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py b/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py
index d25ebd57cf6..3ff53017ed9 100644
--- a/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py
+++ b/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py
@@ -23,6 +23,7 @@
 from neutron.agent.ovn.agent import ovsdb as agent_ovsdb
 from neutron.agent.ovn.metadata import agent as metadata_agent
 from neutron.agent.ovn.metadata import server_socket
+from neutron.agent.ovsdb import impl_idl
 from neutron.common.ovn import constants as ovn_const
 from neutron.common import utils as n_utils
 from neutron.tests.common import net_helpers
@@ -46,6 +47,7 @@ def setUp(self, extensions, **kwargs):
         self.mock_chassis_name = mock.patch.object(
             agent_ovsdb, 'get_own_chassis_name',
             return_value=self.chassis_name).start()
+        self.ovs_idl_events = []
         with mock.patch.object(metadata_agent.MetadataAgent,
                                '_get_own_chassis_name',
                                return_value=self.chassis_name):
@@ -57,6 +59,21 @@ def _check_loaded_and_started_extensions(self, ovn_agent):
             self.assertEqual(EXTENSION_NAMES.get(_ext), loaded_ext.name)
             self.assertTrue(loaded_ext.is_started)

+    def _create_ovs_idl(self, ovn_agent):
+        for extension in ovn_agent.ext_manager:
+            self.ovs_idl_events += extension.obj.ovs_idl_events
+        self.ovs_idl_events = [e(ovn_agent) for e in
+                               set(self.ovs_idl_events)]
+        ovsdb = impl_idl.api_factory()
+        ovsdb.idl.notify_handler.watch_events(self.ovs_idl_events)
+
+        ovn_agent.ext_manager_api.ovs_idl = ovsdb
+        return ovsdb
+
+    def _clear_events_ovs_idl(self):
+        self.ovn_agent.ovs_idl.idl_monitor.notify_handler.unwatch_events(
+            self.ovs_idl_events)
+
     def _start_ovn_neutron_agent(self):
         conf = self.useFixture(fixture_config.Config()).conf
         conf.set_override('extensions', ','.join(self.extensions),
@@ -75,7 +92,10 @@ def _start_ovn_neutron_agent(self):
         # Once eventlet is completely removed, this mock can be deleted.
with mock.patch.object(ovn_neutron_agent.OVNNeutronAgent, 'wait'), \ mock.patch.object(server_socket.UnixDomainMetadataProxy, - 'wait'): + 'wait'), \ + mock.patch.object(ovn_neutron_agent.OVNNeutronAgent, + '_load_ovs_idl') as mock_load_ovs_idl: + mock_load_ovs_idl.return_value = self._create_ovs_idl(agt) agt.start() external_ids = agt.sb_idl.db_get( 'Chassis_Private', agt.chassis, 'external_ids').execute( @@ -85,8 +105,7 @@ def _start_ovn_neutron_agent(self): '0') self._check_loaded_and_started_extensions(agt) - - self.addCleanup(agt.ext_manager_api.ovs_idl.ovsdb_connection.stop) + self.addCleanup(self._clear_events_ovs_idl) if agt.ext_manager_api.sb_idl: self.addCleanup(agt.ext_manager_api.sb_idl.ovsdb_connection.stop) if agt.ext_manager_api.nb_idl: From fc1039f8eb946825ffbd3d716af9d40c16038223 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Fri, 6 Dec 2024 18:33:57 -0500 Subject: [PATCH 02/14] Remove false-positive ACLs in OVN DB sync If there are two (or more) security group rules that have the same normalized cidr, only one of them will match the OVN ACL exactly. The other will match everything except the rule ID. Go through any items found a second time, ignoring the rule ID to remove the false-positives. Closes-bug: #2087822 Change-Id: Ia5a76973f80be1369753ebf42fba2fc19690a229 Signed-off-by: Brian Haley (cherry picked from commit 1c3eac52b724dd5f095ede0d17db432796c41516) --- .../ovn/mech_driver/ovsdb/ovn_db_sync.py | 27 +++++++++++++++ .../ovn/mech_driver/ovsdb/test_ovn_db_sync.py | 34 +++++++++++++++++-- 2 files changed, 59 insertions(+), 2 deletions(-) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py index 9745ed183bb..a88d1c76a06 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py @@ -354,6 +354,33 @@ def sync_acls(self, ctx): add_acls.append(na) n_index += 1 + # Check any ACLs we found to add against existing ACLs, ignoring the + # SG rule ID key. This eliminates any false-positives where the + # normalized cidr for two SG rules is the same value, since there + # will only be a single ACL that matches exactly with the SG rule ID. + if add_acls: + def copy_acl_rem_id_key(acl): + acl_copy = acl.copy() + del acl_copy[ovn_const.OVN_SG_RULE_EXT_ID_KEY] + return acl_copy + + add_rem_acls = [] + # Make a list of non-default rule ACLs (they have a security group + # rule id). See ovn_default_acls code/comment above for more info. + nd_ovn_acls = [copy_acl_rem_id_key(oa) for oa in ovn_acls + if ovn_const.OVN_SG_RULE_EXT_ID_KEY in oa] + # We must copy here since we need to keep the original + # 'add_acl' intact for removal + for add_acl in add_acls: + add_acl_copy = copy_acl_rem_id_key(add_acl) + if add_acl_copy in nd_ovn_acls: + add_rem_acls.append(add_acl) + + # Remove any of the false-positive ACLs + LOG.warning('False-positive ACLs to remove: (%s)', add_rem_acls) + for add_rem in add_rem_acls: + add_acls.remove(add_rem) + if n_index < neutron_num: # We didn't find the OVN ACLs matching the Neutron ACLs # in "ovn_acls" and we are just adding the pending Neutron ACLs. 
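The de-duplication in the hunk above hinges on dict equality once the rule ID
key is stripped. A minimal standalone sketch of that comparison (not part of
the patch; the key name mirrors ``ovn_const.OVN_SG_RULE_EXT_ID_KEY``, whose
exact value and the match strings are illustrative assumptions):

    # Two ACL dicts that differ only in the SG rule ID key describe the
    # same OVN ACL; stripping that key before comparing exposes the
    # false-positive duplicate.
    OVN_SG_RULE_EXT_ID_KEY = 'neutron:security_group_rule_id'

    def copy_acl_rem_id_key(acl):
        acl_copy = acl.copy()
        del acl_copy[OVN_SG_RULE_EXT_ID_KEY]
        return acl_copy

    acl_a = {'match': 'ip4.src == 192.168.0.0/24',
             OVN_SG_RULE_EXT_ID_KEY: 'rule-1'}
    acl_b = {'match': 'ip4.src == 192.168.0.0/24',
             OVN_SG_RULE_EXT_ID_KEY: 'rule-2'}

    assert acl_a != acl_b                        # the rule IDs differ
    assert copy_acl_rem_id_key(acl_a) == copy_acl_rem_id_key(acl_b)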
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
index 5140361b099..6fb51367be3 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
@@ -1146,7 +1146,7 @@ def _build_acl_to_compare(self, acl, extra_fields=None):
             pass
         return acl_utils.filter_acl_dict(acl_to_compare, extra_fields)

-    def _validate_acls(self, should_match=True):
+    def _validate_acls(self, should_match=True, db_duplicate_port=None):

         # Get the neutron DB ACLs.
         db_acls = []
@@ -1194,6 +1194,19 @@ def _validate_acls(self, should_match=True):
         # Values taken out from list for comparison, since ACLs from OVN DB
         # have certain values on a list of just one object
         if should_match:
+            if db_duplicate_port:
+                # If we have a duplicate port, that indicates there are two
+                # DB entries that map to the same ACL. Remove the extra from
+                # our comparison.
+                dup_acl = None
+                for acl in db_acls:
+                    if (str(db_duplicate_port) in acl['match'] and
+                            acl not in plugin_acls):
+                        dup_acl = acl
+                        break
+                # There should have been a duplicate
+                self.assertIsNotNone(dup_acl)
+                db_acls.remove(dup_acl)
             for acl in plugin_acls:
                 if isinstance(acl['severity'], list) and acl['severity']:
                     acl['severity'] = acl['severity'][0]
@@ -1786,13 +1799,16 @@ def test_sync_fip_qos_policies(self):
             nb_synchronizer.sync_fip_qos_policies(ctx)
         self._validate_qos_records()

-    def _create_security_group_rule(self, sg_id, direction, tcp_port):
+    def _create_security_group_rule(self, sg_id, direction, tcp_port,
+                                    remote_ip_prefix=None):
         data = {'security_group_rule': {'security_group_id': sg_id,
                                         'direction': direction,
                                         'protocol': constants.PROTO_NAME_TCP,
                                         'ethertype': constants.IPv4,
                                         'port_range_min': tcp_port,
                                         'port_range_max': tcp_port}}
+        if remote_ip_prefix:
+            data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
         req = self.new_create_request('security-group-rules', data, self.fmt)
         res = req.get_response(self.api)
         sgr = self.deserialize(self.fmt, res)
@@ -1863,6 +1879,20 @@ def test_sync_acls_with_logging(self):
         self._test_sync_acls_helper(test_log=True,
                                     log_event=log_const.DROP_EVENT)

+    def test_sync_acls_overlapping_cidr(self):
+        data = {'security_group': {'name': 'sgdup'}}
+        sg_req = self.new_create_request('security-groups', data)
+        res = sg_req.get_response(self.api)
+        sg = self.deserialize(self.fmt, res)['security_group']
+
+        # Add SG rules that map to the same ACL due to normalizing the cidr
+        for ip_suffix in range(10, 12):
+            remote_ip_prefix = '192.168.0.' + str(ip_suffix) + '/24'
+            self._create_security_group_rule(
+                sg['id'], 'ingress', 9000, remote_ip_prefix=remote_ip_prefix)
+
+        self._validate_acls(db_duplicate_port=9000)
+

 class TestOvnSbSync(base.TestOVNFunctionalBase):

From cf83a5906e26ba3dceeef2d1ce03995b14fb5b7c Mon Sep 17 00:00:00 2001
From: Rodolfo Alonso Hernandez
Date: Fri, 14 Nov 2025 15:29:18 +0100
Subject: [PATCH 03/14] [OVN] Initialize OVN agent in ``start`` method

The ``OVNAgentExtensionAPI`` and ``OVNAgentExtensionManager`` instances
create different thread instances. The OVN agent is now using the
``oslo.service`` threading implementation (which uses ``cotyledon``).
This library creates a fork of the main process to execute the
``ServiceWrapper`` instance, where the agent is running.
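A standalone sketch of the underlying hazard (POSIX-only, illustrative, not
project code): a thread started in the parent before the fork does not exist
in the forked child, so anything spawned in ``__init__`` is lost in the
worker process:

    import os
    import threading
    import time

    t = threading.Thread(target=time.sleep, args=(60,), daemon=True)
    t.start()                            # started in the parent process
    assert threading.active_count() == 2
    pid = os.fork()                      # cotyledon-style worker fork
    if pid == 0:
        # Only the thread that called fork() survives in the child.
        assert threading.active_count() == 1
        os._exit(0)
    os.waitpid(pid, 0)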
Any thread should therefore be created in the child process, in the
``oslo_service.service.Service.start`` method.

Closes-Bug: #2131540
Signed-off-by: Rodolfo Alonso Hernandez
Change-Id: I97d52e8fd2236b7d9210174d05f01c81a2edcd0d
(cherry picked from commit 5e92bbfa5080b219de21829f42e5f372001fc78f)
---
 neutron/agent/ovn/agent/ovn_neutron_agent.py  | 20 ++++++++++++++++---
 .../agent/ovn/agent/test_ovn_neutron_agent.py |  1 +
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/neutron/agent/ovn/agent/ovn_neutron_agent.py b/neutron/agent/ovn/agent/ovn_neutron_agent.py
index ffebdc4ba91..9be7b0060c8 100644
--- a/neutron/agent/ovn/agent/ovn_neutron_agent.py
+++ b/neutron/agent/ovn/agent/ovn_neutron_agent.py
@@ -58,9 +58,8 @@ def __init__(self, conf):
         self._chassis = None
         self._chassis_id = None
         self._ovn_bridge = None
-        self.ext_manager_api = ext_mgr.OVNAgentExtensionAPI()
-        self.ext_manager = ext_mgr.OVNAgentExtensionManager(self._conf)
-        self.ext_manager.initialize(None, 'ovn', self)
+        self.ext_manager_api = None
+        self.ext_manager = None

     def __getitem__(self, name):
         """Return the named extension object from ``self.ext_manager``"""
@@ -159,7 +158,22 @@ def update_neutron_sb_cfg_key(self):
             'Chassis_Private', self.chassis,
             ('external_ids', external_ids)).execute(check_error=True)

+    def _initialize_ext_manager(self):
+        """Initialize the extension manager and the extension manager API.
+
+        This method must be called once, outside the ``__init__`` method and
+        at the beginning of the ``start`` method.
+        """
+        if not self.ext_manager:
+            self.ext_manager_api = ext_mgr.OVNAgentExtensionAPI()
+            self.ext_manager = ext_mgr.OVNAgentExtensionManager(self._conf)
+            self.ext_manager.initialize(None, 'ovn', self)
+
     def start(self):
+        # This must be the first operation in the `start` method.
+        self._initialize_ext_manager()
+
+        # Extension manager configuration.
         self.ext_manager_api.ovs_idl = self._load_ovs_idl()
         self.load_config()
         # Before executing "_load_sb_idl", it is needed to execute
diff --git a/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py b/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py
index 3ff53017ed9..7211c8f9ee8 100644
--- a/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py
+++ b/neutron/tests/functional/agent/ovn/agent/test_ovn_neutron_agent.py
@@ -95,6 +95,7 @@ def _start_ovn_neutron_agent(self):
                 'wait'), \
                 mock.patch.object(ovn_neutron_agent.OVNNeutronAgent,
                                   '_load_ovs_idl') as mock_load_ovs_idl:
+            agt._initialize_ext_manager()
             mock_load_ovs_idl.return_value = self._create_ovs_idl(agt)
             agt.start()
             external_ids = agt.sb_idl.db_get(

From 53c2c8d6e0cd7dd889518c34e4a495e557d74eec Mon Sep 17 00:00:00 2001
From: Elvira Garcia
Date: Tue, 18 Nov 2025 17:30:04 +0100
Subject: [PATCH 04/14] [SGL] Ignore port groups that don't come from SGs

Previously, the OVN logapi driver assumed that Port Groups and ACLs
were only created by Security Groups. This is not true, since the
FWaaS plugin also manages ACLs and Port Groups to apply its rules.
This means that port groups do not necessarily have a security group
ID referenced in their external IDs.
Closes-Bug: #2131209 Change-Id: I7f85077ce344d4d0e46190674fc79c42a9eeae9a Signed-off-by: Elvira Garcia (cherry picked from commit c0f6145b871de7fa750fbf09c125d6feb1829031) --- neutron/services/logapi/drivers/ovn/driver.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/neutron/services/logapi/drivers/ovn/driver.py b/neutron/services/logapi/drivers/ovn/driver.py index dc0a025b5ed..8871dda6c80 100644 --- a/neutron/services/logapi/drivers/ovn/driver.py +++ b/neutron/services/logapi/drivers/ovn/driver.py @@ -158,6 +158,11 @@ def _set_acls_log(self, pgs, context, ovn_txn, actions_enabled, log_name): for pg in pgs: meter_name = self.meter_name if pg["name"] != ovn_const.OVN_DROP_PORT_GROUP_NAME: + if ovn_const.OVN_SG_EXT_ID_KEY not in pg["external_ids"]: + LOG.info("Port Group %s is not part of any security " + "group, skipping its network log " + "setting...", pg["name"]) + continue sg = sg_obj.SecurityGroup.get_sg_by_id( context, pg["external_ids"][ovn_const.OVN_SG_EXT_ID_KEY]) if not sg: @@ -426,6 +431,11 @@ def _set_neutron_acls_log(self, pgs, context, actions_enabled, log_name, for pg in pgs: meter_name = self.meter_name if pg['name'] != ovn_const.OVN_DROP_PORT_GROUP_NAME: + if ovn_const.OVN_SG_EXT_ID_KEY not in pg["external_ids"]: + LOG.info("Port Group %s is not part of any security " + "group, skipping its network log " + "setting...", pg["name"]) + continue sg = sg_obj.SecurityGroup.get_sg_by_id(context, pg['external_ids'][ovn_const.OVN_SG_EXT_ID_KEY]) if not sg: From 72165d09aa203cdb0b204a2c33ebbee3358d1f63 Mon Sep 17 00:00:00 2001 From: Sergey Kraynev Date: Sat, 8 Nov 2025 21:19:17 +0400 Subject: [PATCH 05/14] Replace response body in after hook for 404 error Closes-Bug: 2130972 Change-Id: I662b3eb7fb8c17d0ad30b10e8537d4a142003256 Signed-off-by: Sergey Kraynev (cherry picked from commit d95e3e5eaf24ee788d65255d669671e175f25208) --- neutron/pecan_wsgi/hooks/policy_enforcement.py | 9 +++++++++ neutron/tests/functional/pecan_wsgi/test_hooks.py | 6 ++++++ 2 files changed, 15 insertions(+) diff --git a/neutron/pecan_wsgi/hooks/policy_enforcement.py b/neutron/pecan_wsgi/hooks/policy_enforcement.py index a6b469edc40..cba9d1573ad 100644 --- a/neutron/pecan_wsgi/hooks/policy_enforcement.py +++ b/neutron/pecan_wsgi/hooks/policy_enforcement.py @@ -16,6 +16,7 @@ from neutron_lib import constants as const from oslo_log import log as logging from oslo_policy import policy as oslo_policy +from oslo_serialization import jsonutils from oslo_utils import excutils from pecan import hooks import webob @@ -190,6 +191,14 @@ def after(self, state): # we have to set the status_code here to prevent the catch_errors # middleware from turning this into a 500. 
state.response.status_code = 404 + # replace the original body on NotFound body + error_message = { + 'type': 'HTTPNotFound', + 'message': 'The resource could not be found.', + 'detail': '' + } + state.response.text = jsonutils.dumps(error_message) + state.response.content_type = 'application/json' return if is_single: diff --git a/neutron/tests/functional/pecan_wsgi/test_hooks.py b/neutron/tests/functional/pecan_wsgi/test_hooks.py index 9f43659aaef..ec091e3c066 100644 --- a/neutron/tests/functional/pecan_wsgi/test_hooks.py +++ b/neutron/tests/functional/pecan_wsgi/test_hooks.py @@ -288,6 +288,12 @@ def test_after_on_get_not_found(self): headers={'X-Project-Id': 'tenid'}, expect_errors=True) self.assertEqual(404, response.status_int) + self.assertEqual( + { + 'type': 'HTTPNotFound', + 'message': 'The resource could not be found.', + 'detail': '' + }, jsonutils.loads(response.body)) self.assertEqual(1, self.mock_plugin.get_meh.call_count) def test_after_on_get_excludes_admin_attribute(self): From f3a399717130cb9b0451b635e3dd109bfbe8b257 Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 26 Nov 2025 12:40:42 +0100 Subject: [PATCH 06/14] [OVN] Add 'qos-bw-minimum-ingress' ML2 extension Closes-Bug: #2132982 Signed-off-by: Rodolfo Alonso Hernandez Change-Id: I86ed1aa473a41ece4478e19b1e0f37aadaf93238 (cherry picked from commit 45b8dfe6a8d06ba5dc39ab28d2cb5adc4e7a8f5a) --- neutron/common/ovn/extensions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neutron/common/ovn/extensions.py b/neutron/common/ovn/extensions.py index afd6bf4f900..f2b15634096 100644 --- a/neutron/common/ovn/extensions.py +++ b/neutron/common/ovn/extensions.py @@ -70,6 +70,7 @@ from neutron_lib.api.definitions import qinq from neutron_lib.api.definitions import qos from neutron_lib.api.definitions import qos_bw_limit_direction +from neutron_lib.api.definitions import qos_bw_minimum_ingress from neutron_lib.api.definitions import qos_default from neutron_lib.api.definitions import qos_gateway_ip from neutron_lib.api.definitions import qos_rule_type_details @@ -171,6 +172,7 @@ qinq.ALIAS, qos.ALIAS, qos_bw_limit_direction.ALIAS, + qos_bw_minimum_ingress.ALIAS, qos_default.ALIAS, qos_rule_type_details.ALIAS, qos_rule_type_filter.ALIAS, From c1219812bf18d6babbac13f2affbf2e4eb3df826 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 20 Nov 2025 18:19:33 -0500 Subject: [PATCH 07/14] Fix incorrect log message in ovn_client String contained %s but had no argument, added port id. 
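For illustration only (a standalone sketch, not part of the patch): with a
'%s' placeholder and no argument, Python logging never interpolates, so the
literal '%s' ends up in the log line:

    import logging

    logging.basicConfig()
    LOG = logging.getLogger('demo')

    # Emitted literally, placeholder and all:
    # "Logical_Switch_Port %s host information not updated"
    LOG.warning('Logical_Switch_Port %s host information not updated')

    # With the argument supplied, the port ID fills the placeholder.
    LOG.warning('Logical_Switch_Port %s host information not updated',
                'port-id-1')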
Closes-bug: #2132088
Change-Id: I36d3c036c9795603d93b01bfd94d2ae80dc2ec7b
Signed-off-by: Brian Haley
(cherry picked from commit 807e6a5cf633cf7e51534bec655b98a1281861bb)
---
 .../plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py
index 0265c9768d1..f62f91ee95a 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py
@@ -307,7 +307,7 @@ def update_lsp_host_info(self, context, db_port, up=True):
         if up:
             if not port_up:
                 LOG.warning('Logical_Switch_Port %s host information not '
-                            'updated, the port state is down')
+                            'updated, the port state is down', db_port.id)
                 return

             if not db_port.port_bindings:
@@ -333,7 +333,7 @@ def update_lsp_host_info(self, context, db_port, up=True):
         else:
             if port_up:
                 LOG.warning('Logical_Switch_Port %s host information not '
-                            'removed, the port state is up')
+                            'removed, the port state is up', db_port.id)
                 return

             cmd.append(

From 7360e1f1d682f3ce042dd4901164c17b1efd992d Mon Sep 17 00:00:00 2001
From: Dmitriy Rabotyagov
Date: Fri, 3 Oct 2025 18:16:13 +0200
Subject: [PATCH 08/14] Append content-encoding header

webob does not automatically detect and add the content-encoding
header if the body is compressed. Instead, this header needs to be
passed to webob.Response as a headerlist argument. Only then does
`decode_content()` perform decompression.

However, Nova does not add 'content-encoding' in the response, and
even if it did, only selected headers are currently consumed by the
agent.

Thus, we generalize the approach for handling headers and ensure that
'content-encoding' is present when we detect a magic file signature at
the beginning of the data.

Closes-Bug: #2120723
Change-Id: I45d99e2bae3768f2258f39c9b92e7c2cbd080e7c
Signed-off-by: Dmitriy Rabotyagov
(cherry picked from commit 67969c147baaca18dc436b674f73c0eb7485cc6c)
---
 neutron/common/metadata.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/neutron/common/metadata.py b/neutron/common/metadata.py
index b17765b8864..d3ade12235c 100644
--- a/neutron/common/metadata.py
+++ b/neutron/common/metadata.py
@@ -33,7 +33,10 @@
 PROXY_SERVICE_NAME = 'haproxy'
 PROXY_SERVICE_CMD = 'haproxy'

-CONTENT_ENCODERS = ('gzip', 'deflate')
+CONTENT_ENCODERS = {
+    'gzip': b'\x1f\x8b\x08',
+    'deflate': b'\x1f\x8b\x08'
+}


 class InvalidUserOrGroupException(Exception):
@@ -159,15 +162,21 @@ class MetadataProxyHandlerBaseSocketServer(
         metaclass=abc.ABCMeta):

     @staticmethod
     def _http_response(http_response, request):
+        headerlist = list(http_response.headers.items())
+        # We detect if content is compressed by magic signature,
+        # when `content-encoding` is not present.
+        if not http_response.headers.get('content-encoding'):
+            if http_response.content[:3] == CONTENT_ENCODERS['gzip']:
+                headerlist.append(('content-encoding', 'gzip'))
+
         _res = webob.Response(
             body=http_response.content,
             status=http_response.status_code,
-            content_type=http_response.headers['content-type'],
-            charset=http_response.encoding)
+            headerlist=headerlist)
         # The content of the response is decoded depending on the
         # "Content-Encoding" header, if present. The operation is limited to
         # ("gzip", "deflate"), as is in the ``webob.response.Response`` class.
-        if _res.content_encoding in CONTENT_ENCODERS:
+        if _res.content_encoding in CONTENT_ENCODERS.keys():
             _res.decode_content()

         # NOTE(ralonsoh): there should be a better way to format the HTTP

From abfca726e59492a727297f1935875765f4bad577 Mon Sep 17 00:00:00 2001
From: Slawek Kaplonski
Date: Thu, 27 Nov 2025 14:04:53 +0100
Subject: [PATCH 09/14] Don't prevent setting CIDRs as allowed_address_pair
 for port

Previously, with [1], we blocked the possibility of setting as an
allowed address pair for a port any IP or CIDR which contains the IP
address assigned to the distributed metadata port in the same network.
It was done that way because setting the distributed metadata IP
address as an allowed address pair for any port in the network breaks
the metadata service for all of the ports in that network.

But this restriction was too strict, as it also prevented setting
CIDRs bigger than /32 or /128 in the allowed_address_pairs if the CIDR
contained the distributed metadata port IP. For example:

- distributed metadata port IP address 10.0.0.2
- allowed address pairs set for a port in that network:
  - 10.0.0.3 - allowed
  - 10.0.0.1/26 - not allowed, as 10.0.0.2 belongs to that CIDR.

In such a case, however, when a CIDR is set as an allowed_address_pair,
it is not set in OVN as a Virtual IP, so it won't break connectivity to
the metadata service as was reported in [2]; thus we should allow it.

This patch relaxes that restriction. Now CIDRs can be set as an
allowed_address_pair for the port even if the CIDR includes the IP
assigned to the distributed metadata port. It is only forbidden to set
as an allowed_address_pair the same single IP address as the one set
for the distributed metadata port.

Closes-Bug: #2131928

[1] https://review.opendev.org/c/openstack/neutron/+/955757
[2] https://bugs.launchpad.net/neutron/+bug/2116249

Change-Id: Ieb98a126b6d380894456ed892c0a19787e7fbb04
Signed-off-by: Slawek Kaplonski
(cherry picked from commit 71ae26e3529161bd5101bd42ade4d2f1b3b4c781)
---
 .../ml2/drivers/ovn/mech_driver/mech_driver.py  | 10 +++++++++-
 .../drivers/ovn/mech_driver/test_mech_driver.py | 14 ++++++++++----
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
index ec87494a1bc..52c0ddfcca6 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
@@ -655,6 +655,14 @@ def _get_common_ips(ip_addresses, ip_networks):
                     common_ips.add(str(ip_address))
             return common_ips

+        # NOTE(slaweq): We can safely ignore any CIDR larger than /32 (for
+        # IPv4) or /128 (for IPv6) in the allowed_address_pairs, since such
+        # CIDRs cannot be set as a Virtual IP in OVN.
+        # Only /32 and /128 CIDRs are allowed to be set as Virtual IPs in OVN.
+        address_pairs_to_check = [
+            ip_net for ip_net in port_allowed_address_pairs_ip_addresses
+            if ip_net.size == 1]
+
         for distributed_port in distributed_ports:
             distributed_port_ip_addresses = [
                 netaddr.IPAddress(fixed_ip['ip_address']) for fixed_ip in
                 distributed_port['fixed_ips']]

             common_ips = _get_common_ips(
                 distributed_port_ip_addresses,
-                port_allowed_address_pairs_ip_addresses)
+                address_pairs_to_check)

             if common_ips:
                 err_msg = (
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
index 7bd6e1bc386..27ac90d9d2b 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
@@ -3212,17 +3212,23 @@ def test_create_port_with_allowed_address_pairs(self):
               'mac_address': port2['mac_address']}],
             port2['allowed_address_pairs'])

-        # Now test the same but giving a subnet as allowed address pair
-        self._make_port(
+        # Now test the same but giving a subnet as allowed address
+        # pair; this should be fine, as we treat only /32 and /128 IPs
+        # in allowed_address_pairs as Virtual IPs. Nothing is blocked
+        # when a bigger CIDR is set, as that doesn't break metadata.
+        new_port = self._make_port(
             self.fmt, network['network']['id'],
             allowed_address_pairs=[{'ip_address': '10.0.0.2/26'}],
-            expected_res_status=exc.HTTPBadRequest.code,
-            arg_list=('allowed_address_pairs',))
+            arg_list=('allowed_address_pairs',))['port']
         port3 = self._show('ports', port1['id'])['port']
         self.assertEqual(
             [{'ip_address': '10.0.0.3',
               'mac_address': port3['mac_address']}],
             port3['allowed_address_pairs'])
+        self.assertEqual(
+            [{'ip_address': '10.0.0.2/26',
+              'mac_address': new_port['mac_address']}],
+            new_port['allowed_address_pairs'])


 class OVNMechanismDriverTestCase(MechDriverSetupBase,

From 607c3c4c45cdc474541621dc4b5d47bbd576f5a5 Mon Sep 17 00:00:00 2001
From: Rodolfo Alonso Hernandez
Date: Mon, 1 Dec 2025 16:51:43 +0100
Subject: [PATCH 10/14] [FT] Wait for the manager to be created (2)

This patch reuses the `WaitEvent` created in [1] to wait until the
expected `Manager` record has been created before checking it.

[1] https://review.opendev.org/c/openstack/neutron/+/966673

Related-Bug: #2131024
Signed-off-by: Rodolfo Alonso Hernandez
Change-Id: I0b6568ca6c3844bd32892a2ebb8c36e1fec144c9
(cherry picked from commit 31b78bf23e691c423ae1abcb0da8561a13383b3c)
---
 .../agent/ovsdb/native/test_helpers.py | 30 +++++++++++++++----
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/neutron/tests/functional/agent/ovsdb/native/test_helpers.py b/neutron/tests/functional/agent/ovsdb/native/test_helpers.py
index 3b365533df4..6bacde09282 100644
--- a/neutron/tests/functional/agent/ovsdb/native/test_helpers.py
+++ b/neutron/tests/functional/agent/ovsdb/native/test_helpers.py
@@ -14,7 +14,7 @@
 # under the License.
 from neutron_lib import constants as const
-from ovsdbapp.backend.ovs_idl import event
+from ovsdbapp.backend.ovs_idl import event as idl_event

 from neutron.agent.common import ovs_lib
 from neutron.agent.ovsdb.native import helpers
@@ -23,14 +23,21 @@
 from neutron.tests.functional import base


-class WaitOvsManagerEvent(event.WaitEvent):
+class WaitOvsManagerEvent(idl_event.WaitEvent):
     event_name = 'WaitOvsManagerEvent'

-    def __init__(self, manager_target):
+    def __init__(self, manager_target, inactivity_probe=None, event=None):
         table = 'Manager'
-        events = (self.ROW_CREATE,)
+        events = (self.ROW_CREATE,) if not event else (event,)
         conditions = (('target', '=', manager_target),)
         super().__init__(events, table, conditions, timeout=10)
+        self.inactivity_probe = inactivity_probe
+
+    def match_fn(self, event, row, old):
+        if (self.inactivity_probe is None or
+                self.inactivity_probe == row.inactivity_probe[0]):
+            return True
+        return False


 class EnableConnectionUriTestCase(base.BaseSudoTestCase):
@@ -78,8 +85,13 @@ def test_add_manager_overwrites_existing_manager(self):
         ovsdb_cfg_connection = 'tcp:127.0.0.1:%s' % _port
         manager_connection = 'ptcp:%s:127.0.0.1' % _port

+        inactivity_probe = 10
+        manager_event = WaitOvsManagerEvent(
+            manager_connection, inactivity_probe=inactivity_probe)
+        ovs.ovsdb.idl.notify_handler.watch_event(manager_event)
         helpers.enable_connection_uri(ovsdb_cfg_connection,
-                                      inactivity_probe=10)
+                                      inactivity_probe=inactivity_probe)
+        manager_event.wait()
         self.addCleanup(ovs.ovsdb.remove_manager(manager_connection).execute)
         # First call of enable_connection_uri creates the manager
         # and the list returned by get_manager contains it:
@@ -89,7 +101,13 @@ def test_add_manager_overwrites_existing_manager(self):
         # the 2nd call of enable_connection_uri with a new value of
         # inactivity_probe keeps the original manager; only the
         # inactivity_probe value is updated:
+        inactivity_probe = 100
+        manager_event = WaitOvsManagerEvent(
+            manager_connection, inactivity_probe=inactivity_probe,
+            event='update')
+        ovs.ovsdb.idl.notify_handler.watch_event(manager_event)
         helpers.enable_connection_uri(ovsdb_cfg_connection,
-                                      inactivity_probe=100)
+                                      inactivity_probe=inactivity_probe)
+        manager_event.wait()
         my_mans = ovs.ovsdb.get_manager().execute()
         self.assertIn(manager_connection, my_mans)

From 33a318dcae04635665b73052f13fda45a2651e60 Mon Sep 17 00:00:00 2001
From: minsu
Date: Thu, 22 May 2025 23:56:51 +0800
Subject: [PATCH 11/14] Update OVN_AGENT_METADATA_SB_CFG_KEY when OVN metadata
 agent starts

If the metadata agent is not present in the local AgentCache and the
report time (agent_down_time) is too long, the Neutron API will delay
the Chassis_Private update. As a result, the agent will not update the
corresponding record properly. Consequently, the newly added metadata
agent will not appear in the output of the openstack network agent
list command.

This patch aims to update OVN_AGENT_METADATA_SB_CFG_KEY to nb_cfg as
soon as the metadata agent starts.
Closes-Bug: #2111475 Signed-off-by: Min Sun Change-Id: Ic12b6ed3cd84dca16f5081dc646717d0a5be318b (cherry picked from commit c6f5c05778076c653e47c6cb7e726bff7016b860) --- neutron/agent/ovn/metadata/agent.py | 17 +++++++++++++++++ .../agent/ovn/metadata/test_metadata_agent.py | 13 +++---------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/neutron/agent/ovn/metadata/agent.py b/neutron/agent/ovn/metadata/agent.py index b39ce8da26a..df58581d8d3 100644 --- a/neutron/agent/ovn/metadata/agent.py +++ b/neutron/agent/ovn/metadata/agent.py @@ -431,6 +431,21 @@ def _update_chassis_private_config(self): 'Chassis_Private', self.chassis, ('external_ids', external_ids)).execute(check_error=True) + def _update_metadata_sb_cfg_key(self): + """Update the Chassis_Private nb_cfg information in external_ids + + This method should be called once the Metadata Agent has been + registered (method ``register_metadata_agent`` has been called) and + the corresponding Chassis_Private register has been created/updated + and chassis private config has been updated. + """ + nb_cfg = self.sb_idl.db_get('Chassis_Private', + self.chassis, 'nb_cfg').execute() + external_ids = {ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY: str(nb_cfg)} + self.sb_idl.db_set( + 'Chassis_Private', self.chassis, + ('external_ids', external_ids)).execute(check_error=True) + @_sync_lock def resync(self): """Resync the agent. @@ -439,6 +454,7 @@ def resync(self): """ self._load_config() self._update_chassis_private_config() + self._update_metadata_sb_cfg_key() self.sync() def start(self): @@ -474,6 +490,7 @@ def start(self): # Register the agent with its corresponding Chassis self.register_metadata_agent() self._update_chassis_private_config() + self._update_metadata_sb_cfg_key() self._proxy.wait() diff --git a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py index 592df2f4610..a39570be8cd 100644 --- a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py +++ b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py @@ -120,6 +120,9 @@ def _start_metadata_agent(self): check_error=True) self.assertEqual(external_ids[ovn_const.OVN_AGENT_OVN_BRIDGE], self.OVN_BRIDGE) + self.assertEqual( + external_ids[ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY], + '0') # Metadata agent will open connections to OVS and SB databases. # Close connections to them when the test ends, @@ -129,16 +132,6 @@ def _start_metadata_agent(self): return agt def test_metadata_agent_healthcheck(self): - chassis_row = self.sb_api.db_find( - AGENT_CHASSIS_TABLE, - ('name', '=', self.chassis_name)).execute( - check_error=True)[0] - - # Assert that, prior to creating a resource the metadata agent - # didn't populate the external_ids from the Chassis - self.assertNotIn(ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, - chassis_row['external_ids']) - # Let's list the agents to force the nb_cfg to be bumped on NB # db, which will automatically increment the nb_cfg counter on # NB_Global and make ovn-controller copy it over to SB_Global. Upon From 7c5e178ab2366e4e796af183bf449897819e96da Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 8 Dec 2025 18:50:12 -0500 Subject: [PATCH 12/14] Check if network is external in auto-allocate code When using OSC to change the auto-allocate default value of a network (is_default), the 'router:external' value is not supplied. 
For example the request will just have: {'is_default': True} or {'is_default': False} This causes the call to fast-exit, even when the change to the network would/should have succeeded. We should instead also check if there is an existing network, and verify the 'router:external' value of that before fast-exiting. Broken in [0] in 2025.1. [0] https://review.opendev.org/c/openstack/neutron/+/933132 Closes-bug: #2134387 Change-Id: I0f95712be44426fb5184702c1969d7fe4bf4147e Signed-off-by: Brian Haley (cherry picked from commit 4d196d1cd12182082e51f574626fd3002ac4c6b8) --- neutron/services/auto_allocate/db.py | 6 +- .../unit/services/auto_allocate/test_db.py | 107 +++++++++++++++++- 2 files changed, 111 insertions(+), 2 deletions(-) diff --git a/neutron/services/auto_allocate/db.py b/neutron/services/auto_allocate/db.py index 21d3ec824db..5ca657d6ae1 100644 --- a/neutron/services/auto_allocate/db.py +++ b/neutron/services/auto_allocate/db.py @@ -54,7 +54,11 @@ def _ensure_external_network_default_value_callback( def _do_ensure_external_network_default_value_callback( context, request, orig, network): is_default = request.get(api_const.IS_DEFAULT) - is_external = request.get(external_net_apidef.EXTERNAL) + # the update request might not have external set to True, so + # verify by looking at the network + req_external = request.get(external_net_apidef.EXTERNAL) + net_external = network[external_net_apidef.EXTERNAL] + is_external = req_external is not None or net_external if is_default is None or not is_external: return if is_default: diff --git a/neutron/tests/unit/services/auto_allocate/test_db.py b/neutron/tests/unit/services/auto_allocate/test_db.py index 7eb505a41c0..e3c2c77021a 100644 --- a/neutron/tests/unit/services/auto_allocate/test_db.py +++ b/neutron/tests/unit/services/auto_allocate/test_db.py @@ -107,7 +107,7 @@ def test_ensure_external_network_default_value_no_default(self): "NETWORK", "precommit_update", "test_plugin", payload=events.DBEventPayload( self.ctx, request_body=kwargs['request'], - states=(kwargs['network'],))) + states=({}, kwargs['network']))) get_external_nets.assert_not_called() get_external_net.assert_not_called() network_mock.update.assert_not_called() @@ -190,6 +190,111 @@ def test_ensure_external_network_default_value_default_existed(self): get_external_net.assert_not_called() network_mock.update.assert_not_called() + def test_ensure_external_network_default_value_update(self): + network_id = uuidutils.generate_uuid() + kwargs = { + "context": self.ctx, + "request": { + "id": network_id, + api_const.IS_DEFAULT: True, + }, + "network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: True, + }, + "original_network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: True, + } + } + network_mock = mock.MagicMock(network_id=network_id, is_default=False) + with mock.patch( + 'neutron.objects.network.ExternalNetwork.get_objects', + return_value=[network_mock] + ) as get_external_nets, mock.patch( + 'neutron.objects.network.ExternalNetwork.get_object', + return_value=network_mock + ) as get_external_net: + db._ensure_external_network_default_value_callback( + "NETWORK", "precommit_update", "test_plugin", + payload=events.DBEventPayload( + self.ctx, request_body=kwargs['request'], + states=(kwargs['original_network'], kwargs['network']))) + get_external_nets.assert_called_once_with( + self.ctx, _pager=mock.ANY, is_default=True) + get_external_net.assert_called_once_with( + self.ctx, 
network_id=network_id) + network_mock.update.assert_called_once_with() + + def test_ensure_external_network_default_value_internal_update(self): + network_id = uuidutils.generate_uuid() + kwargs = { + "context": self.ctx, + "request": { + "id": network_id, + api_const.IS_DEFAULT: True, + }, + "network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: False, + }, + "original_network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: False, + } + } + network_mock = mock.MagicMock(network_id='fake_id', is_default=False) + with mock.patch( + 'neutron.objects.network.ExternalNetwork.get_objects', + return_value=[network_mock] + ) as get_external_nets, mock.patch( + 'neutron.objects.network.ExternalNetwork.get_object', + return_value=network_mock + ) as get_external_net: + db._ensure_external_network_default_value_callback( + "NETWORK", "precommit_update", "test_plugin", + payload=events.DBEventPayload( + self.ctx, request_body=kwargs['request'], + states=(kwargs['original_network'], kwargs['network']))) + get_external_nets.assert_not_called() + get_external_net.assert_not_called() + network_mock.update.assert_not_called() + + def test_ensure_external_network_default_value_internal_create(self): + network_id = uuidutils.generate_uuid() + kwargs = { + "context": self.ctx, + "request": { + "id": network_id, + api_const.IS_DEFAULT: True, + }, + "network": { + "id": network_id, + api_const.IS_DEFAULT: False, + external_net_apidef.EXTERNAL: False, + }, + } + network_mock = mock.MagicMock(network_id='fake_id', is_default=False) + with mock.patch( + 'neutron.objects.network.ExternalNetwork.get_objects', + return_value=[network_mock] + ) as get_external_nets, mock.patch( + 'neutron.objects.network.ExternalNetwork.get_object', + return_value=network_mock + ) as get_external_net: + db._ensure_external_network_default_value_callback( + "NETWORK", "precommit_update", "test_plugin", + payload=events.DBEventPayload( + self.ctx, request_body=kwargs['request'], + states=({}, kwargs['network']))) + get_external_nets.assert_not_called() + get_external_net.assert_not_called() + network_mock.update.assert_not_called() + def test__provision_external_connectivity_expected_cleanup(self): """Test that the right resources are cleaned up.""" subnets = [ From 7b7d38baee5ce452fe581698d7271124e6d42357 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 10 Dec 2025 18:27:25 -0500 Subject: [PATCH 13/14] Search for active port binding when updating LSP host info When updating LSP host info for an UP port, the code always uses the first port binding in the DB entry (db_port.port_bindings[0].host). Since there could be multiple port bindings present, search for the active one when determining which host value to use. 
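A condensed sketch of the new selection logic (it reuses the same neutron_lib
helper as the diff below; that the helper returns None when no ACTIVE binding
exists is implied by the patch's own `if not active_binding` check):

    from neutron_lib import constants as const
    from neutron_lib.plugins import utils as p_utils

    def pick_binding_host(db_port):
        # Before: always the first row; wrong when, e.g., a live migration
        # leaves both an ACTIVE and an INACTIVE binding on the port.
        #   host = db_port.port_bindings[0].host
        active = p_utils.get_port_binding_by_status_and_host(
            db_port.port_bindings, const.ACTIVE)
        return active.host if active else None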
Closes-bug: #2134504 Change-Id: Ia07610a17afb55b802290c8910a17dace7794c4f Signed-off-by: Brian Haley (cherry picked from commit 137949ee0781e138de8955c74cefb9bea612ce13) --- .../ovn/mech_driver/ovsdb/ovn_client.py | 24 ++++++--- .../ovn/mech_driver/ovsdb/test_ovn_client.py | 49 ++++++++++++------- 2 files changed, 50 insertions(+), 23 deletions(-) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py index f62f91ee95a..45f4c4a0493 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py @@ -267,7 +267,7 @@ def _get_port_dhcp_options(self, port, ip_version): wait=tenacity.wait_random(min=2, max=3), stop=tenacity.stop_after_attempt(3), reraise=True) - def _wait_for_port_bindings_host(self, context, port_id): + def _wait_for_active_port_bindings_host(self, context, port_id): db_port = ml2_db.get_port(context, port_id) # This is already checked previously but, just to stay on # the safe side in case the port is deleted mid-operation @@ -280,11 +280,18 @@ def _wait_for_port_bindings_host(self, context, port_id): _('No port bindings information found for ' 'port %s') % port_id) - if not db_port.port_bindings[0].host: + active_binding = p_utils.get_port_binding_by_status_and_host( + db_port.port_bindings, const.ACTIVE) + if not active_binding: + raise RuntimeError( + _('No active port bindings information found for ' + 'port %s') % port_id) + + if not active_binding.host: raise RuntimeError( _('No hosting information found for port %s') % port_id) - return db_port + return active_binding def update_lsp_host_info(self, context, db_port, up=True): """Update the binding hosting information for the LSP. @@ -313,18 +320,23 @@ def update_lsp_host_info(self, context, db_port, up=True): if not db_port.port_bindings: return - if not db_port.port_bindings[0].host: + # There could be more than one port binding present, we need + # to find the active one + active_binding = p_utils.get_port_binding_by_status_and_host( + db_port.port_bindings, const.ACTIVE) + + if not active_binding or not active_binding.host: # NOTE(lucasgomes): There might be a sync issue between # the moment that this port was fetched from the database # and the hosting information being set, retry a few times try: - db_port = self._wait_for_port_bindings_host( + active_binding = self._wait_for_active_port_bindings_host( context, db_port.id) except RuntimeError as e: LOG.warning(e) return - host = db_port.port_bindings[0].host + host = active_binding.host ext_ids = ('external_ids', {ovn_const.OVN_HOST_ID_EXT_ID_KEY: host}) cmd.append( diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py index 2e94d96ee44..c03c8ebf892 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py @@ -99,9 +99,13 @@ def setUp(self): super().setUp() self.get_plugin = mock.patch( 'neutron_lib.plugins.directory.get_plugin').start() + self.get_pb_bsah = mock.patch( + 'neutron_lib.plugins.utils.' 
+ 'get_port_binding_by_status_and_host').start() # Disable tenacity wait for UT - self.ovn_client._wait_for_port_bindings_host.retry.wait = wait_none() + self.ovn_client._wait_for_active_port_bindings_host.retry.wait = ( + wait_none()) def test__add_router_ext_gw_default_route(self): plugin = mock.MagicMock() @@ -246,8 +250,9 @@ def test_update_lsp_host_info_up(self): context = mock.MagicMock() host_id = 'fake-binding-host-id' port_id = 'fake-port-id' - db_port = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host=host_id)]) + port_binding = mock.Mock(host=host_id) + db_port = mock.Mock(id=port_id, port_bindings=[port_binding]) + self.get_pb_bsah.return_value = port_binding self.ovn_client.update_lsp_host_info(context, db_port) @@ -259,14 +264,16 @@ def test_update_lsp_host_info_up_retry(self): context = mock.MagicMock() host_id = 'fake-binding-host-id' port_id = 'fake-port-id' + port_binding = mock.Mock(host=host_id) + port_binding_no_host = mock.Mock(host="") db_port_no_host = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host="")]) - db_port = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host=host_id)]) + id=port_id, port_bindings=[port_binding_no_host]) + self.get_pb_bsah.return_value = None with mock.patch.object( - self.ovn_client, '_wait_for_port_bindings_host') as mock_wait: - mock_wait.return_value = db_port + self.ovn_client, + '_wait_for_active_port_bindings_host') as mock_wait: + mock_wait.return_value = port_binding self.ovn_client.update_lsp_host_info(context, db_port_no_host) # Assert _wait_for_port_bindings_host was called @@ -282,9 +289,11 @@ def test_update_lsp_host_info_up_retry_fail(self): port_id = 'fake-port-id' db_port_no_host = mock.Mock( id=port_id, port_bindings=[mock.Mock(host="")]) + self.get_pb_bsah.return_value = None with mock.patch.object( - self.ovn_client, '_wait_for_port_bindings_host') as mock_wait: + self.ovn_client, + '_wait_for_active_port_bindings_host') as mock_wait: mock_wait.side_effect = RuntimeError("boom") self.ovn_client.update_lsp_host_info(context, db_port_no_host) @@ -316,39 +325,45 @@ def test_update_lsp_host_info_trunk_subport(self): self.nb_idl.db_set.assert_not_called() @mock.patch.object(ml2_db, 'get_port') - def test__wait_for_port_bindings_host(self, mock_get_port): + def test__wait_for_active_port_bindings_host(self, mock_get_port): context = mock.MagicMock() host_id = 'fake-binding-host-id' port_id = 'fake-port-id' + port_binding = mock.Mock(host=host_id) + port_binding_no_host = mock.Mock(host="") db_port_no_host = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host="")]) + id=port_id, port_bindings=[port_binding_no_host]) db_port = mock.Mock( - id=port_id, port_bindings=[mock.Mock(host=host_id)]) + id=port_id, port_bindings=[port_binding]) + # no active binding, no binding with host, binding with host + self.get_pb_bsah.side_effect = (None, port_binding_no_host, + port_binding) - mock_get_port.side_effect = (db_port_no_host, db_port) + mock_get_port.side_effect = (db_port_no_host, db_port_no_host, db_port) - ret = self.ovn_client._wait_for_port_bindings_host( + ret = self.ovn_client._wait_for_active_port_bindings_host( context, port_id) - self.assertEqual(ret, db_port) + self.assertEqual(ret, port_binding) expected_calls = [mock.call(context, port_id), mock.call(context, port_id)] mock_get_port.assert_has_calls(expected_calls) @mock.patch.object(ml2_db, 'get_port') - def test__wait_for_port_bindings_host_fail(self, mock_get_port): + def test__wait_for_active_port_bindings_host_fail(self, mock_get_port): context = 
mock.MagicMock() port_id = 'fake-port-id' db_port_no_pb = mock.Mock(id=port_id, port_bindings=[]) db_port_no_host = mock.Mock( id=port_id, port_bindings=[mock.Mock(host="")]) + self.get_pb_bsah.return_value = None mock_get_port.side_effect = ( db_port_no_pb, db_port_no_host, db_port_no_host) self.assertRaises( - RuntimeError, self.ovn_client._wait_for_port_bindings_host, + RuntimeError, self.ovn_client._wait_for_active_port_bindings_host, context, port_id) expected_calls = [mock.call(context, port_id), From c75e972a4db294b5d275761d10eef5ea1906c757 Mon Sep 17 00:00:00 2001 From: Slawek Kaplonski Date: Fri, 28 Nov 2025 15:59:04 +0100 Subject: [PATCH 14/14] [Docs] Add doc about Virtual IP addresses in Neutron ML2/OVN This patch adds new documentation about usage of Virtual IPs and their implementation and limitations in the ML2/OVN backend. Closes-bug: #2132977 Change-Id: Idff70d56e29a3b7206ba8f994bb7b9eee8a9029d Signed-off-by: Slawek Kaplonski (cherry picked from commit 28a1483b70aeeb0b2b2c1eee95fe2649efc0f0e1) --- doc/source/ovn/index.rst | 1 + doc/source/ovn/virtual_ips.rst | 193 +++++++++++++++++++++++++++++++++ 2 files changed, 194 insertions(+) create mode 100644 doc/source/ovn/virtual_ips.rst diff --git a/doc/source/ovn/index.rst b/doc/source/ovn/index.rst index 9a4eeedc620..b2973e5e518 100644 --- a/doc/source/ovn/index.rst +++ b/doc/source/ovn/index.rst @@ -13,3 +13,4 @@ OVN Driver ml2ovn_trace.rst faq/index.rst ovn_agent.rst + virtual_ips.rst diff --git a/doc/source/ovn/virtual_ips.rst b/doc/source/ovn/virtual_ips.rst new file mode 100644 index 00000000000..f7255205258 --- /dev/null +++ b/doc/source/ovn/virtual_ips.rst @@ -0,0 +1,193 @@ +.. _ovn_virtual_ips: + +Virtual IPs +=========== + +It is common practice to create an unbound port in a Neutron network to +allocate (reserve) an IP address that will be used as a Virtual IP (VIP) +by other ports in the same network. Such IP addresses are then added as +``allowed_address_pairs`` to the ports used by Virtual Machines. + +Applications, such as keepalived, running inside these Virtual Machines can +then configure the VIP on one of the VMs and move it between VMs dynamically. + +Implementation in OVN +~~~~~~~~~~~~~~~~~~~~~ + +For Virtual IP addresses to work properly in the OVN backend, Neutron needs to +mark the ``Logical Switch Port`` corresponding to the port with the Virtual IP +as ``virtual``. Neutron does this for ports that are unbound and have a fixed +IP address that is also configured in the ``allowed_address_pairs`` of any +other port in the same network. + +Limitations +~~~~~~~~~~~ + +* In the case when a Virtual IP address is going to be used in Virtual Machines + and configured as ``allowed_address_pairs``, it is necessary to also create + such an unbound port in Neutron in order to: + + * reserve that IP address for that use case so that it will not be later + allocated for another port in the same network as the fixed IP, + * let OVN know that this IP and ``Logical Switch Port`` is ``virtual`` so + that OVN can configure it accordingly. + +* A port created in Neutron in order to allocate virtual IP address has to be + ``unbound``, it can not be attached directly to any Virtual Machine. + +* Because of how Virtual IP addresses are implemented in the ML2/OVN backend, + the Virtual IP address must be set in the ``allowed_address_pairs`` of the VM + port as a single IP address (/32 for IPv4 or /128 for IPv6). 
+ Setting a larger CIDR as ``allowed_address_pairs``, even if it contains + the Virtual IP address, will not mark the ``Logical Switch Port`` + corresponding to the port with that IP address as ``virtual``. + +* Another limitation is that setting an IP address that belongs to the + distributed metadata port in the same network as ``allowed_address_pairs`` is + not allowed. + + +Usage example +~~~~~~~~~~~~~ + +To use a Virtual IP address in Neutron, you need to create an unbound port in a +Neutron network and add the Virtual IP address to the ``allowed_address_pairs`` +of the port(s) that belong to the Virtual Machine(s). + +* Create an unbound port in the Neutron network to allocate the Virtual IP + address: + + .. code-block:: console + + $ openstack port create --network private virtual-ip-port + +-------------------------+-----------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------------------+-----------------------------------------------------------------------------------------------------+ + | admin_state_up | UP | + | allowed_address_pairs | | + | binding_host_id | | + | binding_profile | | + | binding_vif_details | | + | binding_vif_type | unbound | + | binding_vnic_type | normal | + | created_at | 2025-11-28T14:39:06Z | + | data_plane_status | None | + | description | | + | device_id | | + | device_owner | | + | device_profile | None | + | dns_assignment | | + | dns_domain | None | + | dns_name | None | + | extra_dhcp_opts | | + | fixed_ips | ip_address='10.0.0.20', subnet_id='866305cc-26db-48d7-8471-cbd267321b8b' | + | | ip_address='fde7:7c8e:8883:0:f816:3eff:feb6:559f', subnet_id='b8b0a413-6229-4c64-9d6e-65906a33b056' | + | hardware_offload_type | None | + | hints | | + | id | 3f078d1b-2f6e-41d8-99d7-70bc801f3979 | + | ip_allocation | None | + | mac_address | fa:16:3e:b6:55:9f | + | name | virtual-ip-port | + | network_id | c8e5e81c-d318-43f6-a45e-056f22a518e6 | + | numa_affinity_policy | None | + | port_security_enabled | True | + | project_id | b7907ac4c9794e5787a8d6bac0e5b80b | + | propagate_uplink_status | None | + | resource_request | None | + | revision_number | 1 | + | qos_network_policy_id | None | + | qos_policy_id | None | + | security_group_ids | 876d4c44-e2fd-48fc-bbd4-4bd295676a0e | + | status | DOWN | + | tags | | + | trunk_details | None | + | trusted | None | + | updated_at | 2025-11-28T14:39:06Z | + +-------------------------+-----------------------------------------------------------------------------------------------------+ + +* Create a Virtual Machine + + .. 
code-block:: console + + openstack server create --flavor m1.micro --image cirros-0.5.1-x86_64-disk --network private virtual-machine + +-------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | OS-DCF:diskConfig | MANUAL | + | OS-EXT-AZ:availability_zone | None | + | OS-EXT-SRV-ATTR:host | None | + | OS-EXT-SRV-ATTR:hostname | virtual-machine | + | OS-EXT-SRV-ATTR:hypervisor_hostname | None | + | OS-EXT-SRV-ATTR:instance_name | None | + | OS-EXT-SRV-ATTR:kernel_id | None | + | OS-EXT-SRV-ATTR:launch_index | None | + | OS-EXT-SRV-ATTR:ramdisk_id | None | + | OS-EXT-SRV-ATTR:reservation_id | None | + | OS-EXT-SRV-ATTR:root_device_name | None | + | OS-EXT-SRV-ATTR:user_data | None | + | OS-EXT-STS:power_state | N/A | + | OS-EXT-STS:task_state | scheduling | + | OS-EXT-STS:vm_state | building | + | OS-SRV-USG:launched_at | None | + | OS-SRV-USG:terminated_at | None | + | accessIPv4 | None | + | accessIPv6 | None | + | addresses | N/A | + | adminPass | QNkLbpeZ72LF | + | config_drive | None | + | created | 2025-11-28T14:41:22Z | + | description | None | + | flavor | description=, disk='1', ephemeral='0', extra_specs.hw_rng:allowed='True', id='m1.micro', is_disabled=, is_public='True', location=, name='m1.micro', | + | | original_name='m1.micro', ram='256', rxtx_factor=, swap='0', vcpus='1' | + | hostId | None | + | host_status | None | + | id | d2573702-b79c-46a3-bd7a-d8aa50341082 | + | image | cirros-0.5.1-x86_64-disk (7b920c82-0879-4526-9ee8-7e3b77e7fe28) | + | key_name | None | + | locked | None | + | locked_reason | None | + | name | virtual-machine | + | pinned_availability_zone | None | + | progress | None | + | project_id | b7907ac4c9794e5787a8d6bac0e5b80b | + | properties | None | + | scheduler_hints | | + | security_groups | name='default' | + | server_groups | None | + | status | BUILD | + | tags | | + | trusted_image_certificates | None | + | updated | 2025-11-28T14:41:22Z | + | user_id | d46c7955bea644c9a45e5d95bb462e29 | + | volumes_attached | | + +-------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + + +* List ports of the Virtual Machine + + .. 
code-block:: console

      $ openstack port list --device-id d2573702-b79c-46a3-bd7a-d8aa50341082
      +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+
      | ID                                   | Name | MAC Address       | Fixed IP Addresses                                                                                  | Status |
      +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+
      | 692c7f41-0497-4d4c-9766-3d71ffd229df |      | fa:16:3e:b6:44:9a | ip_address='10.0.0.30', subnet_id='866305cc-26db-48d7-8471-cbd267321b8b'                            | ACTIVE |
      |                                      |      |                   | ip_address='fde7:7c8e:8883:0:f816:3eff:feb6:449a', subnet_id='b8b0a413-6229-4c64-9d6e-65906a33b056' |        |
      +--------------------------------------+------+-------------------+-----------------------------------------------------------------------------------------------------+--------+

* Set the Virtual IP address as an allowed address pair to the port of the
  Virtual Machine

  .. code-block:: console

      $ openstack port set --allowed-address ip-address=10.0.0.20 692c7f41-0497-4d4c-9766-3d71ffd229df


After these steps, the Virtual IP address will be available on the port of the
Virtual Machine.

If a CIDR such as ``10.0.0.0/24`` is set in the ``allowed_address_pairs``
instead of the IP address ``10.0.0.20``, the ``Logical Switch Port`` related
to the port with IP address ``10.0.0.20`` will not be marked as ``virtual``,
due to the limitations mentioned above.
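For example, assuming the port IDs from the example above, the following call
is accepted by the API but does not result in a virtual
``Logical Switch Port``:

.. code-block:: console

   $ openstack port set --allowed-address ip-address=10.0.0.0/24 692c7f41-0497-4d4c-9766-3d71ffd229df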