)]}'
{"/COMMIT_MSG":[{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"2af99788a2a7001ff984490fb87a0c594209d4f2","unresolved":true,"context_lines":[{"line_number":18,"context_line":"different from any other threads in the system now."},{"line_number":19,"context_line":""},{"line_number":20,"context_line":"To make the change more understandable the event handling logic is moved"},{"line_number":21,"context_line":"behind and abstraction that is implemented twice, once for eventlet with"},{"line_number":22,"context_line":"the existing implementation just moved around, and once for native"},{"line_number":23,"context_line":"threading with the simplified handling."},{"line_number":24,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":23,"id":"3bff2928_7f4468ff","line":21,"range":{"start_line":21,"start_character":7,"end_line":21,"end_character":10},"updated":"2026-01-19 12:58:19.000000000","message":"nit : an","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1bf49fa30c377b82aaa00d7efaa08d4adf9a8eed","unresolved":false,"context_lines":[{"line_number":18,"context_line":"different from any other threads in the system now."},{"line_number":19,"context_line":""},{"line_number":20,"context_line":"To make the change more understandable the event handling logic is moved"},{"line_number":21,"context_line":"behind and abstraction that is implemented twice, once for eventlet with"},{"line_number":22,"context_line":"the existing implementation just moved around, and once for native"},{"line_number":23,"context_line":"threading with the simplified handling."},{"line_number":24,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":23,"id":"09262af3_01895e07","line":21,"range":{"start_line":21,"start_character":7,"end_line":21,"end_character":10},"in_reply_to":"3bff2928_7f4468ff","updated":"2026-01-19 13:30:16.000000000","message":"Done","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"}],"/PATCHSET_LEVEL":[{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":15,"id":"1a241898_e934c7dc","updated":"2025-12-11 11:41:52.000000000","message":"I need to review this in more detail, but this looks good from the high level review I\u0027ve done so far. Couple of nit but nothing you actually need to address","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":16,"id":"9adc94b4_92034fa2","updated":"2025-12-17 15:17:32.000000000","message":"The live migration issue is unrelated\n```\nNov 21 15:50:14.841128 npd3a8f0c576804 nova-compute[40023]: ERROR nova.virt.libvirt.driver [None req-90c70b34-2f94-44a6-83d9-9178b175f4c9 tempest-LiveMigrationTest-307564566 tempest-LiveMigrationTest-307564566-project-admin] [instance: a83f7568-8bd1-49c1-9b26-7013777b82e8] Live Migration failure: unable to connect to server at \u0027np9b7478816ddb4:49152\u0027: Connection refused: libvirt.libvirtError: unable to connect to server at \u0027np9b7478816ddb4:49152\u0027: Connection refused\n```\nThe py312-threading failure is relevant I have to look at it.","commit_id":"3606029252f4e07779329a32d0eeeb1429d724dc"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"de07f268dd69603899baecea8ec3a7a2b4d71aef","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":16,"id":"cef69e34_5ecceace","in_reply_to":"9adc94b4_92034fa2","updated":"2025-12-19 15:34:36.000000000","message":"Should passing now.","commit_id":"3606029252f4e07779329a32d0eeeb1429d724dc"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"89ffc9e7ad64129738a18166bdde9ca19cd341b2","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":20,"id":"6dfee064_9efbcc56","updated":"2026-01-15 15:56:00.000000000","message":"I think the Factory layer lgtm but I found a bug IMHO.","commit_id":"9469ddea6e8ebd9039a8a87f33f171f78e988299"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"839272ce8e298ee5a1c86f90d3a3281244d25aba","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":22,"id":"36190e4c_884a9513","updated":"2026-01-16 11:38:54.000000000","message":"Thanks for catching that bug.","commit_id":"f5046ead6fd61a709318fa36cca437a8e5ef617b"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"2af99788a2a7001ff984490fb87a0c594209d4f2","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"31283dda_cf8a2bda","updated":"2026-01-19 12:58:19.000000000","message":"I cannot speak yet *why* we have a problem but when looking at the nova-next failure, I found that we don\u0027t get the detach event from libvirt for device ua-2e3d9045-99a6-4540-9236-a3220292c947 in https://storage.bhs.cloud.ovh.net/v1/AUTH_dcaab5e32b234d56b626f72581e3644c/zuul_opendev_logs_4df/openstack/4dfc5cd2cff140098b5ef4239b6fc09f/compute1/logs/screen-n-cpu.txt\n\nI don\u0027t want to freak out about a race condition but I\u0027m not super confident that we miss a libvirt event while we\u0027re changing the interface...\nContinuing to investigate.\n\n```\nJan 16 12:34:28.090259 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:34:28.094510 npc521841fd7244 nova-compute[42482]: DEBUG nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Found disk vdb by alias ua-2e3d9045-99a6-4540-9236-a3220292c947 {{(pid\u003d42482) _get_guest_disk_device /opt/stack/nova/nova/virt/libvirt/driver.py:2838}}\nJan 16 12:34:28.094941 npc521841fd7244 nova-compute[42482]: DEBUG nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Failed to detach device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 from the live domain config. Libvirt did not report any error but the device is still in the config. {{(pid\u003d42482) _detach_from_live_with_retry /opt/stack/nova/nova/virt/libvirt/driver.py:2637}}\nJan 16 12:34:28.095416 npc521841fd7244 nova-compute[42482]: ERROR nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Run out of retry while detaching device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 from the live domain config. Device is still attached to the guest.\nJan 16 12:34:28.096157 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.block_device [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] [instance: 21cd7b90-e282-4b71-9e70-ca58220d0cc3] Guest refused to detach volume 2e3d9045-99a6-4540-9236-a3220292c947: nova.exception.DeviceDetachFailed: Device detach failed for vdb: Run out of retry while detaching device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 from the live domain config. Device is still attached to the guest.\nJan 16 12:34:28.272567 npc521841fd7244 nova-compute[42482]: INFO nova.virt.libvirt.driver [None req-ac569fe9-d43f-4d5e-8b21-d9ae7aaeb31e tempest-ListImageFiltersTestJSON-1071332225 tempest-ListImageFiltersTestJSON-1071332225-project-member] [instance: 944cb8ea-a069-45e9-ad25-a16207865617] Beginning live snapshot process\nJan 16 12:34:28.384786 npc521841fd7244 nova-compute[42482]: DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 21 {{(pid\u003d42482) __log_wakeup /opt/stack/data/venv/lib/python3.12/site-packages/ovs/poller.py:263}}\nJan 16 12:34:28.387499 npc521841fd7244 nova-compute[42482]: DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 21 {{(pid\u003d42482) __log_wakeup /opt/stack/data/venv/lib/python3.12/site-packages/ovs/poller.py:263}}\nJan 16 12:34:28.465391 npc521841fd7244 nova-compute[42482]: DEBUG oslo_concurrency.lockutils [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Lock \"21cd7b90-e282-4b71-9e70-ca58220d0cc3\" \"released\" by \"nova.compute.manager.ComputeManager.detach_volume.\u003clocals\u003e.do_detach_volume\" :: held 201.130s {{(pid\u003d42482) inner /opt/stack/data/venv/lib/python3.12/site-packages/oslo_concurrency/lockutils.py:424}}\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Exception during message handling: nova.exception.DeviceDetachFailed: Device detach failed for vdb: Run out of retry while detaching device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 from the live domain config. Device is still attached to the guest.\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server Traceback (most recent call last):\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_messaging/rpc/server.py\", line 174, in _process_incoming\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     res \u003d self.dispatcher.dispatch(message)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_messaging/rpc/dispatcher.py\", line 309, in dispatch\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     return self._do_dispatch(endpoint, method, ctxt, args)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_messaging/rpc/dispatcher.py\", line 229, in _do_dispatch\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     result \u003d func(ctxt, **new_args)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server              ^^^^^^^^^^^^^^^^^^^^^^\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/exception_wrapper.py\", line 65, in wrapped\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     with excutils.save_and_reraise_exception():\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_utils/excutils.py\", line 256, in __exit__\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self.force_reraise()\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_utils/excutils.py\", line 222, in force_reraise\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     raise self.value\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/exception_wrapper.py\", line 63, in wrapped\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     return f(self, context, *args, **kw)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/compute/utils.py\", line 1483, in decorated_function\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     return function(self, context, *args, **kwargs)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/compute/manager.py\", line 211, in decorated_function\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     with excutils.save_and_reraise_exception():\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_utils/excutils.py\", line 256, in __exit__\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self.force_reraise()\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_utils/excutils.py\", line 222, in force_reraise\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     raise self.value\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/compute/manager.py\", line 201, in decorated_function\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     return function(self, context, *args, **kwargs)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/compute/manager.py\", line 8300, in detach_volume\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     do_detach_volume(context, volume_id, instance, attachment_id)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_concurrency/lockutils.py\", line 415, in inner\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     return f(*args, **kwargs)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server            ^^^^^^^^^^^^^^^^^^\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/compute/manager.py\", line 8297, in do_detach_volume\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self._detach_volume(context, bdm, instance,\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/compute/manager.py\", line 8248, in _detach_volume\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     driver_bdm.detach(context, instance, self.volume_api, self.driver,\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/virt/block_device.py\", line 573, in detach\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self._do_detach(context, instance, volume_api, virt_driver,\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/virt/block_device.py\", line 494, in _do_detach\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self.driver_detach(context, instance, volume_api, virt_driver)\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/virt/block_device.py\", line 422, in driver_detach\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     with excutils.save_and_reraise_exception():\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_utils/excutils.py\", line 256, in __exit__\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self.force_reraise()\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/data/venv/lib/python3.12/site-packages/oslo_utils/excutils.py\", line 222, in force_reraise\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     raise self.value\nJan 16 12:34:29.492830 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/virt/block_device.py\", line 413, in driver_detach\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     virt_driver.detach_volume(context, connection_info, instance, mp,\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/virt/libvirt/driver.py\", line 2866, in detach_volume\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self._detach_with_retry(\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/virt/libvirt/driver.py\", line 2569, in _detach_with_retry\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     self._detach_from_live_with_retry(\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server   File \"/opt/stack/nova/nova/virt/libvirt/driver.py\", line 2648, in _detach_from_live_with_retry\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server     raise exception.DeviceDetachFailed(\nJan 16 12:34:29.498086 npc521841fd7244 nova-compute[42482]: ERROR oslo_messaging.rpc.server nova.exception.DeviceDetachFailed: Device detach failed for vdb: Run out of retry while detaching device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 from the live domain config. Device is still attached to the guest.\n```","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"2e4be8ea409c2186cbb4bc5eb881aa0b4e61451e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"0bbf9703_c4ea059a","updated":"2026-01-19 10:00:19.000000000","message":"recheck unrelated volume issue, will investigate it","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"900de094f21a3e72f2fcf1b4682d440724af4146","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"086c99ef_61076d28","updated":"2026-01-19 11:10:44.000000000","message":"removing my +2 for now, apparenty the zuul failure is maybe due to the fact that we have a new timeout. Looking it at more after the lunch.","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9ba4a7043e359cdf3a64c14e46312a886e8ff4e5","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":23,"id":"7b900ed6_1fce45a2","in_reply_to":"1fb29a27_6c8287b4","updated":"2026-01-19 13:47:11.000000000","message":"```\n$ grep req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 screen-n-cpu.txt | grep WARNING\nJan 16 12:31:07.974010 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Failed to detach device vdb from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 from the persistent domain config. Libvirt did not report any error but the device is still in the config.\nJan 16 12:31:32.985152 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:31:57.998493 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:32:23.015811 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:32:48.033955 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:33:13.046536 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:33:38.061078 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:34:03.074381 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:34:28.090259 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.libvirt.driver [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] Waiting for libvirt event about the detach of device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 is timed out.\nJan 16 12:34:28.096157 npc521841fd7244 nova-compute[42482]: WARNING nova.virt.block_device [None req-bbbc91d0-945e-4cf9-830e-076c2b99a9f4 tempest-LiveMigrationTest-1598418232 tempest-LiveMigrationTest-1598418232-project-member] [instance: 21cd7b90-e282-4b71-9e70-ca58220d0cc3] Guest refused to detach volume 2e3d9045-99a6-4540-9236-a3220292c947: nova.exception.DeviceDetachFailed: Device detach failed for vdb: Run out of retry while detaching device vdb with device alias ua-2e3d9045-99a6-4540-9236-a3220292c947 from instance 21cd7b90-e282-4b71-9e70-ca58220d0cc3 from the live domain config. Device is still attached to the guest.\n```\n\nSo nova did the normal retries. \n\nAlso libvirt event handling works for this instance as:\n```\nJan 16 12:30:53.468957 npc521841fd7244 nova-compute[42482]: DEBUG nova.virt.driver [None req-898d2de0-b95e-4397-bad5-4e019d9a04d2 None None] Emitting event \u003cLifecycleEvent: 1768566653.4678788, 21cd7b90-e282-4b71-9e70-ca58220d0cc3 \u003d\u003e Started\u003e {{(pid\u003d42482) emit_event /opt/stack/nova/nova/virt/driver.py:1873}}\n```","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"e7dc81c390217b297308c03266a8848ba31648e7","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":23,"id":"e308a47f_a821ee1b","in_reply_to":"31283dda_cf8a2bda","updated":"2026-01-19 13:31:04.000000000","message":"This tends to be guest issue where the guest kernel does not release the device","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7bd136d69820554fdc6ce0e1c636b8b57b35f81a","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":23,"id":"06a461e6_f90c8c76","in_reply_to":"7b900ed6_1fce45a2","updated":"2026-01-19 13:49:03.000000000","message":"From the tempest log\n\n```\n        Body: b\u0027{\"output\": \"[  108.491585] ------------[ cut here ]------------\\\\n[  108.511829] WARNING: CPU: 0 PID: 0 at kernel/time/hrtimer.c:1714 __hrtimer_run_queues+0x149/0x230\\\\n[  108.515358] Modules linked in: ahci libahci sctp libcrc32c ip6_udp_tunnel udp_tunnel ip_tables x_tables 8021q garp stp llc mrp nls_utf8 nls_iso8859_1 nls_ascii isofs hid_generic usbhid hid virtiofs virtio_rng virtio_gpu virtio_dma_buf drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec rc_core drm virtio_scsi virtio_net net_failover failover virtio_input virtio_blk qemu_fw_cfg 9pnet_virtio 9pnet pcnet32 8139cp mii ne2k_pci 8390 igbvf igb i2c_algo_bit dca e1000e e1000\\\\n[  108.521938] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.15.0-117-generic #127-Ubuntu\\\\n[  108.523178] Hardware name: OpenStack Foundation OpenStack Nova, BIOS 1.16.3-debian-1.16.3-2 04/01/2014\\\\n[  108.524458] RIP: 0010:__hrtimer_run_queues+0x149/0x230\\\\n[  108.525611] Code: 46 38 01 0f 85 68 ff ff ff 31 d2 4c 89 fe 4c 89 f7 e8 ab f7 ff ff 41 83 47 10 01 41 83 47 10 01 4c 39 73 58 0f 84 5b ff ff ff \u003c0f\u003e 0b e9 54 ff ff ff 65 8b 05 49 c5 69 5d 89 c0 48 0f a3 05 af 07\\\\n[  108.528083] RSP: 0018:ffffb447c0003f18 EFLAGS: 00000003\\\\n[  108.528795] RAX: 0000000000000001 RBX: ffff958c4b022cc0 RCX: ffff958c4b023490\\\\n[  108.529674] RDX: 0000000000000001 RSI: ffff958c4b022d20 RDI: ffff958c4b023220\\\\n[  108.530494] RBP: ffffb447c0003f70 R08: ffff958c412c9910 R09: ffff958c4b032200\\\\n[  108.531357] R10: 0000000000000000 R11: 0000000088980e80 R12: ffffffffa29915e0\\\\n[  108.532183] R13: ffff958c4b022cc0 R14: ffff958c4b023220 R15: ffff958c4b022d00\\\\n[  108.533350] FS:  0000000000000000(0000) GS:ffff958c4b000000(0000) knlGS:0000000000000000\\\\n[  108.546660] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033\\\\n[  108.557827] CR2: 00007f2dccab6de0 CR3: 0000000001068000 CR4: 00000000000006f0\\\\n[  108.567214] Call Trace:\\\\n[  108.577086]  \u003cIRQ\u003e\\\\n[  108.587452]  ? show_trace_log_lvl+0x1d6/0x2ea\\\\n[  108.596112]  ? show_trace_log_lvl+0x1d6/0x2ea\\\\n[  108.604673]  ? hrtimer_interrupt+0x109/0x220\\\\n[  108.612887]  ? show_regs.part.0+0x23/0x29\\\\n[  108.621125]  ? show_regs.cold+0x8/0xd\\\\n[  108.631207]  ? __hrtimer_run_queues+0x149/0x230\\\\n[  108.644432]  ? __warn+0x8c/0x100\\\\n[  108.653433]  ? __hrtimer_run_queues+0x149/0x230\\\\n[  108.661497]  ? report_bug+0xa4/0xd0\\\\n[  108.671364]  ? handle_bug+0x39/0x90\\\\n[  108.679579]  ? exc_invalid_op+0x19/0x70\\\\n[  108.687260]  ? asm_exc_invalid_op+0x1b/0x20\\\\n[  108.695209]  ? tick_sched_do_timer+0xa0/0xa0\\\\n[  108.702284]  ? __hrtimer_run_queues+0x149/0x230\\\\n[  108.709362]  ? __hrtimer_run_queues+0x135/0x230\\\\n[  108.717127]  ? hrtimer_update_next_event+0x91/0xb0\\\\n[  108.725417]  hrtimer_interrupt+0x109/0x220\\\\n[  108.732697]  ? tick_do_update_jiffies64+0x9b/0xf0\\\\n[  108.740098]  __sysvec_apic_timer_interrupt+0x61/0xe0\\\\n[  108.748355]  sysvec_apic_timer_interrupt+0x7b/0x90\\\\n[  108.755416]  \u003c/IRQ\u003e\\\\n[  108.762668]  \u003cTASK\u003e\\\\n[  108.769712]  asm_sysvec_apic_timer_interrupt+0x1b/0x20\\\\n[  108.776870] RIP: 0010:native_safe_halt+0xb/0x10\\\\n[  108.783285] Code: 2a ff 5b 41 5c 41 5d 5d c3 cc cc cc cc 4c 89 ee 48 c7 c7 40 4a 65 a4 e8 33 50 8c ff eb ca cc eb 07 0f 00 2d 99 29 43 00 fb f4 \u003cc3\u003e cc cc cc cc eb 07 0f 00 2d 89 29 43 00 f4 c3 cc cc cc cc cc 0f\\\\n[  108.799570] RSP: 0018:ffffffffa4603e08 EFLAGS: 00000202\\\\n[  108.807449] RAX: ffffffffa35d9880 RBX: ffffffffa461b440 RCX: 0000000000000000\\\\n[  108.815106] RDX: 0000000000001775 RSI: 0000000000000083 RDI: 0000000000001776\\\\n[  108.822980] RBP: ffffffffa4603e10 R08: ffff958c4b022d20 R09: 0000000000000000\\\\n[  108.830737] R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000\\\\n[  108.839986] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000\\\\n[  108.848769]  ? __sched_text_end+0xa/0xa\\\\n[  108.855732]  ? default_idle+0xe/0x20\\\\n[  108.862000]  arch_cpu_idle+0x15/0x20\\\\n[  108.867843]  default_idle_call+0x3e/0xd0\\\\n[  108.873977]  cpuidle_idle_call+0x179/0x1e0\\\\n[  108.880155]  do_idle+0x83/0xf0\\\\n[  108.885696]  cpu_startup_entry+0x20/0x30\\\\n[  108.891044]  rest_init+0xd3/0x10\n2026-01-16 12:34:22,967 101786 DEBUG    [tempest.common.waiters] Console output for 21cd7b90-e282-4b71-9e70-ca58220d0cc3\nbody\u003d\n[  108.491585] ------------[ cut here ]------------\n[  108.511829] WARNING: CPU: 0 PID: 0 at kernel/time/hrtimer.c:1714 __hrtimer_run_queues+0x149/0x230\n[  108.515358] Modules linked in: ahci libahci sctp libcrc32c ip6_udp_tunnel udp_tunnel ip_tables x_tables 8021q garp stp llc mrp nls_utf8 nls_iso8859_1 nls_ascii isofs hid_generic usbhid hid virtiofs virtio_rng virtio_gpu virtio_dma_buf drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec rc_core drm virtio_scsi virtio_net net_failover failover virtio_input virtio_blk qemu_fw_cfg 9pnet_virtio 9pnet pcnet32 8139cp mii ne2k_pci 8390 igbvf igb i2c_algo_bit dca e1000e e1000\n[  108.521938] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.15.0-117-generic #127-Ubuntu\n[  108.523178] Hardware name: OpenStack Foundation OpenStack Nova, BIOS 1.16.3-debian-1.16.3-2 04/01/2014\n[  108.524458] RIP: 0010:__hrtimer_run_queues+0x149/0x230\n[  108.525611] Code: 46 38 01 0f 85 68 ff ff ff 31 d2 4c 89 fe 4c 89 f7 e8 ab f7 ff ff 41 83 47 10 01 41 83 47 10 01 4c 39 73 58 0f 84 5b ff ff ff \u003c0f\u003e 0b e9 54 ff ff ff 65 8b 05 49 c5 69 5d 89 c0 48 0f a3 05 af 07\n[  108.528083] RSP: 0018:ffffb447c0003f18 EFLAGS: 00000003\n[  108.528795] RAX: 0000000000000001 RBX: ffff958c4b022cc0 RCX: ffff958c4b023490\n[  108.529674] RDX: 0000000000000001 RSI: ffff958c4b022d20 RDI: ffff958c4b023220\n[  108.530494] RBP: ffffb447c0003f70 R08: ffff958c412c9910 R09: ffff958c4b032200\n[  108.531357] R10: 0000000000000000 R11: 0000000088980e80 R12: ffffffffa29915e0\n[  108.532183] R13: ffff958c4b022cc0 R14: ffff958c4b023220 R15: ffff958c4b022d00\n[  108.533350] FS:  0000000000000000(0000) GS:ffff958c4b000000(0000) knlGS:0000000000000000\n[  108.546660] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033\n[  108.557827] CR2: 00007f2dccab6de0 CR3: 0000000001068000 CR4: 00000000000006f0\n[  108.567214] Call Trace:\n[  108.577086]  \u003cIRQ\u003e\n[  108.587452]  ? show_trace_log_lvl+0x1d6/0x2ea\n[  108.596112]  ? show_trace_log_lvl+0x1d6/0x2ea\n[  108.604673]  ? hrtimer_interrupt+0x109/0x220\n[  108.612887]  ? show_regs.part.0+0x23/0x29\n[  108.621125]  ? show_regs.cold+0x8/0xd\n[  108.631207]  ? __hrtimer_run_queues+0x149/0x230\n[  108.644432]  ? __warn+0x8c/0x100\n[  108.653433]  ? __hrtimer_run_queues+0x149/0x230\n[  108.661497]  ? report_bug+0xa4/0xd0\n[  108.671364]  ? handle_bug+0x39/0x90\n[  108.679579]  ? exc_invalid_op+0x19/0x70\n[  108.687260]  ? asm_exc_invalid_op+0x1b/0x20\n[  108.695209]  ? tick_sched_do_timer+0xa0/0xa0\n[  108.702284]  ? __hrtimer_run_queues+0x149/0x230\n[  108.709362]  ? __hrtimer_run_queues+0x135/0x230\n[  108.717127]  ? hrtimer_update_next_event+0x91/0xb0\n[  108.725417]  hrtimer_interrupt+0x109/0x220\n[  108.732697]  ? tick_do_update_jiffies64+0x9b/0xf0\n[  108.740098]  __sysvec_apic_timer_interrupt+0x61/0xe0\n[  108.748355]  sysvec_apic_timer_interrupt+0x7b/0x90\n[  108.755416]  \u003c/IRQ\u003e\n[  108.762668]  \u003cTASK\u003e\n[  108.769712]  asm_sysvec_apic_timer_interrupt+0x1b/0x20\n[  108.776870] RIP: 0010:native_safe_halt+0xb/0x10\n[  108.783285] Code: 2a ff 5b 41 5c 41 5d 5d c3 cc cc cc cc 4c 89 ee 48 c7 c7 40 4a 65 a4 e8 33 50 8c ff eb ca cc eb 07 0f 00 2d 99 29 43 00 fb f4 \u003cc3\u003e cc cc cc cc eb 07 0f 00 2d 89 29 43 00 f4 c3 cc cc cc cc cc 0f\n[  108.799570] RSP: 0018:ffffffffa4603e08 EFLAGS: 00000202\n[  108.807449] RAX: ffffffffa35d9880 RBX: ffffffffa461b440 RCX: 0000000000000000\n[  108.815106] RDX: 0000000000001775 RSI: 0000000000000083 RDI: 0000000000001776\n[  108.822980] RBP: ffffffffa4603e10 R08: ffff958c4b022d20 R09: 0000000000000000\n[  108.830737] R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000\n[  108.839986] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000\n[  108.848769]  ? __sched_text_end+0xa/0xa\n[  108.855732]  ? default_idle+0xe/0x20\n[  108.862000]  arch_cpu_idle+0x15/0x20\n[  108.867843]  default_idle_call+0x3e/0xd0\n[  108.873977]  cpuidle_idle_call+0x179/0x1e0\n[  108.880155]  do_idle+0x83/0xf0\n[  108.885696]  cpu_startup_entry+0x20/0x30\n[  108.891044]  rest_init+0xd3/0x100\n[  108.896186]  ? acpi_enable_subsystem+0x21d/0x229\n[  108.901442]  arch_call_rest_init+0xe/0x23\n[  108.906664]  start_kernel+0x4a9/0x4ca\n[  108.912255]  x86_64_start_reservations+0x24/0x2a\n[  108.919140]  x86_64_start_kernel+0xfb/0x106\n[  108.925085]  secondary_startup_64_no_verify+0xc2/0xcb\n[  108.930494]  \u003c/TASK\u003e\n[  108.935655] ---[ end trace 6c4aa22efe9864f8 ]---\n[  108.948692] ------------[ cut here ]------------\n[  108.954813] kernel BUG at kernel/irq_work.c:170!\n[  108.962184] invalid opcode: 0000 [#1] SMP PTI\n[  108.968077] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G        W         5.15.0-117-generic #127-Ubuntu\n[  108.974474] Hardware name: OpenStack Foundation OpenStack Nova, BIOS 1.16.3-debian-1.16.3-2 04/01/2014\n[  108.980351] RIP: 0010:irq_work_run_list+0x40/0x50\n[  108.986111] Code: 48 8b 07 48 85 c0 74 18 48 87 1f 48 85 db 74 10 48 89 df 48 8b 1b e8 7f ff ff ff 48 85 db 75 f0 48 8b 5d f8 c9 c3 cc cc cc cc \u003c0f\u003e 0b 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 55 48 c7 c7 48 cb\n[  108.998571] RSP: 0018:ffffb447c0003e88 EFLAGS: 00000206\n[  109.005334] RAX: 0000000000000200 RBX: ffff958c4b0207c0 RCX: 0000000000000001\n[  109.011694] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff958c4b02cb40\n[  109.017714] RBP: ffffb447c0003e90 R08: ffff958c4b022d20 R09: ffff958c4b032200\n[  109.024100] R10: 0000000000000000 R11: 0000000088980e80 R12: 0000000000000000\n[  109.030350] R13: 00000019610a48d0 R14: ffffffffa4603d58 R15: ffff958c4b022d00\n[  109.036634] FS:  0000000000000000(0000) GS:ffff958c4b000000(0000) knlGS:0000000000000000\n[  109.042809] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033\n[  109.049065] CR2: 00007f2dccab6de0 CR3: 0000000001068000 CR4: 00000000000006f0\n[  109.055587] Call Trace:\n[  109.063125]  \u003cIRQ\u003e\n[  109.072223]  ? show_trace_log_lvl+0x1d6/0x2ea\n[  109.080245]  ? show_trace_log_lvl+0x1d6/0x2ea\n[  109.086780]  ? irq_work_tick+0x3b/0x50\n[  109.093364]  ? show_regs.part.0+0x23/0x29\n[  109.100505]  ? __die_body.cold+0x8/0xd\n[  109.107651]  ? __die+0x2b/0x37\n[  109.113945]  ? die+0x30/0x60\n[  109.120867]  ? do_trap+0xbe/0x100\n[  109.126703]  ? do_error_trap+0x6f/0xb0\n[  109.132568]  ? irq_work_run_list+0x40/0x50\n[  109.138372]  ? exc_invalid_op+0x53/0x70\n[  109.143914]  ? irq_work_run_list+0x40/0x50\n[  109.149459]  ? asm_exc_invalid_op+0x1b/0x20\n[  109.155045]  ? irq_work_run_list+0x40/0x50\n[  109.161640]  irq_work_tick+0x3b/0x50\n[  109.167935]  update_process_times+0xba/0xd0\n[  109.173702]  tick_sched_handle+0x29/0x70\n[  109.179210]  tick_sched_timer+0x6f/0x90\n[  109.186037]  ? tick_sched_do_timer+0xa0/0xa0\n[  109.191759]  __hrtimer_run_queues+0x107/0x230\n[  109.197209]  ? hrtimer_update_next_event+0x91/0xb0\n[  109.203091]  hrtimer_interrupt+0x109/0x220\n[  109.208475]  ? tick_do_update_jiffies64+0x9b/0xf0\n[  109.216130]  __sysvec_apic_timer_interrupt+0x61/0xe0\n[  109.222037]  sysvec_apic_timer_interrupt+0x7b/0x90\n[  109.227458]  \u003c/IRQ\u003e\n[  109.232566]  \u003cTASK\u003e\n[  109.237783]  asm_sysvec_apic_timer_interrupt+0x1b/0x20\n[  109.243139] RIP: 0010:native_safe_halt+0xb/0x10\n[  109.248492] Code: 2a ff 5b 41 5c 41 5d 5d c3 cc cc cc cc 4c 89 ee 48 c7 c7 40 4a 65 a4 e8 33 50 8c ff eb ca cc eb 07 0f 00 2d 99 29 43 00 fb f4 \u003cc3\u003e cc cc cc cc eb 07 0f 00 2d 89 29 43 00 f4 c3 cc cc cc cc cc 0f\n[  109.261586] RSP: 0018:ffffffffa4603e08 EFLAGS: 00000202\n[  109.267532] RAX: ffffffffa35d9880 RBX: ffffffffa461b440 RCX: 0000000000000000\n[  109.273656] RDX: 0000000000001775 RSI: 0000000000000083 RDI: 0000000000001776\n[  109.279718] RBP: ffffffffa4603e10 R08: ffff958c4b022d20 R09: 0000000000000000\n[  109.285916] R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000\n[  109.292326] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000\n[  109.298865]  ? __sched_text_end+0xa/0xa\n[  109.305087]  ? default_idle+0xe/0x20\n[  109.311681]  arch_cpu_idle+0x15/0x20\n[  109.317295]  default_idle_call+0x3e/0xd0\n[  109.322498]  cpuidle_idle_call+0x179/0x1e0\n[  109.327432]  do_idle+0x83/0xf0\n[  109.332288]  cpu_startup_entry+0x20/0x30\n[  109.337278]  rest_init+0xd3/0x100\n[  109.341940]  ? acpi_enable_subsystem+0x21d/0x229\n[  109.346696]  arch_call_rest_init+0xe/0x23\n[  109.351503]  start_kernel+0x4a9/0x4ca\n[  109.356247]  x86_64_start_reservations+0x24/0x2a\n[  109.361020]  x86_64_start_kernel+0xfb/0x106\n[  109.365719]  secondary_startup_64_no_verify+0xc2/0xcb\n[  109.371207]  \u003c/TASK\u003e\n[  109.376354] Modules linked in: ahci libahci sctp libcrc32c ip6_udp_tunnel udp_tunnel ip_tables x_tables 8021q garp stp llc mrp nls_utf8 nls_iso8859_1 nls_ascii isofs hid_generic usbhid hid virtiofs virtio_rng virtio_gpu virtio_dma_buf drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec rc_core drm virtio_scsi virtio_net net_failover failover virtio_input virtio_blk qemu_fw_cfg 9pnet_virtio 9pnet pcnet32 8139cp mii ne2k_pci 8390 igbvf igb i2c_algo_bit dca e1000e e1000\n[  109.405154] ---[ end trace 6c4aa22efe9864f9 ]---\n[  109.412032] RIP: 0010:irq_work_run_list+0x40/0x50\n[  109.419047] Code: 48 8b 07 48 85 c0 74 18 48 87 1f 48 85 db 74 10 48 89 df 48 8b 1b e8 7f ff ff ff 48 85 db 75 f0 48 8b 5d f8 c9 c3 cc cc cc cc \u003c0f\u003e 0b 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 55 48 c7 c7 48 cb\n[  109.431638] RSP: 0018:ffffb447c0003e88 EFLAGS: 00000206\n[  109.437871] RAX: 0000000000000200 RBX: ffff958c4b0207c0 RCX: 0000000000000001\n[  109.447973] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff958c4b02cb40\n[  109.456294] RBP: ffffb447c0003e90 R08: ffff958c4b022d20 R09: ffff958c4b032200\n[  109.463805] R10: 0000000000000000 R11: 0000000088980e80 R12: 0000000000000000\n[  109.471395] R13: 00000019610a48d0 R14: ffffffffa4603d58 R15: ffff958c4b022d00\n[  109.480562] FS:  0000000000000000(0000) GS:ffff958c4b000000(0000) knlGS:0000000000000000\n[  109.488193] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033\n[  109.495679] CR2: 00007f2dccab6de0 CR3: 0000000001068000 CR4: 00000000000006f0\n[  109.502745] Kernel panic - not syncing: Fatal exception in interrupt\n[  109.511768] Kernel Offset: 0x21800000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)\n[  109.519982] ---[ end Kernel panic - not syncing: Fatal exception in interrupt ]---\n}}}\n\nTraceback (most recent call last):\n  File \"/opt/stack/tempest/tempest/common/waiters.py\", line 464, in wait_for_volume_attachment_remove_from_server\n    raise lib_exc.TimeoutException(message)\ntempest.lib.exceptions.TimeoutException: Request timed out\nDetails: Volume 2e3d9045-99a6-4540-9236-a3220292c947 failed to detach from server 21cd7b90-e282-4b71-9e70-ca58220d0cc3 within the required time (196 s) from the compute API perspective\n```\nIt is indeed a guest kernel hang. Case closed :D","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"0c2fa4a44068cefbd5081ab9527c947281d9e1f8","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":23,"id":"1fb29a27_6c8287b4","in_reply_to":"e308a47f_a821ee1b","updated":"2026-01-19 13:38:15.000000000","message":"https://opensearch.logs.openstack.org/_dashboards/app/data-explorer/discover?security_tenant\u003dglobal#?_a\u003d(discover:(columns:!(build_ref),isDirty:!f,sort:!()),metadata:(indexPattern:\u002794869730-aea8-11ec-9e6a-83741af3fdcd\u0027,view:discover))\u0026_q\u003d(filters:!(),query:(language:kuery,query:\u0027%22Run%20out%20of%20retry%20while%20detaching%20device%22\u0027))\u0026_g\u003d(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-4w,to:now))\n\nWe have a single hit so far. I remember in the past we had this issue more frequently","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a86ffaa207aa360f61ef823805d79f1c3f27fe8a","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":24,"id":"d92930c1_a6b7ae1d","updated":"2026-01-19 13:28:38.000000000","message":"resolved mergeconflict","commit_id":"5473d2bf5ab2e08003413ff738ae781c31970593"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"929628f4d92fcb1ee38795bc4c783ff0cb0320e1","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":25,"id":"9e0d459b_f87925b7","updated":"2026-01-20 09:08:08.000000000","message":"```\nKilled\n{2} nova.tests.unit.test_utils.SpawnAfterTestCase.test_spawn_after_submits_work_after_delay [] ... inprogress\n\nThe following tests exited without returning a status\nand likely segfaulted or crashed Python:\n\n\t* nova.tests.unit.test_utils.SpawnAfterTestCase.test_spawn_after_submits_work_after_delay\n\t```\nA new complication. I will check","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1f5c2ae934079e5c7658855ffa9972f6011ba495","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"46413699_ba02cb41","in_reply_to":"04b54da3_d1e61788","updated":"2026-01-21 13:06:11.000000000","message":"hm this is the fake message bus driver so that should be OK in unit test. Then the next possible thing to avoid mocking time.sleep but the it we need to work a bit to make the spawn_after logic testable","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9185e05b4fcb3c57402d4f6a12c0827339b2b84b","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":25,"id":"61b37dd2_b244f514","in_reply_to":"112386f1_80c18fe4","updated":"2026-01-22 09:47:06.000000000","message":"Solved by https://review.opendev.org/c/openstack/nova/+/974299","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"0e5ea72a2c7add51431553d42e29c39dcda48ef5","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"41676a28_b13bc9e2","in_reply_to":"2e0dc27f_9dd96770","updated":"2026-01-20 17:46:31.000000000","message":"This is the minimal reproducer of the test case hang:\n```\n$ cat test.list\nnova.tests.unit.test_service.ServiceTestCase.test_start_updates_version\nnova.tests.unit.test_service_auth.ServiceAuthTestCase.test_get_auth_plugin_wraps\nnova.tests.unit.test_test.ContainKeyValueTestCase.test_contain_key_value_exception\nnova.tests.unit.test_test.ContainKeyValueTestCase.test_contain_key_value_normal\nnova.tests.unit.test_test.JsonTestCase.test_compare_dict_string\nnova.tests.unit.test_test.JsonTestCase.test_fail_on_list_value\nnova.tests.unit.test_test.PatchExistsTestCase.test_with_patch_exists_false\nnova.tests.unit.test_utils.SpawnAfterTestCase.test_spawn_after_submits_work_after_delay\n$ tox -e py313 -- --serial --load-list test.list\n```\nIf you remove any of the tests from that list then the test set does not hang. This is a new for me. I will continue here tomorrow.","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"260e0853b3f984d5b8b37527e93962e6aa8fcd54","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"04b54da3_d1e61788","in_reply_to":"41676a28_b13bc9e2","updated":"2026-01-21 12:55:10.000000000","message":"It seems that some of the earlier tests started the rabbimq driver and leaked it. When the last test mocks sleep it removes the eventlet yield from the rabbitmq driver codepath making other eventlet hangs.\n\nBasically this was hit during the running of the last test case, which is wrong. We should never start the rabbit driver in our unit tests.\n\n```\nafter future.result\nafter test case\n  File \"/usr/lib/python3.12/threading.py\", line 1032, in _bootstrap\n    self._bootstrap_inner()\n  File \"/usr/lib/python3.12/threading.py\", line 1075, in _bootstrap_inner\n    self.run()\n  File \"/usr/lib/python3.12/threading.py\", line 1012, in run\n    self._target(*self._args, **self._kwargs)\n  File \"/home/gibi/upstream/git/openstack/nova/.tox/py312-threading/lib/python3.12/site-packages/oslo_utils/excutils.py\", line 289, in wrapper\n    return infunc(*args, **kwargs)\n  File \"/home/gibi/upstream/git/openstack/nova/.tox/py312-threading/lib/python3.12/site-packages/oslo_messaging/_drivers/base.py\", line 294, in _runner\n    incoming \u003d self._poll_style_listener.poll(\n  File \"/home/gibi/upstream/git/openstack/nova/.tox/py312-threading/lib/python3.12/site-packages/oslo_messaging/_drivers/base.py\", line 42, in wrapper\n    message \u003d func(in_self, timeout\u003dwatch.leftover(True))\n  File \"/home/gibi/upstream/git/openstack/nova/.tox/py312-threading/lib/python3.12/site-packages/oslo_messaging/_drivers/impl_fake.py\", line 83, in poll\n    time.sleep(pause)\n  File \"/usr/lib/python3.12/unittest/mock.py\", line 1139, in __call__\n    return self._mock_call(*args, **kwargs)\n  File \"/usr/lib/python3.12/unittest/mock.py\", line 1143, in _mock_call\n    return self._execute_mock_call(*args, **kwargs)\n  File \"/usr/lib/python3.12/unittest/mock.py\", line 1204, in _execute_mock_call\n    result \u003d effect(*args, **kwargs)\n  File \"/home/gibi/upstream/git/openstack/nova/nova/tests/unit/test_utils.py\", line 1569, in sleep\n    s \u003d traceback.format_stack()\n```","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"070bf8e41f8d6dbe0f1368f917ddaaafff682ec1","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"f64caeb0_527027ed","in_reply_to":"46413699_ba02cb41","updated":"2026-01-21 13:06:58.000000000","message":"Still the fake driver should not leak between test cases, we should clean that poll up at the end of the testcase that started it","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1da2f019ba74d9a876ac344703cc09677472be5e","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"d6a3f8bf_61782f34","in_reply_to":"8ef2c020_feb280f2","updated":"2026-01-20 14:32:35.000000000","message":"OK it is always the test case\n\nnova.tests.unit.test_utils.SpawnAfterTestCase.test_spawn_after_submits_work_after_delay\n\nthat crashes","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"f4db2711f8fae82b5bca2637225255fde0bc9690","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"8ef2c020_feb280f2","in_reply_to":"9e0d459b_f87925b7","updated":"2026-01-20 14:29:02.000000000","message":"Nice other than what copied above there is no other data available about such a crash.\n\nWe have multiple hits of crash in this series unfortunately: https://opensearch.logs.openstack.org/_dashboards/app/data-explorer/discover?security_tenant\u003dglobal#?_a\u003d(discover:(columns:!(build_ref,build_name,project,build_uuid,change_url),isDirty:!f,sort:!()),metadata:(indexPattern:\u002794869730-aea8-11ec-9e6a-83741af3fdcd\u0027,view:discover))\u0026_q\u003d(filters:!(),query:(language:kuery,query:\u0027%22likely%20segfaulted%20or%20crashed%20Python%22%20AND%20project:nova%20AND%20NOT%20build_name:%20arm64\u0027))\u0026_g\u003d(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-4w,to:now))\n\nI will try to see if I it always hits the same test case and if I can reproduce it locally...","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"efb1234fe41621491d539dcad85f61de059f06f5","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"2e0dc27f_9dd96770","in_reply_to":"d6a3f8bf_61782f34","updated":"2026-01-20 16:09:50.000000000","message":"I can reproduce it locally. I shrunk the necessary test cases down to 35 that creates a hang in the last test. (The timeout kills the interpreter there is no crash)\n\nnova.tests.unit.test_quota.QuotaEngineTestCase.test_limit_check_project_and_user\nnova.tests.unit.test_quota.QuotaEngineTestCase.test_resources\nnova.tests.unit.test_quota.QuotaIntegrationTestCase.test_many_cores_with_unlimited_quota\nnova.tests.unit.test_quota.QuotaIntegrationTestCase.test_too_many_injected_file_path_bytes\nnova.tests.unit.test_quota.QuotaIntegrationTestCase.test_too_many_injected_files\nnova.tests.unit.test_quota.UnifiedLimitsDriverTestCase.test_get_project_quotas\nnova.tests.unit.test_quota.UnifiedLimitsIntegrationTestCase.test_max_injected_file_path_bytes\nnova.tests.unit.test_quota.UnifiedLimitsIntegrationTestCase.test_too_many_cores\nnova.tests.unit.test_quota.UnifiedLimitsIntegrationTestCase.test_too_many_injected_file_content_bytes\nnova.tests.unit.test_quota.UnifiedLimitsIntegrationTestCase.test_too_many_injected_file_path_bytes\nnova.tests.unit.test_quota.UnifiedLimitsIntegrationTestCase.test_too_many_injected_files\nnova.tests.unit.test_quota.UnifiedLimitsIntegrationTestCase.test_too_many_instances\nnova.tests.unit.test_rpc.TestProfilerRequestContextSerializer.test_serialize_context\nnova.tests.unit.test_rpc.TestRPC.test_add_extra_exmods\nnova.tests.unit.test_rpc.TestRPC.test_cleanup\nnova.tests.unit.test_rpc.TestRPC.test_cleanup_legacy_notifier_null\nnova.tests.unit.test_rpc.TestRPC.test_get_client\nnova.tests.unit.test_rpc.TestRPC.test_get_server_profiler_enabled\nnova.tests.unit.test_rpc.TestRPC.test_get_transport_url\nnova.tests.unit.test_rpc.TestRPC.test_init_unversioned\nnova.tests.unit.test_rpc.TestRequestContextSerializer.test_deserialize_context\nnova.tests.unit.test_rpc.TestRequestContextSerializer.test_serialize_entity\nnova.tests.unit.test_rpc.TestRequestContextSerializer.test_serialize_entity_null_base\nnova.tests.unit.test_safeutils.WrappedCodeTestCase.test_double_wrapped\nnova.tests.unit.test_service.ServiceTestCase.test_parent_graceful_shutdown_with_cleanup_host\nnova.tests.unit.test_service.ServiceTestCase.test_service_check_create_race_binary_exists\nnova.tests.unit.test_service.ServiceTestCase.test_start_updates_version\nnova.tests.unit.test_service.TestLauncher.test_launch_app\nnova.tests.unit.test_service_auth.ServiceAuthTestCase.test_get_auth_plugin_wraps\nnova.tests.unit.test_test.ContainKeyValueTestCase.test_contain_key_value_exception\nnova.tests.unit.test_test.ContainKeyValueTestCase.test_contain_key_value_normal\nnova.tests.unit.test_test.JsonTestCase.test_compare_dict_string\nnova.tests.unit.test_test.JsonTestCase.test_fail_on_list_value\nnova.tests.unit.test_test.PatchExistsTestCase.test_with_patch_exists_false\n nova.tests.unit.test_utils.SpawnAfterTestCase.test_spawn_after_submits_work_after_delay","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1dbf5fc6bdba9b32d106f30c7be94f3948310c77","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":25,"id":"112386f1_80c18fe4","in_reply_to":"f64caeb0_527027ed","updated":"2026-01-21 18:51:23.000000000","message":"Some tests use nova.service.Service().start() but never calls .stop() and indirectly that leaks a running RPC poller thread from the fake RPC implementation.","commit_id":"48d1233fcc10d0f506d46333310903d724e49601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"c124c8d1e5d18770491061ae5d9a0878173fd4d0","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":26,"id":"aeeae4a8_fd38b172","updated":"2026-01-23 10:36:23.000000000","message":"I need to hold this. The fact that we cannot cancel the task in threading mode during the 15 second delay creates a regression.","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"784237b5c9d4024c1a7f28d54efe59bbc5d98ab8","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":26,"id":"dc06f794_a7ac6139","updated":"2026-01-22 16:23:39.000000000","message":"Thanks for all the good observations. There are two valid concerns that I would like to investigate further before we decide about what direction to go. Lets hold this in a bit while I investigate.","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"07f06db381a586e6805cd7980f604669ebdf63ba","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":26,"id":"941450b3_42f0e664","updated":"2026-01-22 13:10:39.000000000","message":"i have some concerns with the thread usage of spawn_after so -1 just to highlight that so we can make sure we are ok with this in the short term\n\nim debatign if the libvirt event usage of this shoudl use its own execuror pool to not consudme the defautl one. over all i like the direction","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"78235deb5a334af17c02c26c5f7f26633f438e08","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":26,"id":"39b29706_efd35a2d","updated":"2026-01-22 20:21:56.000000000","message":"the  nova-next failure is a guest kernel panic, this time not during volume detach\n```\n[   14.541631] Kernel panic - not syncing: Attempted to kill init! exitcode\u003d0x00001000\n[   14.543026] CPU: 0 PID: 1 Comm: init Not tainted 5.15.0-117-generic #127-Ubuntu\n[   14.543874] Hardware name: OpenStack Foundation OpenStack Nova, BIOS 1.16.3-debian-1.16.3-2 04/01/2014\n[   14.545101] Call Trace:\n[   14.546348]  \u003cTASK\u003e\n[   14.546967]  show_stack+0x52/0x5c\n[   14.548113]  dump_stack_lvl+0x4a/0x63\n[   14.548646]  dump_stack+0x10/0x16\n[   14.549111]  panic+0x15c/0x33b\n[   14.549592]  ? __vm_munmap+0x96/0x130\n[   14.550093]  do_exit.cold+0x15/0xa0\n[   14.550599]  __x64_sys_exit+0x1b/0x20\n[   14.551102]  x64_sys_call+0x1f30/0x1fa0\n[   14.551758]  do_syscall_64+0x56/0xb0\n[   14.552242]  ? vfs_write+0x1d5/0x270\n[   14.552760]  ? ksys_write+0x67/0xf0\n[   14.553246]  ? exit_to_user_mode_prepare+0x37/0xb0\n[   14.553840]  ? syscall_exit_to_user_mode+0x2c/0x50\n[   14.554400]  ? x64_sys_call+0x47c/0x1fa0\n[   14.554925]  ? do_syscall_64+0x63/0xb0\n[   14.555416]  ? update_load_avg+0x82/0x660\n[   14.555973]  ? set_next_entity+0xe9/0x230\n[   14.566105]  ? pick_next_task_fair+0x242/0x510\n[   14.570626]  ? enqueue_task_fair+0x17e/0x7f0\n[   14.574927]  ? sched_clock+0x9/0x10\n[   14.579185]  ? sched_clock_cpu+0x12/0xf0\n[   14.583412]  ? psi_task_switch+0xc6/0x220\n[   14.587656]  ? finish_task_switch.isra.0+0x7e/0x280\n[   14.591975]  ? __schedule+0x256/0x590\n[   14.596351]  ? rcu_core+0x122/0x2a0\n[   14.601244]  ? schedule+0x69/0x110\n[   14.607048]  ? exit_to_user_mode_loop+0x7e/0x160\n[   14.612342]  ? exit_to_user_mode_prepare+0x99/0xb0\n[   14.616798]  ? irqentry_exit_to_user_mode+0xe/0x20\n[   14.621173]  ? irqentry_exit+0x1d/0x30\n[   14.625225]  ? sysvec_apic_timer_interrupt+0x4e/0x90\n[   14.629604]  entry_SYSCALL_64_after_hwframe+0x6c/0xd6\n[   14.633960] RIP: 0033:0x7f5bf013155e\n[   14.638855] Code: 05 d7 2a 00 00 4c 89 f9 bf 02 00 00 00 48 8d 35 fb 0d 00 00 48 8b 10 31 c0 e8 50 d2 ff ff bf 10 00 00 00 b8 3c 00 00 00 0f 05 \u003c48\u003e 8d 15 f3 2a 00 00 f7 d8 89 02 48 83 ec 20 49 8b 8c 24 b8 00 00\n[   14.648250] RSP: 002b:00007fff29eb83c0 EFLAGS: 00000207 ORIG_RAX: 000000000000003c\n[   14.653022] RAX: ffffffffffffffda RBX: 00007fff29eb9670 RCX: 00007f5bf013155e\n[   14.659047] RDX: 0000000000000002 RSI: 0000000000001000 RDI: 0000000000000010\n[   14.663932] RBP: 00007fff29eb9650 R08: 00007f5bf012a000 R09: 00007f5bf012a01a\n[   14.668695] R10: 0000000000000001 R11: 0000000000000207 R12: 00007f5bf012b040\n[   14.673422] R13: 00000000004bae50 R14: 0000000000000000 R15: 0000000000403d66\n[   14.678571]  \u003c/TASK\u003e\n[   14.684416] Kernel Offset: 0x31400000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)\n[   14.690132] ---[ end Kernel panic - not syncing: Attempted to kill init! exitcode\u003d0x00001000 ]---\n```","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"fb315aa8c7ab15ac015a96aaa23d18306c1db418","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":26,"id":"3618200b_24b5ee72","in_reply_to":"941450b3_42f0e664","updated":"2026-01-29 10:39:42.000000000","message":"Done","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":8556,"name":"Ghanshyam Maan","display_name":"Ghanshyam Maan","email":"gmaan.os14@gmail.com","username":"ghanshyam"},"change_message_id":"07a39db9b22b12977907909b2e5b8429a97b45b7","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":29,"id":"e6fbcbe5_04fb0471","updated":"2026-02-05 01:13:43.000000000","message":"+W as nova-next job fix is merged now","commit_id":"a89c1b44c56e04223f61925305b0f48f3791c7d8"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"745dfcc47d65819e4cd498a9c9ede9b0c821475c","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":29,"id":"51d6346c_996e32de","updated":"2026-02-04 16:51:30.000000000","message":"holding +w until we merge https://review.opendev.org/c/openstack/nova/+/975542\n\nbut im ok with deferring the scaling issue to the later patch","commit_id":"a89c1b44c56e04223f61925305b0f48f3791c7d8"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"e11f10a6cc9f13dd01ee1009d43e627b06b0ac24","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":29,"id":"a19b5a5c_554ffd7a","updated":"2026-02-03 14:01:41.000000000","message":"reapplying my +2 now that there a solution as gmaan said.\nHolding my +W for the moment.","commit_id":"a89c1b44c56e04223f61925305b0f48f3791c7d8"},{"author":{"_account_id":8556,"name":"Ghanshyam Maan","display_name":"Ghanshyam Maan","email":"gmaan.os14@gmail.com","username":"ghanshyam"},"change_message_id":"7b444531ab0d138175c10d52f417cd01d6b224d4","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":29,"id":"020a5346_2600d0b3","updated":"2026-02-02 01:18:21.000000000","message":"this lgtm and the raise concern is solved by the https://review.opendev.org/c/openstack/nova/+/974445","commit_id":"a89c1b44c56e04223f61925305b0f48f3791c7d8"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"745dfcc47d65819e4cd498a9c9ede9b0c821475c","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":29,"id":"7d095080_d7e6b0da","in_reply_to":"020a5346_2600d0b3","updated":"2026-02-04 16:51:30.000000000","message":"ack","commit_id":"a89c1b44c56e04223f61925305b0f48f3791c7d8"}],"nova/tests/unit/test_utils.py":[{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"b22c11a8e83c21873f6e6a8287e31c006ad5d059","unresolved":false,"context_lines":[{"line_number":1568,"context_line":"        future.result()"},{"line_number":1569,"context_line":""},{"line_number":1570,"context_line":"        task.assert_called_once_with(13, foo\u003d\u0027bar\u0027)"},{"line_number":1571,"context_line":"        mock_sleep.assert_called_once_with(0.1)"},{"line_number":1572,"context_line":""},{"line_number":1573,"context_line":""},{"line_number":1574,"context_line":"class ExecutorStatsTestCase(test.NoDBTestCase):"}],"source_content_type":"text/x-python","patch_set":23,"id":"da456560_99a848d0","line":1571,"updated":"2026-01-19 10:01:21.000000000","message":"thanks for that new test that covers the issue","commit_id":"4bcfcda94189e97e95c5dbd6b1b94c10b2a4dd28"}],"nova/utils.py":[{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"89ffc9e7ad64129738a18166bdde9ca19cd341b2","unresolved":true,"context_lines":[{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, args, kwargs)"},{"line_number":588,"context_line":""},{"line_number":589,"context_line":""},{"line_number":590,"context_line":"def _executor_is_full(executor):"}],"source_content_type":"text/x-python","patch_set":20,"id":"e5fbe1f7_83ae4a59","line":587,"updated":"2026-01-15 15:56:00.000000000","message":"shouldn\u0027t be ``spawn(delayed, *args, **kwargs)`` ?","commit_id":"9469ddea6e8ebd9039a8a87f33f171f78e988299"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"839272ce8e298ee5a1c86f90d3a3281244d25aba","unresolved":false,"context_lines":[{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, args, kwargs)"},{"line_number":588,"context_line":""},{"line_number":589,"context_line":""},{"line_number":590,"context_line":"def _executor_is_full(executor):"}],"source_content_type":"text/x-python","patch_set":20,"id":"7901ef3e_fedc26af","line":587,"in_reply_to":"e5fbe1f7_83ae4a59","updated":"2026-01-16 11:38:54.000000000","message":"you are right, thanks for catching it. Fixed now","commit_id":"9469ddea6e8ebd9039a8a87f33f171f78e988299"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"784237b5c9d4024c1a7f28d54efe59bbc5d98ab8","unresolved":true,"context_lines":[{"line_number":581,"context_line":"    \"\"\"Executing the function asynchronously after the given time.\"\"\""},{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"}],"source_content_type":"text/x-python","patch_set":26,"id":"86be2f24_bfd1232e","line":584,"updated":"2026-01-22 16:23:39.000000000","message":"We discussed with Sylvain over video that the caller in the libvirt driver use future.cancel() sometimes on these tasks. An already running task in threading mode is not cancellable without cooperation from the task\u0027s own logic. \n\nHowever here we have a generic logic that allows generic cooperation with cancel. We can check for cancellation after time.sleep and before calling the real function. \n\nI will investigate it further.\n\n--\nhttps://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future.cancel\n\n\u003e Attempt to cancel the call. If the call is currently being executed or finished running and cannot be cancelled then the method will return False, otherwise the call will be cancelled and the method will return True.","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"5144e9523ded73f2949c71831821d1b0d72ed14f","unresolved":false,"context_lines":[{"line_number":581,"context_line":"    \"\"\"Executing the function asynchronously after the given time.\"\"\""},{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"}],"source_content_type":"text/x-python","patch_set":26,"id":"1769522b_f87c3eba","line":584,"in_reply_to":"2e1be617_b87950d1","updated":"2026-01-29 10:39:11.000000000","message":"solution is ready for review","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"c124c8d1e5d18770491061ae5d9a0878173fd4d0","unresolved":true,"context_lines":[{"line_number":581,"context_line":"    \"\"\"Executing the function asynchronously after the given time.\"\"\""},{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"}],"source_content_type":"text/x-python","patch_set":26,"id":"f7e4af61_47893fe5","line":584,"in_reply_to":"86be2f24_bfd1232e","updated":"2026-01-23 10:36:23.000000000","message":"Moreover the ability to cancel the task during the delay is a crucial part of the virt driver logic so this is actually a bug right now that regresses the openstack server reboot --hard logic as nova compute will force the VM to shutoff due to libvirt STOPPED signal during the hard reboot and that might race with the start of the VM during the hard reboot logic in the driver.","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"43a292d9e7d54bcf9fc4dc9dbc076f0ce2a55358","unresolved":true,"context_lines":[{"line_number":581,"context_line":"    \"\"\"Executing the function asynchronously after the given time.\"\"\""},{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"}],"source_content_type":"text/x-python","patch_set":26,"id":"2e1be617_b87950d1","line":584,"in_reply_to":"b362ddeb_0e897b39","updated":"2026-01-27 08:30:27.000000000","message":"Potential solution is forming here https://review.opendev.org/c/openstack/nova/+/974445/4?usp\u003drelated-change","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"c7491a6124acafb52e2c1cd33d503c65f3988570","unresolved":true,"context_lines":[{"line_number":581,"context_line":"    \"\"\"Executing the function asynchronously after the given time.\"\"\""},{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"}],"source_content_type":"text/x-python","patch_set":26,"id":"b362ddeb_0e897b39","line":584,"in_reply_to":"f7e4af61_47893fe5","updated":"2026-01-26 15:43:28.000000000","message":"Thanks gibi for the hard investigation, OK so take time for finding a solution but we can discuss it on the next eventlet meeting","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"07f06db381a586e6805cd7980f604669ebdf63ba","unresolved":true,"context_lines":[{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"},{"line_number":588,"context_line":""}],"source_content_type":"text/x-python","patch_set":26,"id":"848df587_69fec90d","line":585,"updated":"2026-01-22 13:10:39.000000000","message":"hum this is going to consume a executor worker for the delay time\nim not sure that is a good idea.\n\nmy inclidation is ot say we shoudl have a single thread that manages submitting deferted work.\n\nbasically what i think we shoudl do is compute the execution time as a unix time stamp (an int) and use a min heap/priority queue to keep track of a tuple of the function,args, time  and executor.\n\nthat backgorund thread would basiclly do monitor a queue defered work adn sleep for the smaller of the time remaining till the next task to submit to the actual executor or a short pooling interval i.e. 0.1 seconds\n\nim not suggesting we implement this in this change. but if are going to use spawn after in code often i dont hink we can tie up an executor thread time just sleeping when we shoudl eb doing useful work\n\n\nif i was to do this properly i woudl aslo suggest we make this change in futureist\ni.e. add teh abllity for it to nativly supprot spawn_after to the executor interface and implemnt it for at least the threaded executor although it woudl be pretty simpler for the other as well.\n\n\nto summerise i think this is ok for eventlet or as as a very infrequetly used implemention in the threaded backend. i think if we want to use this more often we should invest some time in a seperate patch to build out that more effience way of doing defered executions.","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"c58c1ee783d7e7d1c456dabcc5de521bbb70faa3","unresolved":true,"context_lines":[{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"},{"line_number":588,"context_line":""}],"source_content_type":"text/x-python","patch_set":26,"id":"ca692cad_2f2f3b40","line":585,"in_reply_to":"45470396_8733bdf2","updated":"2026-01-22 17:27:20.000000000","message":"i looked breifly at that when i was wriging that comment nad no its jsut usign sleep\n\ni was goign to spit ball a few optiosn with ai but the direction iwas hteadign was using the timeout on https://docs.python.org/3/library/queue.html#queue.Queue.get\nin blocking mode to have the backround thread wait tille eitehr there is a new task in its submition queu or the nearest derfer tasks tiemout is passed\n\nso what im thinking is\n\nwe add a submit_after and submit_at functions to the executor in futureist\nwe have both fo those add the task to a defered_task queue\n via a function that will create a future and return it once the task and future are added to the defered queue\n \n i knwo we cant use the concurent futrues set_resutl  https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future.set_result as that can only be set by the executor but we have ways to have the same effect\n \n we did it for nova withthe event that returns a result for exampel so we woudl do someitng like that in the futureist futrue or simialr.\n \n the backgorund thread would consume for the defered_queue in a busy loop\n \n it woudl sue the time out on queue.get in blockign mode to sleep for a task.\n\nwhen get returns it has to do 1 of 2 things. if get retun a non None value we have recived a new defered task.so it will will calulate the time till it shodl be run.\nif it has past it will call submit on self(the executor) to run the task now.\nif not it will add it to a second priroty queue (ordered by the run timestamp)\nand then call get on teh queu again.\n\nif get returned none it means we work up because the timeout expireed meaning the first task on the priority queue is not due to eb executed.\nso if get returnd none we just pop the pirority que adn submit it to the executor.\n\nin both casees when we submit it to the executor we need to use funtools.partal to wrap it in a tiny funciton that just does this\n\ndef wrapper (future, task):\n  try:\n     future.set_result\u003dtask()\n  except Exeception as e:\n     future.set_exception(e)\n  \n\ni.e. propagate the result/error to the futre like object we retruend wehn it was encued.\n\nsubmit_after and submit_at can be implemetne in terms fo each other you are jsut convertign the time specification.\n\nwe can do this entirly in nova initally too if we ant \n\nyour current spwan after is fine for eventlet\nwe only need the more complex approch for threaded\n\nso we can do a minimal implemation for threaed now and upstream it after or do it in futureist form the start.\n\n\nin terms fo looping call it is using sleep internally and i actully think i we add this to futureist we shoudl reimplemnt looping call in terms of submit_after or submit_at\n\nnot im using submit_* above just to allign to futureist fucntion names on the executor interface but in nvoa we can keep the wrapper called spawn_after to keep it consitent with the other spwan_ functions.\n\ni know thats a lot but that a brain dump of what i have been thinking about sinc ei reviewd this earlier.","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"0e07c59ad8400e911a1b0113c54c9b0f110abd62","unresolved":true,"context_lines":[{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"},{"line_number":588,"context_line":""}],"source_content_type":"text/x-python","patch_set":26,"id":"76b70d29_7645bbc1","line":585,"in_reply_to":"643b9185_bb5db035","updated":"2026-01-23 15:56:35.000000000","message":"This is the WIP patch but it is not fully working https://review.opendev.org/c/openstack/nova/+/974445/1","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"fb315aa8c7ab15ac015a96aaa23d18306c1db418","unresolved":false,"context_lines":[{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"},{"line_number":588,"context_line":""}],"source_content_type":"text/x-python","patch_set":26,"id":"8494afb5_2d4a7e9d","line":585,"in_reply_to":"76b70d29_7645bbc1","updated":"2026-01-29 10:39:42.000000000","message":"solution is reviewable now","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"784237b5c9d4024c1a7f28d54efe59bbc5d98ab8","unresolved":true,"context_lines":[{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"},{"line_number":588,"context_line":""}],"source_content_type":"text/x-python","patch_set":26,"id":"45470396_8733bdf2","line":585,"in_reply_to":"848df587_69fec90d","updated":"2026-01-22 16:23:39.000000000","message":"Good point. I will do some investigation to see how complex it is to use a single thread to schedule the delayed work. We might have something already existing around looping calls to reuse...","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"f2904476646fe4e2d345b18023fb701df79fd445","unresolved":true,"context_lines":[{"line_number":582,"context_line":""},{"line_number":583,"context_line":"    def delayed(*args, **kwargs):"},{"line_number":584,"context_line":"        time.sleep(seconds)"},{"line_number":585,"context_line":"        return func(*args, **kwargs)"},{"line_number":586,"context_line":""},{"line_number":587,"context_line":"    return spawn(delayed, *args, **kwargs)"},{"line_number":588,"context_line":""}],"source_content_type":"text/x-python","patch_set":26,"id":"643b9185_bb5db035","line":585,"in_reply_to":"ca692cad_2f2f3b40","updated":"2026-01-22 20:18:07.000000000","message":"Thanks. I think I got your suggestion. I\u0027m a bit hesitant to bringing in that level of complexity to the codebase. I dig a bit around why we need spawn_after and how we are using it.\n\nThe delay is needed to avoid a power_sync of a VM in the DB during an openstack server reboot execution. During that operation libvirt will emit 3 lifecycle events for the domain in sequence\n* stopped\n* resumed\n* started\n\nThe power sync is triggering on the stopped event which is correct if the domain is really powered off or crashed. But causes a race condition and an unnecessary server stop api call if it is triggered while a started event can appear in a short succession. So the current code delays the processing of the stopped event by 15 seconds and if a started event is received in the meantime then cancels the processing of the stopped event.\n\nThis sequence of events are not triggered if the VM is rebooted from within the guest or if it is rebooted with virsh via acpi or the guest agent method. (No other methods seems to be implemented for kvm quest based on my trials) This methods create no event in libvirt at all.\n\nSo this is really a one-off usage where I would not chase a generic solution (yet). If we don\u0027t try to implement a generic spawn_after, just cater for the current narrow use case then I see one significant simplification opportunity due to the fact that each stop event is delayed by the same constant 15 seconds. Therefore event timers are expiring in sequence and no newer event needs to cause a re-schedule of the current wake time as that is determined by the oldest event in the queue. So a single thread, a single list of events and a single next-wakeup-time calculation is enough to handle this narrow use case.\n\nI will draw up some code for this simplified case tomorrow.\n\nBtw, this also capable of solving the cancel problem raised by Sylvain above. We can simply need to mark events as cancelled and not process cancelled events when the thread wakes up.","commit_id":"fd9a24d850631fca2155bcc157d81ced2a6e11b6"}],"nova/virt/libvirt/host.py":[{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":true,"context_lines":[{"line_number":113,"context_line":"    return _loaders"},{"line_number":114,"context_line":""},{"line_number":115,"context_line":""},{"line_number":116,"context_line":"class LibvirtEventHandler:"},{"line_number":117,"context_line":"    def __init__(self, conn_event_handler\u003dNone, lifecycle_event_handler\u003dNone):"},{"line_number":118,"context_line":"        self._lifecycle_event_handler \u003d lifecycle_event_handler"},{"line_number":119,"context_line":"        self._conn_event_handler \u003d conn_event_handler"}],"source_content_type":"text/x-python","patch_set":15,"id":"47b92b1d_21adef92","line":116,"updated":"2025-12-11 11:41:52.000000000","message":"This is a big file already. Could we put this in an e.g. `event_handler` module, and move the tests? Doing it as a follow-up is fine.","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"8191602c40bd337b73125de782a0f0820865f3ee","unresolved":false,"context_lines":[{"line_number":113,"context_line":"    return _loaders"},{"line_number":114,"context_line":""},{"line_number":115,"context_line":""},{"line_number":116,"context_line":"class LibvirtEventHandler:"},{"line_number":117,"context_line":"    def __init__(self, conn_event_handler\u003dNone, lifecycle_event_handler\u003dNone):"},{"line_number":118,"context_line":"        self._lifecycle_event_handler \u003d lifecycle_event_handler"},{"line_number":119,"context_line":"        self._conn_event_handler \u003d conn_event_handler"}],"source_content_type":"text/x-python","patch_set":15,"id":"eba49c5c_6c213ce6","line":116,"in_reply_to":"42c7eec2_2a7df99f","updated":"2026-01-19 08:24:19.000000000","message":"Acknowledged","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":true,"context_lines":[{"line_number":113,"context_line":"    return _loaders"},{"line_number":114,"context_line":""},{"line_number":115,"context_line":""},{"line_number":116,"context_line":"class LibvirtEventHandler:"},{"line_number":117,"context_line":"    def __init__(self, conn_event_handler\u003dNone, lifecycle_event_handler\u003dNone):"},{"line_number":118,"context_line":"        self._lifecycle_event_handler \u003d lifecycle_event_handler"},{"line_number":119,"context_line":"        self._conn_event_handler \u003d conn_event_handler"}],"source_content_type":"text/x-python","patch_set":15,"id":"92a51c38_72a4edd4","line":116,"in_reply_to":"47b92b1d_21adef92","updated":"2025-12-17 15:17:32.000000000","message":"After eventlet is removed in this whole thing discussed here shrinks to ~50 lines of code as we only need the ThreadingLibvirtEventHandler implementation. Having that 50 lines out in a separate file does not make sense to me. \n\nAlso I believe further code simplification can be done when we only need to support threading impl in the caller side in the driver allowing further reduction of code complexity. So I would like to keep the code close to see spot those opportunities easier.","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"89ffc9e7ad64129738a18166bdde9ca19cd341b2","unresolved":true,"context_lines":[{"line_number":113,"context_line":"    return _loaders"},{"line_number":114,"context_line":""},{"line_number":115,"context_line":""},{"line_number":116,"context_line":"class LibvirtEventHandler:"},{"line_number":117,"context_line":"    def __init__(self, conn_event_handler\u003dNone, lifecycle_event_handler\u003dNone):"},{"line_number":118,"context_line":"        self._lifecycle_event_handler \u003d lifecycle_event_handler"},{"line_number":119,"context_line":"        self._conn_event_handler \u003d conn_event_handler"}],"source_content_type":"text/x-python","patch_set":15,"id":"42c7eec2_2a7df99f","line":116,"in_reply_to":"92a51c38_72a4edd4","updated":"2026-01-15 15:56:00.000000000","message":"agreed with gibi here, we will have code simplication once we remove eventlet.","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":true,"context_lines":[{"line_number":121,"context_line":"    def _queue_event(self, event):"},{"line_number":122,"context_line":"        pass"},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"    def start(self):"},{"line_number":125,"context_line":"        pass"},{"line_number":126,"context_line":""},{"line_number":127,"context_line":"    @classmethod"}],"source_content_type":"text/x-python","patch_set":15,"id":"78cb734f_79dc41fe","line":124,"updated":"2025-12-11 11:41:52.000000000","message":"nit: Should we make this abstract/raise `NotImplementedError`?","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":false,"context_lines":[{"line_number":121,"context_line":"    def _queue_event(self, event):"},{"line_number":122,"context_line":"        pass"},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"    def start(self):"},{"line_number":125,"context_line":"        pass"},{"line_number":126,"context_line":""},{"line_number":127,"context_line":"    @classmethod"}],"source_content_type":"text/x-python","patch_set":15,"id":"b1160e9c_df7821f5","line":124,"in_reply_to":"78cb734f_79dc41fe","updated":"2025-12-17 15:17:32.000000000","message":"yeah, good idea.","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":true,"context_lines":[{"line_number":134,"context_line":"                conn_event_handler, lifecycle_event_handler)"},{"line_number":135,"context_line":""},{"line_number":136,"context_line":""},{"line_number":137,"context_line":"class EventletLibvirtEventHandler(LibvirtEventHandler):"},{"line_number":138,"context_line":"    def __init__(self, conn_event_handler\u003dNone, lifecycle_event_handler\u003dNone):"},{"line_number":139,"context_line":"        super().__init__(conn_event_handler, lifecycle_event_handler)"},{"line_number":140,"context_line":""}],"source_content_type":"text/x-python","patch_set":15,"id":"315e6498_46370046","line":137,"updated":"2025-12-11 11:41:52.000000000","message":"nit: Do we want these to be private? No one should use them directly, right?","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":false,"context_lines":[{"line_number":134,"context_line":"                conn_event_handler, lifecycle_event_handler)"},{"line_number":135,"context_line":""},{"line_number":136,"context_line":""},{"line_number":137,"context_line":"class EventletLibvirtEventHandler(LibvirtEventHandler):"},{"line_number":138,"context_line":"    def __init__(self, conn_event_handler\u003dNone, lifecycle_event_handler\u003dNone):"},{"line_number":139,"context_line":"        super().__init__(conn_event_handler, lifecycle_event_handler)"},{"line_number":140,"context_line":""}],"source_content_type":"text/x-python","patch_set":15,"id":"a8b6c363_54211757","line":137,"in_reply_to":"315e6498_46370046","updated":"2025-12-17 15:17:32.000000000","message":"yepp we can.","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":true,"context_lines":[{"line_number":142,"context_line":"        from eventlet import patcher"},{"line_number":143,"context_line":""},{"line_number":144,"context_line":"        self.native_threading \u003d patcher.original(\"threading\")"},{"line_number":145,"context_line":"        self.native_Queue \u003d patcher.original(\"queue\")"},{"line_number":146,"context_line":""},{"line_number":147,"context_line":"        self._event_thread \u003d None"},{"line_number":148,"context_line":"        # This is a Queue between the native libvirt event thread"}],"source_content_type":"text/x-python","patch_set":15,"id":"55ead25f_51474319","line":145,"updated":"2025-12-11 11:41:52.000000000","message":"```suggestion\n        self.native_queue \u003d patcher.original(\"queue\")\n```\n\nI\u0027m guessing `Queue` was used originally because that was the name of the module in Python 2, but we don\u0027t care about Python 2 now","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":false,"context_lines":[{"line_number":142,"context_line":"        from eventlet import patcher"},{"line_number":143,"context_line":""},{"line_number":144,"context_line":"        self.native_threading \u003d patcher.original(\"threading\")"},{"line_number":145,"context_line":"        self.native_Queue \u003d patcher.original(\"queue\")"},{"line_number":146,"context_line":""},{"line_number":147,"context_line":"        self._event_thread \u003d None"},{"line_number":148,"context_line":"        # This is a Queue between the native libvirt event thread"}],"source_content_type":"text/x-python","patch_set":15,"id":"6d99906b_1838e585","line":145,"in_reply_to":"55ead25f_51474319","updated":"2025-12-17 15:17:32.000000000","message":"yeah it is pretty inconsistent anyhow. Fixed.","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":true,"context_lines":[{"line_number":165,"context_line":"        libvirt event loop integration. This forwards events"},{"line_number":166,"context_line":"        to a green thread which does the actual dispatching."},{"line_number":167,"context_line":"        \"\"\""},{"line_number":168,"context_line":"        super().start()"},{"line_number":169,"context_line":""},{"line_number":170,"context_line":"        libvirt.virEventRegisterDefaultImpl()"},{"line_number":171,"context_line":""}],"source_content_type":"text/x-python","patch_set":15,"id":"71baf3f0_2c66de24","line":168,"updated":"2025-12-11 11:41:52.000000000","message":"nit: this doesn\u0027t seem necessary as the super class implementation doesn\u0027t do anything. You also don\u0027t call super in `_queue_event` which is also defined in the super class","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":false,"context_lines":[{"line_number":165,"context_line":"        libvirt event loop integration. This forwards events"},{"line_number":166,"context_line":"        to a green thread which does the actual dispatching."},{"line_number":167,"context_line":"        \"\"\""},{"line_number":168,"context_line":"        super().start()"},{"line_number":169,"context_line":""},{"line_number":170,"context_line":"        libvirt.virEventRegisterDefaultImpl()"},{"line_number":171,"context_line":""}],"source_content_type":"text/x-python","patch_set":15,"id":"9b9caf55_c14aed22","line":168,"in_reply_to":"71baf3f0_2c66de24","updated":"2025-12-17 15:17:32.000000000","message":"Done","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":true,"context_lines":[{"line_number":279,"context_line":""},{"line_number":280,"context_line":"        This requires running a native thread that pools for libvirt events."},{"line_number":281,"context_line":"        \"\"\""},{"line_number":282,"context_line":"        super().start()"},{"line_number":283,"context_line":""},{"line_number":284,"context_line":"        libvirt.virEventRegisterDefaultImpl()"},{"line_number":285,"context_line":""}],"source_content_type":"text/x-python","patch_set":15,"id":"6dc58a8c_8d3c1161","line":282,"updated":"2025-12-11 11:41:52.000000000","message":"As above","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":false,"context_lines":[{"line_number":279,"context_line":""},{"line_number":280,"context_line":"        This requires running a native thread that pools for libvirt events."},{"line_number":281,"context_line":"        \"\"\""},{"line_number":282,"context_line":"        super().start()"},{"line_number":283,"context_line":""},{"line_number":284,"context_line":"        libvirt.virEventRegisterDefaultImpl()"},{"line_number":285,"context_line":""}],"source_content_type":"text/x-python","patch_set":15,"id":"dce06069_b5b531e9","line":282,"in_reply_to":"6dc58a8c_8d3c1161","updated":"2025-12-17 15:17:32.000000000","message":"Done","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"986586671c9b4791305f5f07995760bd6f01fa18","unresolved":true,"context_lines":[{"line_number":297,"context_line":"        the thread that receives the event as we don\u0027t have the requirement to"},{"line_number":298,"context_line":"        move the event handler to the main thread with the eventlet hub"},{"line_number":299,"context_line":"        \"\"\""},{"line_number":300,"context_line":"        super()._queue_event(event)"},{"line_number":301,"context_line":""},{"line_number":302,"context_line":"        if not self._started:"},{"line_number":303,"context_line":"            return"}],"source_content_type":"text/x-python","patch_set":15,"id":"8b4fc8c7_26c3a1f0","line":300,"updated":"2025-12-11 11:41:52.000000000","message":"As above","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"45c6aac06021aedcbe9fc64134728346d0b753a6","unresolved":false,"context_lines":[{"line_number":297,"context_line":"        the thread that receives the event as we don\u0027t have the requirement to"},{"line_number":298,"context_line":"        move the event handler to the main thread with the eventlet hub"},{"line_number":299,"context_line":"        \"\"\""},{"line_number":300,"context_line":"        super()._queue_event(event)"},{"line_number":301,"context_line":""},{"line_number":302,"context_line":"        if not self._started:"},{"line_number":303,"context_line":"            return"}],"source_content_type":"text/x-python","patch_set":15,"id":"14215faa_e80913cd","line":300,"in_reply_to":"8b4fc8c7_26c3a1f0","updated":"2025-12-17 15:17:32.000000000","message":"Done","commit_id":"df46344b7a144e4ea25b9ca43cba61b38c88e251"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"89ffc9e7ad64129738a18166bdde9ca19cd341b2","unresolved":false,"context_lines":[{"line_number":113,"context_line":"    return _loaders"},{"line_number":114,"context_line":""},{"line_number":115,"context_line":""},{"line_number":116,"context_line":"class LibvirtEventHandler:"},{"line_number":117,"context_line":"    def __init__(self, conn_event_handler\u003dNone, lifecycle_event_handler\u003dNone):"},{"line_number":118,"context_line":"        self._lifecycle_event_handler \u003d lifecycle_event_handler"},{"line_number":119,"context_line":"        self._conn_event_handler \u003d conn_event_handler"}],"source_content_type":"text/x-python","patch_set":20,"id":"6c3a2489_beccaafa","line":116,"updated":"2026-01-15 15:56:00.000000000","message":"nice abstraction layer.","commit_id":"9469ddea6e8ebd9039a8a87f33f171f78e988299"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"89ffc9e7ad64129738a18166bdde9ca19cd341b2","unresolved":true,"context_lines":[{"line_number":562,"context_line":"            # Delay STOPPED event, as they may be followed by a STARTED"},{"line_number":563,"context_line":"            # event in case the instance is rebooting"},{"line_number":564,"context_line":"            id_ \u003d utils.spawn_after("},{"line_number":565,"context_line":"                self._lifecycle_delay, self._event_emit, event)"},{"line_number":566,"context_line":"            self._events_delayed[event.uuid] \u003d id_"},{"line_number":567,"context_line":"            # add callback to cleanup self._events_delayed dict after"},{"line_number":568,"context_line":"            # event was called"}],"source_content_type":"text/x-python","patch_set":20,"id":"73503881_8cf2bb97","line":565,"updated":"2026-01-15 15:56:00.000000000","message":"see, you are only passing one arg, so that\u0027s why we don\u0027t see the problem about missing \"*args, **kwargs\", maybe we need UTs for spawn_after closure ?","commit_id":"9469ddea6e8ebd9039a8a87f33f171f78e988299"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"839272ce8e298ee5a1c86f90d3a3281244d25aba","unresolved":false,"context_lines":[{"line_number":562,"context_line":"            # Delay STOPPED event, as they may be followed by a STARTED"},{"line_number":563,"context_line":"            # event in case the instance is rebooting"},{"line_number":564,"context_line":"            id_ \u003d utils.spawn_after("},{"line_number":565,"context_line":"                self._lifecycle_delay, self._event_emit, event)"},{"line_number":566,"context_line":"            self._events_delayed[event.uuid] \u003d id_"},{"line_number":567,"context_line":"            # add callback to cleanup self._events_delayed dict after"},{"line_number":568,"context_line":"            # event was called"}],"source_content_type":"text/x-python","patch_set":20,"id":"baeb2d05_e64dbf1e","line":565,"in_reply_to":"73503881_8cf2bb97","updated":"2026-01-16 11:38:54.000000000","message":"You are correct. Reproduce the problem with a new test case and fixed it now.","commit_id":"9469ddea6e8ebd9039a8a87f33f171f78e988299"},{"author":{"_account_id":8556,"name":"Ghanshyam Maan","display_name":"Ghanshyam Maan","email":"gmaan.os14@gmail.com","username":"ghanshyam"},"change_message_id":"7b444531ab0d138175c10d52f417cd01d6b224d4","unresolved":true,"context_lines":[{"line_number":543,"context_line":"    def _event_emit_delayed(self, event):"},{"line_number":544,"context_line":"        \"\"\"Emit events - possibly delayed.\"\"\""},{"line_number":545,"context_line":"        def event_cleanup(event):"},{"line_number":546,"context_line":"            \"\"\"Callback function for greenthread. Called"},{"line_number":547,"context_line":"            to cleanup the _events_delayed dictionary when an event"},{"line_number":548,"context_line":"            was called."},{"line_number":549,"context_line":"            \"\"\""},{"line_number":550,"context_line":"            self._events_delayed.pop(event.uuid, None)"}],"source_content_type":"text/x-python","patch_set":29,"id":"c8f956e6_05aa7f63","line":547,"range":{"start_line":546,"start_character":15,"end_line":547,"end_character":22},"updated":"2026-02-02 01:18:21.000000000","message":"Callback function to cleanup.....","commit_id":"a89c1b44c56e04223f61925305b0f48f3791c7d8"},{"author":{"_account_id":8556,"name":"Ghanshyam Maan","display_name":"Ghanshyam Maan","email":"gmaan.os14@gmail.com","username":"ghanshyam"},"change_message_id":"44dc54ec3e6c28fdb634e33a37291249617bc6c0","unresolved":false,"context_lines":[{"line_number":543,"context_line":"    def _event_emit_delayed(self, event):"},{"line_number":544,"context_line":"        \"\"\"Emit events - possibly delayed.\"\"\""},{"line_number":545,"context_line":"        def event_cleanup(event):"},{"line_number":546,"context_line":"            \"\"\"Callback function for greenthread. Called"},{"line_number":547,"context_line":"            to cleanup the _events_delayed dictionary when an event"},{"line_number":548,"context_line":"            was called."},{"line_number":549,"context_line":"            \"\"\""},{"line_number":550,"context_line":"            self._events_delayed.pop(event.uuid, None)"}],"source_content_type":"text/x-python","patch_set":29,"id":"2473d039_00896448","line":547,"range":{"start_line":546,"start_character":15,"end_line":547,"end_character":22},"in_reply_to":"c8f956e6_05aa7f63","updated":"2026-02-02 01:19:06.000000000","message":"ignore as you are removing this callback in https://review.opendev.org/c/openstack/nova/+/974445/9/nova/virt/libvirt/host.py","commit_id":"a89c1b44c56e04223f61925305b0f48f3791c7d8"}]}
