{"/COMMIT_MSG":[{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"826182508912c4f89043bf2bc211eacafbc3d481","unresolved":true,"context_lines":[{"line_number":16,"context_line":"  instead to spawn_*"},{"line_number":17,"context_line":""},{"line_number":18,"context_line":"The ComputeManager live_migration_executor is already a"},{"line_number":19,"context_line":"GreenThreaPoolExecutor so here we only switching to spawn_on to get rid"},{"line_number":20,"context_line":"of the explicit pass_context call."},{"line_number":21,"context_line":""},{"line_number":22,"context_line":"Change-Id: I51c5339d3f54f5c66856b6d4e06cd91ac04977cb"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":21,"id":"abd69f61_b3d4bf35","line":19,"range":{"start_line":19,"start_character":0,"end_line":19,"end_character":22},"updated":"2025-07-01 15:28:28.000000000","message":"nit: GreenThreadPoolExecutor but meh","commit_id":"d0bb697167493e8adf1f898a6d5b3f7b306e0c35"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"2a63f2acf2eb626b3cf273e4fffb19d683e13671","unresolved":false,"context_lines":[{"line_number":16,"context_line":"  instead to spawn_*"},{"line_number":17,"context_line":""},{"line_number":18,"context_line":"The ComputeManager live_migration_executor is already a"},{"line_number":19,"context_line":"GreenThreaPoolExecutor so here we only switching to spawn_on to get rid"},{"line_number":20,"context_line":"of the explicit pass_context call."},{"line_number":21,"context_line":""},{"line_number":22,"context_line":"Change-Id: I51c5339d3f54f5c66856b6d4e06cd91ac04977cb"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":21,"id":"6f113936_b580be01","line":19,"range":{"start_line":19,"start_character":0,"end_line":19,"end_character":22},"in_reply_to":"abd69f61_b3d4bf35","updated":"2025-07-02 12:25:20.000000000","message":"I need to add the sign-off lines anyhow :/ So done.","commit_id":"d0bb697167493e8adf1f898a6d5b3f7b306e0c35"}],"/PATCHSET_LEVEL":[{"author":{"_account_id":16207,"name":"ribaudr","display_name":"uggla","email":"rene.ribaud@gmail.com","username":"uggla","status":"Red Hat"},"change_message_id":"f05e24f8394bc395ac3140c32929c1379a09b730","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":15,"id":"c14869da_fe6a566d","updated":"2025-06-10 15:30:11.000000000","message":"To my mind this patch looks logical.\nTests are still passing.\nSo it looks good to me.","commit_id":"a1661e69f0ede10f08a5f27a1fb99d7eb37eea19"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"6c8df5e82690a0873ce282fae41c983cc79e6bc5","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":15,"id":"b7d417df_da906ff9","updated":"2025-06-03 13:01:47.000000000","message":"recheck grenade timeout","commit_id":"a1661e69f0ede10f08a5f27a1fb99d7eb37eea19"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4e2c5214011e735e446ee2266f6d942f6373cfa6","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":15,"id":"2abff21f_b4ea2624","updated":"2025-06-05 12:25:30.000000000","message":"recheck grenade timeout (I see this multiple times that grenade times out while dumping the DBs 
)","commit_id":"a1661e69f0ede10f08a5f27a1fb99d7eb37eea19"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"0deae5b840cd74f89fb5272f8313e9904ad67a71","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":15,"id":"d7ef0de3_b50b1d26","updated":"2025-05-27 16:51:07.000000000","message":"recheck grenade-skip-level-always timed out while dumping databases \nhttps://zuul.opendev.org/t/openstack/build/a0c654ddd70f408c9b35ce2e89a5be65/log/job-output.txt#21859-21860","commit_id":"a1661e69f0ede10f08a5f27a1fb99d7eb37eea19"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a7adc16ff5f718de5e1ef0d4a4010e8a292c73ce","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":17,"id":"dbbee842_7464c4f4","updated":"2025-06-16 11:51:53.000000000","message":"recheck nova-ceph-mutlistore guest kernel panic\n```\ncurrently loaded modules: 8021q 8139cp 8390 9pnet 9pnet_virtio ahci cec dca drm drm_kms_helper e1000 e1000e failover fb_sys_fops garp hid hid_generic i2c_algo_bit igb igbvf ip6_udp_tunnel ip_tables isofs libahci libcrc32c llc mii mrp ne2k_pci net_failover nls_ascii nls_iso8859_1 nls_utf8 pcnet32 qemu_fw_cfg rc_core sctp stp syscopyarea sysfillrect sysimgblt udp_tunnel usbhid virtio_blk virtio_dma_buf virtio_gpu virtio_input virtio_net virtio_rng virtio_scsi virtiofs x_tables \ninfo: initramfs loading root from /dev/vda1\n/sbin/init: can\u0027t load library \u0027libtirpc.so.3\u0027\n[    7.159717] Kernel panic - not syncing: Attempted to kill init! exitcode\u003d0x00001000\n[    7.160384] CPU: 0 PID: 1 Comm: init Not tainted 5.15.0-117-generic #127-Ubuntu\n[    7.160700] Hardware name: OpenStack Foundation OpenStack Nova, BIOS 1.16.3-debian-1.16.3-2 04/01/2014\n[    7.161156] Call Trace:\n[    7.161760]  \u003cTASK\u003e\n[    7.161971]  show_stack+0x52/0x5c\n[    7.162390]  dump_stack_lvl+0x4a/0x63\n[    7.162572]  dump_stack+0x10/0x16\n[    7.162735]  panic+0x15c/0x33b\n[    7.162867]  do_exit.cold+0x15/0xa0\n[    7.162992]  __x64_sys_exit+0x1b/0x20\n[    7.163123]  x64_sys_call+0x1f30/0x1fa0\n[    7.163281]  do_syscall_64+0x56/0xb0\n[    7.163410]  ? exit_to_user_mode_prepare+0x37/0xb0\n[    7.163614]  ? syscall_exit_to_user_mode+0x2c/0x50\n[    7.163785]  ? x64_sys_call+0x1e07/0x1fa0\n[    7.163936]  ? do_syscall_64+0x63/0xb0\n[    7.164081]  ? vfs_write+0x1d5/0x270\n[    7.164246]  ? ksys_write+0x67/0xf0\n[    7.164385]  ? exit_to_user_mode_prepare+0x37/0xb0\n[    7.164588]  ? syscall_exit_to_user_mode+0x2c/0x50\n[    7.164749]  ? x64_sys_call+0x47c/0x1fa0\n[    7.164886]  ? do_syscall_64+0x63/0xb0\n[    7.165015]  ? do_syscall_64+0x63/0xb0\n[    7.168272]  ? exit_to_user_mode_prepare+0x99/0xb0\n[    7.170604]  ? irqentry_exit_to_user_mode+0xe/0x20\n[    7.172947]  ? irqentry_exit+0x1d/0x30\n[    7.175256]  ? 
sysvec_apic_timer_interrupt+0x4e/0x90\n[    7.177535]  entry_SYSCALL_64_after_hwframe+0x6c/0xd6\n[    7.179919] RIP: 0033:0x7f03fb9e355e\n[    7.182670] Code: 05 d7 2a 00 00 4c 89 f9 bf 02 00 00 00 48 8d 35 fb 0d 00 00 48 8b 10 31 c0 e8 50 d2 ff ff bf 10 00 00 00 b8 3c 00 00 00 0f 05 \u003c48\u003e 8d 15 f3 2a 00 00 f7 d8 89 02 48 83 ec 20 49 8b 8c 24 b8 00 00\n[    7.188203] RSP: 002b:00007ffd2cbda960 EFLAGS: 00000207 ORIG_RAX: 000000000000003c\n[    7.191063] RAX: ffffffffffffffda RBX: 00007ffd2cbdbc10 RCX: 00007f03fb9e355e\n[    7.193710] RDX: 0000000000000002 RSI: 0000000000001000 RDI: 0000000000000010\n[    7.196787] RBP: 00007ffd2cbdbbf0 R08: 00007f03fb9dc000 R09: 00007f03fb9dc01a\n[    7.199335] R10: 0000000000000001 R11: 0000000000000207 R12: 00007f03fb9dd040\n[    7.202302] R13: 00000000004bae50 R14: 0000000000000000 R15: 0000000000403d66\n[    7.205255]  \u003c/TASK\u003e\n[    7.208361] Kernel Offset: 0xde00000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)\n[    7.211468] ---[ end Kernel panic - not syncing: Attempted to kill init! exitcode\u003d0x00001000 ]---\n```","commit_id":"9e8b3f7427053181c577c74b01a58604b0b8ba81"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"e49e28ec5702120ca0c0c4f44995322a2e480fc3","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":21,"id":"1cd5675d_f3ef9717","updated":"2025-07-01 15:27:47.000000000","message":"that one was way easier to review than the previous commit, all good 👍","commit_id":"d0bb697167493e8adf1f898a6d5b3f7b306e0c35"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"edfe9dc5bb8e56054b16a2b57b4285d68e8a463e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"d1d39f06_90e326dc","updated":"2025-07-10 13:30:56.000000000","message":"recheck guest ssh time out","commit_id":"5c180e17611f24b6eb14900b5170bed8df18b5fb"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"fc34af7efd4952f5445c5be801fbf6b848d52e01","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"62e92f6c_09c7599a","updated":"2025-07-11 12:51:34.000000000","message":"recheck guest ssh timeout (we have this semi frequently)","commit_id":"5c180e17611f24b6eb14900b5170bed8df18b5fb"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d49cca9954d7c570a042a0c58e67b4db1fca711e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"c3faf5b2_842ccf5d","updated":"2025-07-14 07:50:41.000000000","message":"recheck same memory allocation error in extend volume. 
Is it a pattern?","commit_id":"5c180e17611f24b6eb14900b5170bed8df18b5fb"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9783897cb81edf7829a4eb0830823b071addfcd4","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"a5d82e45_db58e516","updated":"2025-07-16 07:32:09.000000000","message":"recheck the gate should be unblocked as we disabled the tempest test that is broken due to the new ceph package version","commit_id":"5c180e17611f24b6eb14900b5170bed8df18b5fb"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d1b25c05ed361a62329d542caefca9eb6c1b841a","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"ce33c2bb_c7eae867","updated":"2025-07-13 12:23:29.000000000","message":"recheck volume extend test failed in ceph mutlistore\n```\nJul 11 14:27:44.552733 npb6cbf0986f2a4 nova-compute[96208]: ERROR nova.virt.libvirt.driver [instance: 0ae4433b-201a-461b-bf92-66d38da50e32] nova.exception.InvalidDiskInfo: Disk info file is invalid: qemu-img failed to execute on rbd:volumes/volume-b5a83deb-b63d-49fb-8c40-b66b3533a2cc:id\u003dcinder : Unexpected error while running command.\nJul 11 14:27:44.552733 npb6cbf0986f2a4 nova-compute[96208]: ERROR nova.virt.libvirt.driver [instance: 0ae4433b-201a-461b-bf92-66d38da50e32] Command: /opt/stack/data/venv/bin/python3.12 -m oslo_concurrency.prlimit --as\u003d1073741824 --cpu\u003d30 -- env LC_ALL\u003dC LANG\u003dC qemu-img info rbd:volumes/volume-b5a83deb-b63d-49fb-8c40-b66b3533a2cc:id\u003dcinder --force-share --output\u003djson\nJul 11 14:27:44.552733 npb6cbf0986f2a4 nova-compute[96208]: ERROR nova.virt.libvirt.driver [instance: 0ae4433b-201a-461b-bf92-66d38da50e32] Exit code: -6\nJul 11 14:27:44.552733 npb6cbf0986f2a4 nova-compute[96208]: ERROR nova.virt.libvirt.driver [instance: 0ae4433b-201a-461b-bf92-66d38da50e32] Stdout: \u0027\u0027\nJul 11 14:27:44.552733 npb6cbf0986f2a4 nova-compute[96208]: ERROR nova.virt.libvirt.driver [instance: 0ae4433b-201a-461b-bf92-66d38da50e32] Stderr: \u0027failed to allocate memory for stack: Cannot allocate memory\\n\u0027\nJul 11 14:27:44.552733 npb6cbf0986f2a4 nova-compute[96208]: ERROR nova.virt.libvirt.driver [instance: 0ae4433b-201a-461b-bf92-66d38da50e32] \n```","commit_id":"5c180e17611f24b6eb14900b5170bed8df18b5fb"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"b08ac60e4381a2efb4c13e6569ced0280bcc2b4e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"16fa53ea_e1633d46","updated":"2025-07-10 09:43:58.000000000","message":"restating my +2","commit_id":"5c180e17611f24b6eb14900b5170bed8df18b5fb"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"90daf2a34bab967111860f50053afa52aac3da44","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":23,"id":"9d660d36_97a15e0b","updated":"2025-07-14 08:03:53.000000000","message":"yeah the gate is blocked by https://bugs.launchpad.net/nova/+bug/2116852","commit_id":"5c180e17611f24b6eb14900b5170bed8df18b5fb"}],"nova/compute/manager.py":[{"author":{"_account_id":11604,"name":"sean 
mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"23b51b07c83cc154d8db9113d23c4293a87cb169","unresolved":true,"context_lines":[{"line_number":643,"context_line":"        self.compute_task_api \u003d conductor.ComputeTaskAPI()"},{"line_number":644,"context_line":"        self.query_client \u003d query.SchedulerQueryClient()"},{"line_number":645,"context_line":"        self.instance_events \u003d InstanceEvents()"},{"line_number":646,"context_line":"        self._sync_power_executor \u003d futurist.GreenThreadPoolExecutor("},{"line_number":647,"context_line":"            max_workers\u003dCONF.sync_power_state_pool_size)"},{"line_number":648,"context_line":"        self._syncs_in_progress \u003d {}"},{"line_number":649,"context_line":"        self.send_instance_updates \u003d ("},{"line_number":650,"context_line":"            CONF.filter_scheduler.track_instance_changes)"}],"source_content_type":"text/x-python","patch_set":13,"id":"56477492_82616039","line":647,"range":{"start_line":646,"start_character":36,"end_line":647,"end_character":56},"updated":"2025-05-26 15:00:56.000000000","message":"i guess we are not using untils funciton to get the default or get_scatter_gather_executor fucntion because we want to use a diffent config option to contol the size\n\nshoudl we condier adding a get_executor function to nova.utils that will take the max_workers and then internally constuct the correct executor based on concurrency_mode_threading?\n\nthat way the compute manager code does not actully need to be aware of if its a greenthread or threadpool executor at all and we can remove this direct usage of futurist.\n\nas a step 100 after we are done wiht the eventlet to thread move we can also look at dropign our use fo futurist and revertign to the executors form the std lib\n\nhttps://docs.python.org/3/library/concurrent.futures.html#threadpoolexecutor\n\ni dont really wanto to move too does now as futureis provide 4 interchanable executors that shoudl have the same api as the ones form concurrent.futures\nbtu once we are out of the transition phase swapign to concurrent.futures woudl allow use to stop maintining the futureist lib so it woudl likely be a good thing down the road.\n\nwith that in mind i would prefer if we centralise all of our usage of futureist in teh nova.utils module as we are doing for spawn ectra.\n\nthis isnt a blocker but just something i wanted to highlight.","commit_id":"87a982c0282686dfb175b1abbc07c6b626ff4d44"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"3d8208a67e3c1a5bef4bfd69f347ffef44bd6380","unresolved":false,"context_lines":[{"line_number":643,"context_line":"        self.compute_task_api \u003d conductor.ComputeTaskAPI()"},{"line_number":644,"context_line":"        self.query_client \u003d query.SchedulerQueryClient()"},{"line_number":645,"context_line":"        self.instance_events \u003d InstanceEvents()"},{"line_number":646,"context_line":"        self._sync_power_executor \u003d futurist.GreenThreadPoolExecutor("},{"line_number":647,"context_line":"            max_workers\u003dCONF.sync_power_state_pool_size)"},{"line_number":648,"context_line":"        self._syncs_in_progress \u003d {}"},{"line_number":649,"context_line":"        self.send_instance_updates \u003d ("},{"line_number":650,"context_line":"            
CONF.filter_scheduler.track_instance_changes)"}],"source_content_type":"text/x-python","patch_set":13,"id":"cb5c19d1_975dca19","line":647,"range":{"start_line":646,"start_character":36,"end_line":647,"end_character":56},"in_reply_to":"56477492_82616039","updated":"2025-05-27 11:52:04.000000000","message":"\u003e i guess we are not using untils funciton to get the default or get_scatter_gather_executor fucntion because we want to use a diffent config option to contol the size\n\nYeah, historically this is different form the default executor. We can try to move the compute manager to use the default executor instead. I just did not want to do that in this series as it is not strictly necessary for the nova-scheduler to work.\n\n\u003e shoudl we condier adding a get_executor function to nova.utils that will take the max_workers and then internally constuct the correct executor based on concurrency_mode_threading?\n\nYeah when we translate the nova-compute for threading mode we probably need that. Or we can make a jump and re-use the default executor with some configuration massaging to get it properly sized. Bottom line, we need to think about it there and then.\n\n\u003e with that in mind i would prefer if we centralise all of our usage of futureist in teh nova.utils module as we are doing for spawn ectra.\n\nYeah this is not a bad idea and I don\u0027t think it will be hard to do. I will try to keep this in mind for the nova-compute translation to threading.\n\n--\n\nThis is the minimal change that needed to push all code to use the centralized spawn_on. I will revisit this once we start translating nova-compute.","commit_id":"87a982c0282686dfb175b1abbc07c6b626ff4d44"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"e49e28ec5702120ca0c0c4f44995322a2e480fc3","unresolved":false,"context_lines":[{"line_number":644,"context_line":"        self.query_client \u003d query.SchedulerQueryClient()"},{"line_number":645,"context_line":"        self.instance_events \u003d InstanceEvents()"},{"line_number":646,"context_line":"        self._sync_power_executor \u003d futurist.GreenThreadPoolExecutor("},{"line_number":647,"context_line":"            max_workers\u003dCONF.sync_power_state_pool_size)"},{"line_number":648,"context_line":"        self._syncs_in_progress \u003d {}"},{"line_number":649,"context_line":"        self.send_instance_updates \u003d ("},{"line_number":650,"context_line":"            CONF.filter_scheduler.track_instance_changes)"}],"source_content_type":"text/x-python","patch_set":21,"id":"d1f876e1_6d5b92a8","line":647,"updated":"2025-07-01 15:27:47.000000000","message":"🎉","commit_id":"d0bb697167493e8adf1f898a6d5b3f7b306e0c35"}],"nova/tests/unit/compute/eventlet_utils.py":[{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"e49e28ec5702120ca0c0c4f44995322a2e480fc3","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":21,"id":"05216d30_050ef63a","side":"PARENT","updated":"2025-07-01 15:27:47.000000000","message":"😎","commit_id":"05ba913a302ae778c7559937439714c07eb39eba"}],"nova/tests/unit/compute/test_compute.py":[{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"3e200c2f94fb935c657967fccdf2968965c02ccb","unresolved":true,"context_lines":[{"line_number":160,"context_line":"        self.compute.driver._set_nodes([NODENAME, 
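A rough sketch of the get_executor() helper proposed above, as a reading aid rather than the actual implementation: the function, its signature, and the threading_mode toggle are all assumptions drawn from the comment.

```python
import futurist


def get_executor(max_workers, threading_mode=False):
    # Hypothetical nova.utils helper along the lines suggested above:
    # hide the executor choice behind one function so callers such as
    # ComputeManager never import futurist directly. The threading_mode
    # flag stands in for however concurrency_mode_threading ends up
    # being exposed; that detail is not settled in this thread.
    if threading_mode:
        return futurist.ThreadPoolExecutor(max_workers=max_workers)
    return futurist.GreenThreadPoolExecutor(max_workers=max_workers)


# A caller would then size the pool from its own config option, e.g.:
#   self._sync_power_executor = utils.get_executor(
#       max_workers=CONF.sync_power_state_pool_size)
```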
File: nova/tests/unit/compute/eventlet_utils.py (patch set 21, parent)

Sylvain Bauza (sbauza), 2025-07-01:
    😎

File: nova/tests/unit/compute/test_compute.py, line 163 (patch set 16, parent)

Quoted test context:

>         # execute power syncing synchronously for testing:
>         self.compute._sync_power_pool = eventlet_utils.SyncPool()

Balazs Gibizer (gibi), 2025-06-13:
    This was another sync pool executor fixture I removed and replaced with the
    futurist sync executor. So the execution mode is not changed in these tests
    either.
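To illustrate why replacing the old SyncPool with futurist.SynchronousExecutor keeps these tests synchronous, here is a minimal, self-contained demonstration (not Nova code): the submitted callable runs inline during submit(), so an assert placed right after it cannot race a background worker.

```python
import futurist

executor = futurist.SynchronousExecutor()

results = []
future = executor.submit(results.append, "power state synced")

# The callable already ran inline inside submit(), so its effect is
# visible immediately; no joining or sleeping is needed.
assert future.done()
assert results == ["power state synced"]

executor.shutdown(wait=True)
```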
File: nova/tests/unit/compute/test_compute.py, line 8571 (patch set 23)

Quoted test context:

>         self.compute._sync_power_states(ctxt)
>         # wait for sync to finish
>         self.compute._sync_power_executor.shutdown(wait=True)

Dan Smith (danms), 2025-07-02:
    Is this because these could now be real threads and we don't want to orphan
    them in the future (a thing which is sort of socially acceptable for
    greenthreads)?

sean mooney (sean-k-mooney), 2025-07-02, reply:
    Well, for greenthreads I have a poison fixture that will detect greenthread
    leaks, because if we do leak them, an exception in one test can leak into
    another. So we shouldn't have any greenthread leaks today; at least, that is
    why
    https://github.com/openstack/nova/blob/master/nova/tests/fixtures/nova.py#L1188-L1299
    exists, to prevent it.

    But since gibi is assigning it above as a new futurist.SynchronousExecutor(),
    we should clean it up; maybe using addCleanup would be better.

    The sync pool just executed the function in the current greenthread, so
    there was nothing to shut down. But I guess the futurist.SynchronousExecutor()
    might have some state we need to clean up.

Balazs Gibizer (gibi), 2025-07-03, reply:
    > Is this because these could now be real threads and we don't want to
    > orphan them in the future [...]?

    The fun thing is that the original test was probably unstable, as it
    depended on some spawned task actually finishing before the assert so that
    the test could observe the expected effect of the task. With futurist in the
    picture, the race between the task and the test assert became more likely
    for some reason. So I added the shutdown to stabilize the test by forcing
    all the spawned tasks to finish.
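Putting the two suggestions together, a sketch of how a test base class might both shut the executor down via addCleanup and fail loudly on leaked workers. The class and method names are invented, and plain threads stand in for the greenthread "poison" fixture linked above; this only illustrates the cleanup order being discussed, it is not the Nova fixture.

```python
import threading
import unittest

import futurist


class SyncPowerExecutorTestBase(unittest.TestCase):
    # Invented names throughout; see the lead-in above.

    def setUp(self):
        super().setUp()
        threads_before = set(threading.enumerate())
        # Cleanups run last-in-first-out, so register the leak check
        # first: the executor shutdown registered below runs before it.
        self.addCleanup(self._assert_no_leaked_threads, threads_before)
        self.executor = futurist.SynchronousExecutor()
        self.addCleanup(self.executor.shutdown, wait=True)

    def _assert_no_leaked_threads(self, threads_before):
        leaked = set(threading.enumerate()) - threads_before
        self.assertFalse(leaked, "test leaked worker threads: %s" % leaked)
```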
