Gerrit inline review comments, nova, Closes-Bug: #1836754
(delete_allocation_for_instance with force=True)
{"/COMMIT_MSG":[{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"cf0e412f2b8659000cbac76e095eef6701ad395a","unresolved":true,"context_lines":[{"line_number":29,"context_line":"generations into account. There was also a related mailing list thread"},{"line_number":30,"context_line":"[2]."},{"line_number":31,"context_line":""},{"line_number":32,"context_line":"Co-Authored-By: melanie witt \u003cmelwittt@gmail.com\u003e"},{"line_number":33,"context_line":""},{"line_number":34,"context_line":"Closes-Bug: #1836754"},{"line_number":35,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":5,"id":"77ae35d5_f10fb617","line":32,"updated":"2021-08-27 18:18:06.000000000","message":"This Co-Authored-By line should be removed as I think the patch is now back to its PS1 state.","commit_id":"7d92769296ac4b9c70917f3d38b66ba8e05a995e"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"a1afd30db8d27f58d6f1c53e1073b865fb09dc13","unresolved":true,"context_lines":[{"line_number":29,"context_line":"generations into account. There was also a related mailing list thread"},{"line_number":30,"context_line":"[2]."},{"line_number":31,"context_line":""},{"line_number":32,"context_line":"Co-Authored-By: melanie witt \u003cmelwittt@gmail.com\u003e"},{"line_number":33,"context_line":""},{"line_number":34,"context_line":"Closes-Bug: #1836754"},{"line_number":35,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":5,"id":"58c7d032_9f0e4ee0","line":32,"in_reply_to":"77ae35d5_f10fb617","updated":"2021-08-27 18:30:39.000000000","message":"\u003e This Co-Authored-By line should be removed as I think the patch is now back to its PS1 state.\n\nTo be clear, I mean that I should be removed as co-author since anything I did is no longer in the patch.","commit_id":"7d92769296ac4b9c70917f3d38b66ba8e05a995e"}],"nova/compute/api.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"001c49728fd41dcd9aaa89e783711caacff4ec2a","unresolved":false,"context_lines":[{"line_number":2318,"context_line":"            # Cleanup allocations in Placement since we can\u0027t do it from the"},{"line_number":2319,"context_line":"            # compute service."},{"line_number":2320,"context_line":"            self.placementclient.delete_allocation_for_instance("},{"line_number":2321,"context_line":"                context, instance.uuid, force\u003dTrue)"},{"line_number":2322,"context_line":"            cb(context, instance, bdms, local\u003dTrue)"},{"line_number":2323,"context_line":"            instance.destroy()"},{"line_number":2324,"context_line":""}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_262ed68b","line":2321,"updated":"2019-10-15 20:03:13.000000000","message":"This is the case specific to bug 1836754 where the server is being deleted while building.","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"198175c2d9154558033b40d3188d4f938d0d6820","unresolved":false,"context_lines":[{"line_number":2318,"context_line":"            # Cleanup allocations in Placement since we can\u0027t do it from the"},{"line_number":2319,"context_line":"          
  # compute service."},{"line_number":2320,"context_line":"            self.placementclient.delete_allocation_for_instance("},{"line_number":2321,"context_line":"                context, instance.uuid, force\u003dTrue)"},{"line_number":2322,"context_line":"            cb(context, instance, bdms, local\u003dTrue)"},{"line_number":2323,"context_line":"            instance.destroy()"},{"line_number":2324,"context_line":""}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_676a85ed","line":2321,"in_reply_to":"3fa7e38b_062cfaa9","updated":"2019-10-16 12:48:24.000000000","message":"So _local delete happens if the server is being deleted while the compute host the server is assigned to is considered down. Race can happen between the compute and the api if the compute is not dead just inaccessible from the api perspective. In that case both the compute and the api can manipulate allocations in parallel. \n\nIf we know that a user initiated server delete is always superior to any allocation manipulation done by the compute on that server in the meantime. Then sure we can force the delete here. If the allocation manipulation on the compute side always uses consumer generations (I think it does) then that manipulation might fail due to conflict which feels OK as nothing needs to be done on a deleted server allocation wise anymore.\n\nI think one of you mentioned the case yesterday about deleting the allocation from the api, but as the compute racing with this delete the compute might re-create the allocation. Do we have safeguard against it?","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"8be9a96ce27a2bf1f1118bce6cc387096210ffda","unresolved":false,"context_lines":[{"line_number":2318,"context_line":"            # Cleanup allocations in Placement since we can\u0027t do it from the"},{"line_number":2319,"context_line":"            # compute service."},{"line_number":2320,"context_line":"            self.placementclient.delete_allocation_for_instance("},{"line_number":2321,"context_line":"                context, instance.uuid, force\u003dTrue)"},{"line_number":2322,"context_line":"            cb(context, instance, bdms, local\u003dTrue)"},{"line_number":2323,"context_line":"            instance.destroy()"},{"line_number":2324,"context_line":""}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_062cfaa9","line":2321,"in_reply_to":"3fa7e38b_262ed68b","updated":"2019-10-15 20:47:30.000000000","message":"We hit this in a number of different paths though, right? 
Like, what\u0027s a \"soft\" delete?","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"dbcac39eab91bd29e1c10705aab6490a3f7e8ce2","unresolved":false,"context_lines":[{"line_number":2318,"context_line":"            # Cleanup allocations in Placement since we can\u0027t do it from the"},{"line_number":2319,"context_line":"            # compute service."},{"line_number":2320,"context_line":"            self.placementclient.delete_allocation_for_instance("},{"line_number":2321,"context_line":"                context, instance.uuid, force\u003dTrue)"},{"line_number":2322,"context_line":"            cb(context, instance, bdms, local\u003dTrue)"},{"line_number":2323,"context_line":"            instance.destroy()"},{"line_number":2324,"context_line":""}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_8c5a40ef","line":2321,"in_reply_to":"3fa7e38b_676a85ed","updated":"2019-10-16 14:19:40.000000000","message":"A few things here:\n\n1. We also do the delete in the API if the server is shelved offloaded since it doesn\u0027t have a compute host. It wouldn\u0027t have allocations in that case either though...\n\n2. In what scenarios is the compute service down from the API perspective but is actually running and mucking with allocations? The resource tracker doesn\u0027t update allocations anymore. Are you thinking of like some weird scenario where a port with resource requests is detached from the server while the server is being deleted at the same time? If so, we likely can\u0027t get to the compute to do the detach if it\u0027s down.\n\n3. Where would the compute re-create the allocations while the API is trying to delete them because it\u0027s doing a local delete b/c the service group API says the compute service is down? 
Yesterday I found latent bug 1848343 which is about re-creating and leaking allocations for a deleted server during resize/cold migrate rollback, but I don\u0027t think that\u0027s the same issue here.","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"}],"nova/compute/manager.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"001c49728fd41dcd9aaa89e783711caacff4ec2a","unresolved":false,"context_lines":[{"line_number":813,"context_line":""},{"line_number":814,"context_line":"        self.reportclient.delete_allocation_for_instance(context,"},{"line_number":815,"context_line":"                                                         instance.uuid,"},{"line_number":816,"context_line":"                                                         force\u003dTrue)"},{"line_number":817,"context_line":""},{"line_number":818,"context_line":"        self._clean_instance_console_tokens(context, instance)"},{"line_number":819,"context_line":"        self._delete_scheduler_instance_info(context, instance.uuid)"}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_4642f23f","line":816,"updated":"2019-10-15 20:03:13.000000000","message":"In this case we\u0027re deleting the server so why would we need to use consumer generations?","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"dbcac39eab91bd29e1c10705aab6490a3f7e8ce2","unresolved":false,"context_lines":[{"line_number":813,"context_line":""},{"line_number":814,"context_line":"        self.reportclient.delete_allocation_for_instance(context,"},{"line_number":815,"context_line":"                                                         instance.uuid,"},{"line_number":816,"context_line":"                                                         force\u003dTrue)"},{"line_number":817,"context_line":""},{"line_number":818,"context_line":"        self._clean_instance_console_tokens(context, instance)"},{"line_number":819,"context_line":"        self._delete_scheduler_instance_info(context, instance.uuid)"}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_ccd7d822","line":816,"in_reply_to":"3fa7e38b_2a96944d","updated":"2019-10-16 14:19:40.000000000","message":"I\u0027m not too worried about the edge case of a soft-deleted instance being reclaimed while the periodic is running to clean it up and the user migrating the server. Soft-delete is not enabled by default and even if you do enable it you\u0027d still have to hit a tight window to hit the issue you\u0027re talking about. 
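
The consumer-generation mechanics gibi describes can be made concrete. A
minimal sketch of the two deletion modes against the Placement
allocations API, using the requests library; the endpoint, token, and
function names are illustrative assumptions, not Nova's report-client
code:

    import requests

    PLACEMENT = "http://placement.example.com"  # hypothetical endpoint
    HEADERS = {
        "X-Auth-Token": "...",                  # hypothetical token
        "OpenStack-API-Version": "placement 1.28",
    }


    def delete_allocations_checked(consumer_uuid, generation,
                                   project_id, user_id):
        """Generation-aware clear: 409 means another writer got there first."""
        resp = requests.put(
            "%s/allocations/%s" % (PLACEMENT, consumer_uuid),
            headers=HEADERS,
            json={
                # An empty allocation set removes all of the consumer's
                # allocations, but only if our generation is still current.
                "allocations": {},
                "consumer_generation": generation,
                "project_id": project_id,
                "user_id": user_id,
            })
        if resp.status_code == 409 and "concurrent_update" in resp.text:
            return False  # stale generation: the allocations changed under us
        resp.raise_for_status()
        return True


    def delete_allocations_forced(consumer_uuid):
        """Forced delete: a plain DELETE ignores consumer generations."""
        resp = requests.delete(
            "%s/allocations/%s" % (PLACEMENT, consumer_uuid),
            headers=HEADERS)
        return resp.status_code == 204

The forced path can never hit the 409, which is exactly why the thread
keeps asking, call site by call site, whether forcing is safe.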

File: nova/compute/manager.py, line 816 (patch set 1)

     813
     814          self.reportclient.delete_allocation_for_instance(context,
     815                                                           instance.uuid,
     816                                                           force=True)
     817
     818          self._clean_instance_console_tokens(context, instance)
     819          self._delete_scheduler_instance_info(context, instance.uuid)

Matt Riedemann (mriedem), 2019-10-15 20:03:
    In this case we're deleting the server, so why would we need to use
    consumer generations?

Balazs Gibizer (gibi), 2019-10-16 12:48, in reply:
    This is called from:

    * _init_instance() at compute restart. There we only trigger this if
      the server is already in DELETING state. I guess the API does not
      allow any lifecycle operation on a server in such a state, so it is
      OK to force the delete.

    * _reclaim_queued_deletes(), periodically, to clean up timed-out soft
      deleted servers. Soft deleted servers can be restored, so there is a
      potential race between a restore coming from the user and the
      periodic cleanup happening in the compute. Restore does not
      manipulate allocations; it simply assumes the allocations exist. So
      _reclaim_queued_deletes and restore cannot race on placement. There
      is still a potential race, though: _reclaim_queued_deletes() queries
      the soft deleted instances and then goes into a loop to delete them
      if they are old. While this loop progresses, the user can restore a
      server from the list of servers being reclaimed and then start
      another lifecycle operation on this server, like migrating it, which
      does resource manipulation on the server. So _theoretically_ we can
      have a race on allocation manipulation here, and blindly deleting
      the allocation in that case is the wrong choice. I would simply fail
      the reclaim instead.

    * do_terminate_instance, during a user-initiated delete. I think here
      we are safe to force the delete.

Matt Riedemann (mriedem), 2019-10-16 14:19, in reply:
    I'm not too worried about the edge case of a soft-deleted instance
    being reclaimed while the periodic is running to clean it up and the
    user migrating the server. Soft delete is not enabled by default, and
    even if you do enable it you'd still have to hit a tight window to hit
    the issue you're talking about. However, if you feel we shouldn't
    force the allocation delete when calling this from the soft delete
    reclaim periodic, then we could plumb a kwarg down to the method to
    determine what should happen with allocation cleanup.
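
A minimal sketch of the kwarg plumbing mriedem suggests; the class,
method, and kwarg names below are hypothetical stand-ins for
illustration, not the actual patch:

    class FakeReportClient:
        """Stand-in for the placement report client (illustrative only)."""

        def delete_allocation_for_instance(self, context, instance_uuid,
                                           force=False):
            print("deleting allocations for %s (force=%s)"
                  % (instance_uuid, force))


    class ComputeManagerSketch:
        """Hypothetical method shapes; the real Nova manager differs."""

        def __init__(self):
            self.reportclient = FakeReportClient()

        def _complete_deletion(self, context, instance_uuid,
                               force_allocation_delete=True):
            # The caller decides whether the allocation delete may ignore
            # consumer generations.
            self.reportclient.delete_allocation_for_instance(
                context, instance_uuid, force=force_allocation_delete)

        def do_terminate_instance(self, context, instance_uuid):
            # User-initiated delete: per the thread above, forcing is safe.
            self._complete_deletion(context, instance_uuid)

        def _reclaim_queued_deletes(self, context, instance_uuid):
            # May race with a user restore: respect consumer generations
            # and let a conflict fail the reclaim instead of forcing.
            self._complete_deletion(context, instance_uuid,
                                    force_allocation_delete=False)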
20:03:13.000000000","message":"Unshelve failed so the instance isn\u0027t on a host so it shouldn\u0027t have allocations.","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"}],"nova/compute/resource_tracker.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"001c49728fd41dcd9aaa89e783711caacff4ec2a","unresolved":false,"context_lines":[{"line_number":1678,"context_line":"                                                        instance):"},{"line_number":1679,"context_line":"        self.reportclient.delete_allocation_for_instance(context,"},{"line_number":1680,"context_line":"                                                         instance.uuid,"},{"line_number":1681,"context_line":"                                                         force\u003dTrue)"},{"line_number":1682,"context_line":""},{"line_number":1683,"context_line":"    def _verify_resources(self, resources):"},{"line_number":1684,"context_line":"        resource_keys \u003d [\"vcpus\", \"memory_mb\", \"local_gb\", \"cpu_info\","}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_06b63a3f","line":1681,"updated":"2019-10-15 20:03:13.000000000","message":"We\u0027ve shelved offloaded a server so it\u0027s not on a host and shouldn\u0027t have allocations.","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"70a28bde99719c723c954c947d0af17227a344e9","unresolved":true,"context_lines":[{"line_number":1649,"context_line":"                                                        instance):"},{"line_number":1650,"context_line":"        self.reportclient.delete_allocation_for_instance(context,"},{"line_number":1651,"context_line":"                                                         instance.uuid,"},{"line_number":1652,"context_line":"                                                         force\u003dTrue)"},{"line_number":1653,"context_line":""},{"line_number":1654,"context_line":"    def _verify_resources(self, resources):"},{"line_number":1655,"context_line":"        resource_keys \u003d [\"vcpus\", \"memory_mb\", \"local_gb\", \"cpu_info\","}],"source_content_type":"text/x-python","patch_set":2,"id":"3358be70_634a1fc4","line":1652,"range":{"start_line":1652,"start_character":63,"end_line":1652,"end_character":67},"updated":"2021-07-27 16:58:50.000000000","message":"again default behavior and if you must extend this can you fix the formating\n\n        self.reportclient.delete_allocation_for_instance(\n            context, instance.uuid, force\u003dTrue)\n\n:)","commit_id":"a91cb05199d1da2a005ebc21ebc57c7a23be7663"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"3cf7a5108a38846cbb585e02361424ccc34411b9","unresolved":false,"context_lines":[{"line_number":1649,"context_line":"                                                        instance):"},{"line_number":1650,"context_line":"        self.reportclient.delete_allocation_for_instance(context,"},{"line_number":1651,"context_line":"                                                         instance.uuid,"},{"line_number":1652,"context_line":"                                                         force\u003dTrue)"},{"line_number":1653,"context_line":""},{"line_number":1654,"context_line":"    def _verify_resources(self, 
resources):"},{"line_number":1655,"context_line":"        resource_keys \u003d [\"vcpus\", \"memory_mb\", \"local_gb\", \"cpu_info\","}],"source_content_type":"text/x-python","patch_set":2,"id":"514a8025_78a09220","line":1652,"range":{"start_line":1652,"start_character":63,"end_line":1652,"end_character":67},"in_reply_to":"3358be70_634a1fc4","updated":"2021-08-27 09:05:24.000000000","message":"Done","commit_id":"a91cb05199d1da2a005ebc21ebc57c7a23be7663"}],"nova/conductor/manager.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"001c49728fd41dcd9aaa89e783711caacff4ec2a","unresolved":false,"context_lines":[{"line_number":1400,"context_line":"                except exception.InstanceMappingNotFound:"},{"line_number":1401,"context_line":"                    pass"},{"line_number":1402,"context_line":"                self.report_client.delete_allocation_for_instance("},{"line_number":1403,"context_line":"                    context, instance.uuid, force\u003dTrue)"},{"line_number":1404,"context_line":"                continue"},{"line_number":1405,"context_line":"            else:"},{"line_number":1406,"context_line":"                if host.service_host not in host_az:"}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_068f1a75","line":1403,"updated":"2019-10-15 20:03:13.000000000","message":"In this case the server was deleted while building (deleted the build request in the API while scheduling and we aren\u0027t going to create the instance record in the cell DB).","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"}],"nova/scheduler/client/report.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"001c49728fd41dcd9aaa89e783711caacff4ec2a","unresolved":false,"context_lines":[{"line_number":1980,"context_line":"                side error occurred (if force\u003dTrue)"},{"line_number":1981,"context_line":"        \"\"\""},{"line_number":1982,"context_line":"        url \u003d \u0027/allocations/%s\u0027 % uuid"},{"line_number":1983,"context_line":"        if force:"},{"line_number":1984,"context_line":"            # Do not bother with consumer generations, just delete the"},{"line_number":1985,"context_line":"            # allocations."},{"line_number":1986,"context_line":"            r \u003d self.delete(url, global_request_id\u003dcontext.global_id)"}],"source_content_type":"text/x-python","patch_set":1,"id":"3fa7e38b_86f0eafe","line":1983,"updated":"2019-10-15 20:03:13.000000000","message":"Everything in this condition is essentially what this method used to do before https://review.opendev.org/#/c/591597/.","commit_id":"b1a75d8d6b629f6509fd2da84377d519875f647d"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"8be9a96ce27a2bf1f1118bce6cc387096210ffda","unresolved":false,"context_lines":[{"line_number":2182,"context_line":"            instance_uuids \u003d objects.InstanceList.get_uuids_by_host_and_node("},{"line_number":2183,"context_line":"                context, host, nodename)"},{"line_number":2184,"context_line":"            for instance_uuid in instance_uuids:"},{"line_number":2185,"context_line":"                self.delete_allocation_for_instance(context, instance_uuid)"},{"line_number":2186,"context_line":"        try:"},{"line_number":2187,"context_line":"            self._delete_provider(rp_uuid, 

File: nova/scheduler/client/report.py, line 2185 (patch set 1)

    2182              instance_uuids = objects.InstanceList.get_uuids_by_host_and_node(
    2183                  context, host, nodename)
    2184              for instance_uuid in instance_uuids:
    2185                  self.delete_allocation_for_instance(context, instance_uuid)
    2186          try:
    2187              self._delete_provider(rp_uuid, global_request_id=context.global_id)
    2188          except (exception.ResourceProviderInUse,

Eric Fried (efried), 2019-10-15 20:47:
    This one will still be subject to the race. And this is an example
    where we *either* want to:

    - eliminate the race (force=True); or
    - widen the window, perhaps by GETting the generations outside of this
      loop. Not sure how much that really helps, though.

    In either case we still have the problem of the other operation coming
    in and creating allocations after this deletion goes through, which
    would cause _delete_provider to fail.

    Currently we hit this from two places:

    - When we're deleting the compute service. In this case I hope we've
      disabled it first and prevented any other operations from starting.
      But what if an operation had already started by the time we
      disabled? That operation would presumably fail out eventually, and
      that might clean up the allocations, but the compute service
      deletion still failed (leaving the service in a partially deleted
      state? Can you redrive the deletion?).
    - Removing orphaned ironic nodes (which presumably have been shuffled
      to another hash ring). In this case any other in-flight operation on
      that node ought to fail soon and (hopefully) clean up, deleting its
      allocations. The node's provider might still be orphaned this
      iteration (because _delete_provider fails), but would get cleaned up
      on the next periodic.
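
The tolerate-and-retry behavior efried describes for the periodic case
might look like the following sketch; the function name and the exception
stand-in are hypothetical:

    import logging

    LOG = logging.getLogger(__name__)


    class ResourceProviderInUse(Exception):
        """Stand-in for nova.exception.ResourceProviderInUse."""


    def cleanup_node_provider(reportclient, context, rp_uuid, instance_uuids):
        """Illustrative pattern only: force-clean each consumer, then try
        to drop the provider, tolerating a racing allocation writer."""
        for instance_uuid in instance_uuids:
            # force=True removes the per-consumer generation race ...
            reportclient.delete_allocation_for_instance(
                context, instance_uuid, force=True)
        try:
            reportclient._delete_provider(
                rp_uuid, global_request_id=context.global_id)
        except ResourceProviderInUse:
            # ... but another operation can still re-create allocations
            # between the loop and this call. Leave the provider orphaned
            # for now; the next periodic run retries the cleanup.
            LOG.warning('Provider %s still has allocations; will retry.',
                        rp_uuid)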

File: nova/scheduler/filter_scheduler.py, line 311 (patch set 1)

     308          LOG.debug("Cleaning up allocations for %s", instance_uuids)
     309          for uuid in instance_uuids:
     310              self.placement_client.delete_allocation_for_instance(context, uuid,
     311                                                                   force=True)
     312
     313      def _legacy_find_hosts(self, context, num_instances, spec_obj, hosts,
     314                             num_alts, instance_uuids=None):

Matt Riedemann (mriedem), 2019-10-15 20:03:
    In this case we got NoValidHost, so we're cleaning up allocations
    since we failed to schedule.

File: nova/scheduler/filter_scheduler.py, line 310 (patch set 2)

     307          LOG.debug("Cleaning up allocations for %s", instance_uuids)
     308          for uuid in instance_uuids:
     309              self.placement_client.delete_allocation_for_instance(context, uuid,
     310                                                                   force=True)
     311
     312      def _legacy_find_hosts(self, context, num_instances, spec_obj, hosts,
     313                             num_alts, instance_uuids=None):

sean mooney (sean-k-mooney), 2021-07-27 16:58 [unresolved]:
    Nit: this is the default value of force, so you can avoid the extra
    line and wrapping by deleting it.

Balazs Gibizer (gibi), 2021-08-27 09:05, in reply:
    Done
