{"/COMMIT_MSG":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"73c6790eead16f30376f76143129483d58b26b46","unresolved":false,"context_lines":[{"line_number":14,"context_line":"providers were not cleaned up in placement. This fixes the issue by"},{"line_number":15,"context_line":"iterating all the compute nodes and cleaning up their providers."},{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean then up"},{"line_number":18,"context_line":"one at a time. If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"dfbec78f_66e1fefe","line":17,"range":{"start_line":17,"start_character":58,"end_line":17,"end_character":62},"updated":"2019-05-03 19:33:19.000000000","message":"them","commit_id":"641fe32c47c281fdfff62349b97d95c3cc989501"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"f0fc65b628c50cc82316235a8f84cb328bc2892d","unresolved":false,"context_lines":[{"line_number":7,"context_line":"Delete resource providers for all nodes when deleting compute service"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Change I7b8622b178d5043ed1556d7bdceaf60f47e5ac80 started deleting the"},{"line_number":10,"context_line":"compute node resource provider associated with a compute node when"},{"line_number":11,"context_line":"deleting a nova-compute service. However, it would only delete the"},{"line_number":12,"context_line":"first compute node associated with the service which means for an"},{"line_number":13,"context_line":"ironic compute service that is managing multiple nodes, the resource"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"dfbec78f_ef205afa","line":10,"range":{"start_line":10,"start_character":0,"end_line":10,"end_character":12},"updated":"2019-05-04 16:44:55.000000000","message":"nit: can nix this since we say \"associated with a compute node\" later on in the sentence.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"45e960cd2a69270da2ee19f98b5a62df5f934539","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Change I7b8622b178d5043ed1556d7bdceaf60f47e5ac80 started deleting the"},{"line_number":10,"context_line":"compute node resource provider associated with a compute node when"},{"line_number":11,"context_line":"deleting a nova-compute service. However, it would only delete the"},{"line_number":12,"context_line":"first compute node associated with the service which means for an"},{"line_number":13,"context_line":"ironic compute service that is managing multiple nodes, the resource"},{"line_number":14,"context_line":"providers were not cleaned up in placement. 
This fixes the issue by"},{"line_number":15,"context_line":"iterating all the compute nodes and cleaning up their providers."}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"dfbec78f_c6c8ea75","line":12,"range":{"start_line":12,"start_character":0,"end_line":12,"end_character":29},"updated":"2019-05-03 19:34:57.000000000","message":"oo yea right we don\u0027t handle the 1:N ironic case yet.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"f0fc65b628c50cc82316235a8f84cb328bc2892d","unresolved":false,"context_lines":[{"line_number":15,"context_line":"iterating all the compute nodes and cleaning up their providers."},{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean them up"},{"line_number":18,"context_line":"one at a time. If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""},{"line_number":21,"context_line":"Change-Id: I9e852e25ea89f32bf19cdaeb1f5dac8f749f5dbc"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"dfbec78f_0a47dc4d","line":18,"range":{"start_line":18,"start_character":15,"end_line":18,"end_character":45},"updated":"2019-05-04 16:44:55.000000000","message":"if I remember correctly, when we did this delete patch first that was what introduced the requirement of having the placement endpoint configured correctly in the nova-api right? (https://review.opendev.org/#/c/560706/4) So if people have forgotten to do that we will just fail like you say here or will we continue with removing things on the nova side? Same question for if for some reason we could not connect to placement at that point of time. Not sure if we have test coverage for this but this is not related to this patch.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"aadf6a92b2bf358425c5787e5aba213a515c2ce1","unresolved":false,"context_lines":[{"line_number":15,"context_line":"iterating all the compute nodes and cleaning up their providers."},{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean them up"},{"line_number":18,"context_line":"one at a time. If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""},{"line_number":21,"context_line":"Change-Id: I9e852e25ea89f32bf19cdaeb1f5dac8f749f5dbc"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"dfbec78f_5ca02ff5","line":18,"range":{"start_line":18,"start_character":15,"end_line":18,"end_character":45},"in_reply_to":"dfbec78f_0a47dc4d","updated":"2019-05-06 16:42:05.000000000","message":"You\u0027re referring to the @safe_connect usage on the SchedulerReportClient.delete_allocation_for_instance method which means if placement isn\u0027t configured properly for nova-api to talk to it, we\u0027ll log warnings but not fail. 
We could hit an issue with placement being configured but something fails with the provider deletion, such as the race mentioned in the patch later in the series here:\n\nhttps://review.opendev.org/#/c/657070/2/nova/scheduler/client/report.py@2172\n\nBut like you said this is an existing issue. This change is just making the compute API call placement N times where N \u003d number of compute nodes for the service being deleted. Also as mentioned in the commit message here, if something does fail, the service record in nova is not yet gone so the user can retry the delete.\n\nAt this point (Train) we can likely *require* that nova-api is configured for placement and start removing the @safe_connect decorator from the SchedulerReportClient code, but that\u0027s a separate and more complicated series (which Jay and Eric have started and re-started at times). But for example, we have this TODO to stop gracefully ignoring placement failures due to config when adding/removing hosts to/from aggregates:\n\nhttps://github.com/openstack/nova/blob/5934c5dc6932fbf19ca7f3011c4ccc07b0038ac4/nova/compute/api.py#L5564\n\nhttps://github.com/openstack/nova/blob/5934c5dc6932fbf19ca7f3011c4ccc07b0038ac4/nova/compute/api.py#L5630","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"17a6d7bfab33d0dcb4dafc17b767a9c5648f5e8a","unresolved":false,"context_lines":[{"line_number":15,"context_line":"iterating all the compute nodes and cleaning up their providers."},{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean them up"},{"line_number":18,"context_line":"one at a time. If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""},{"line_number":21,"context_line":"Change-Id: I9e852e25ea89f32bf19cdaeb1f5dac8f749f5dbc"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"dfbec78f_cbca0119","line":18,"range":{"start_line":18,"start_character":15,"end_line":18,"end_character":45},"in_reply_to":"dfbec78f_5ca02ff5","updated":"2019-05-07 18:02:13.000000000","message":"\u003e You\u0027re referring to the @safe_connect usage on the\n \u003e SchedulerReportClient.delete_allocation_for_instance method which\n \u003e means if placement isn\u0027t configured properly for nova-api to talk\n \u003e to it, we\u0027ll log warnings but not fail.\n\ncorrect, yea when I suddenly read \"if anything fails in the loop\" part I started thinking about this.\n\n \u003e We could hit an issue with\n \u003e placement being configured but something fails with the provider\n \u003e deletion, such as the race mentioned in the patch later in the\n \u003e series here:\n \u003e \n \u003e https://review.opendev.org/#/c/657070/2/nova/scheduler/client/report.py@2172\n \u003e \n \u003e But like you said this is an existing issue. This change is just\n \u003e making the compute API call placement N times where N \u003d number of\n \u003e compute nodes for the service being deleted. 
Also as mentioned in\n \u003e the commit message here, if something does fail, the service record\n \u003e in nova is not yet gone so the user can retry the delete.\n\nyea this is the point that was confusing for me in the commit message, since indeed if we can\u0027t connect to placement (which is a fail scenario) we will log a warning and proceed to delete the service records. But okay this is just a nit.\n\n \u003e \n \u003e At this point (Train) we can likely *require* that nova-api is\n \u003e configured for placement and start removing the @safe_connect\n \u003e decorator from the SchedulerReportClient code, but that\u0027s a\n \u003e separate and more complicated series (which Jay and Eric have\n \u003e started and re-started at times).\n\noh nice wasn\u0027t aware of that.\n\n \u003e But for example, we have this\n \u003e TODO to stop gracefully ignoring placement failures due to config\n \u003e when adding/removing hosts to/from aggregates:\n \u003e \n \u003e https://github.com/openstack/nova/blob/5934c5dc6932fbf19ca7f3011c4ccc07b0038ac4/nova/compute/api.py#L5564\n \u003e \n \u003e https://github.com/openstack/nova/blob/5934c5dc6932fbf19ca7f3011c4ccc07b0038ac4/nova/compute/api.py#L5630\n\noh cool! I\u0027ll see if I can take this up.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"0229500b13c725722618413914b5b57bd97d1bc5","unresolved":false,"context_lines":[{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean them up"},{"line_number":18,"context_line":"one at a time. If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""},{"line_number":21,"context_line":"Change-Id: I9e852e25ea89f32bf19cdaeb1f5dac8f749f5dbc"},{"line_number":22,"context_line":"Closes-Bug: #1811726"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"9fb8cfa7_3366e6ef","line":19,"updated":"2019-06-03 13:02:25.000000000","message":"See my comment within, but it appears that some resource providers could be left around and the service deletion still happen. Not certain.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b19ff61a50aaac5f7f4bf6d97888bca8ac7b764b","unresolved":false,"context_lines":[{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean them up"},{"line_number":18,"context_line":"one at a time. 
If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""},{"line_number":21,"context_line":"Change-Id: I9e852e25ea89f32bf19cdaeb1f5dac8f749f5dbc"},{"line_number":22,"context_line":"Closes-Bug: #1811726"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"9fb8cfa7_5dad6e2f","line":19,"in_reply_to":"9fb8cfa7_3366e6ef","updated":"2019-06-05 21:00:40.000000000","message":"You\u0027re right, I hadn\u0027t noticed that and might be related to what Surya was asking above. I was just able to connect the dots after you brought this up and cfriesen mentioned it in IRC today - which also made bug 1829479 click with me.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b269a1af8f6703d66e636af65b244047f74e5d0f","unresolved":false,"context_lines":[{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean them up"},{"line_number":18,"context_line":"one at a time. If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""},{"line_number":21,"context_line":"Change-Id: I9e852e25ea89f32bf19cdaeb1f5dac8f749f5dbc"},{"line_number":22,"context_line":"Closes-Bug: #1811726"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"9fb8cfa7_8ad06221","line":19,"in_reply_to":"9fb8cfa7_36b9b4e8","updated":"2019-06-07 00:24:03.000000000","message":"FYI this is the change that recreates that issue:\n\nhttps://review.opendev.org/663737","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"82ecf96d77312b699a3d7de27ee1733812405060","unresolved":false,"context_lines":[{"line_number":16,"context_line":"Note this could be potentially a lot of nodes, but we don\u0027t really"},{"line_number":17,"context_line":"have many good options here but to iterate them and clean them up"},{"line_number":18,"context_line":"one at a time. 
If anything fails in that loop, we won\u0027t have deleted"},{"line_number":19,"context_line":"the compute service record so the user could retry the operation."},{"line_number":20,"context_line":""},{"line_number":21,"context_line":"Change-Id: I9e852e25ea89f32bf19cdaeb1f5dac8f749f5dbc"},{"line_number":22,"context_line":"Closes-Bug: #1811726"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":2,"id":"9fb8cfa7_36b9b4e8","line":19,"in_reply_to":"9fb8cfa7_5dad6e2f","updated":"2019-06-06 07:40:33.000000000","message":"\u003e You\u0027re right, I hadn\u0027t noticed that and might be related to what\n \u003e Surya was asking above.\n\nyea, I was talking about the same thing but since it was just wordings in commit message I just considered it a nit after you clarified it.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"}],"nova/api/openstack/compute/services.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"529d462b3e6a95566c15eb8c391ceacef7e621f5","unresolved":false,"context_lines":[{"line_number":244,"context_line":"            service \u003d self.host_api.service_get_by_id(context, id)"},{"line_number":245,"context_line":"            # remove the service from all the aggregates in which it\u0027s included"},{"line_number":246,"context_line":"            if service.binary \u003d\u003d \u0027nova-compute\u0027:"},{"line_number":247,"context_line":"                # Check to see if there are any instances on this compute host"},{"line_number":248,"context_line":"                # because if there are, we need to block the service (and"},{"line_number":249,"context_line":"                # related compute_nodes record) delete since it will impact"},{"line_number":250,"context_line":"                # resource accounting in Placement and orphan the compute node"},{"line_number":251,"context_line":"                # resource provider."},{"line_number":252,"context_line":"                # TODO(mriedem): Use a COUNT SQL query-based function instead"},{"line_number":253,"context_line":"                # of InstanceList.get_uuids_by_host for performance."},{"line_number":254,"context_line":"                instance_uuids \u003d objects.InstanceList.get_uuids_by_host("},{"line_number":255,"context_line":"                    context, service[\u0027host\u0027])"},{"line_number":256,"context_line":"                if instance_uuids:"},{"line_number":257,"context_line":"                    raise webob.exc.HTTPConflict("},{"line_number":258,"context_line":"                        explanation\u003d_(\u0027Unable to delete compute service that \u0027"},{"line_number":259,"context_line":"                                      \u0027is hosting instances. 
Migrate or \u0027"},{"line_number":260,"context_line":"                                      \u0027delete the instances first.\u0027))"},{"line_number":261,"context_line":""},{"line_number":262,"context_line":"                aggrs \u003d self.aggregate_api.get_aggregates_by_host(context,"},{"line_number":263,"context_line":"                                                                  service.host)"}],"source_content_type":"text/x-python","patch_set":2,"id":"9fb8cfa7_e2b450f4","line":260,"range":{"start_line":247,"start_character":16,"end_line":260,"end_character":69},"updated":"2019-06-12 14:36:57.000000000","message":"Unrelated to *this* change but related to the issue cdent pointed out in the commit message ( and recreated in https://review.opendev.org/#/c/663737/ ), counting the instance uuids on the host is akin to the cascade\u003dTrue logic in delete_resource_provider in that it\u0027s only checking for instances that are currently on the host because of the instances.host value in the database. But it\u0027s not counting other allocations on the resource providers like unconfirmed migrations and evacuated instances. Given that, to fix the ResourceProviderInUse issue we have a couple of options, which I don\u0027t think are mutually exclusive:\n\n1. we could change this check to look for allocations on any resource providers on the host\n2. we could change delete_resource_provider to not ignore ResourceProviderInUse so that if we fail we don\u0027t delete the compute service record until the allocations on those providers are cleaned up\n3. we change the cascade\u003dTrue logic in delete_resource_provider to cleanup all allocations on the provider, not just for the currently hosted instances\n\nThe one thing that worries me a bit about #3 is if we\u0027re cleaning up allocations for unconfirmed migrations, what happens when the migration is confirmed/reverted and the compute service code goes to remove/change the source node allocations that we\u0027ve now deleted? I\u0027m not even sure if we\u0027d get that far if the source compute service is deleted while an instance is in VERIFY_RESIZE status and then the user confirms/reverts the migration.\n\nThe easiest / most straight-forward thing we can do now is probably #2 while mulling #3 (and write a functional test or two for the #3 scenario).","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"f0fc65b628c50cc82316235a8f84cb328bc2892d","unresolved":false,"context_lines":[{"line_number":259,"context_line":"                                      \u0027is hosting instances. 
Migrate or \u0027"},{"line_number":260,"context_line":"                                      \u0027delete the instances first.\u0027))"},{"line_number":261,"context_line":""},{"line_number":262,"context_line":"                aggrs \u003d self.aggregate_api.get_aggregates_by_host(context,"},{"line_number":263,"context_line":"                                                                  service.host)"},{"line_number":264,"context_line":"                for ag in aggrs:"},{"line_number":265,"context_line":"                    self.aggregate_api.remove_host_from_aggregate(context,"}],"source_content_type":"text/x-python","patch_set":2,"id":"dfbec78f_0a9b5c72","line":262,"range":{"start_line":262,"start_character":66,"end_line":262,"end_character":73},"updated":"2019-05-04 16:44:55.000000000","message":"this becomes global cell target here: https://github.com/openstack/nova/blob/1388855be2a26c8dc47639bc974633868dfdf9b9/nova/objects/aggregate.py#L424","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"f0fc65b628c50cc82316235a8f84cb328bc2892d","unresolved":false,"context_lines":[{"line_number":270,"context_line":"                # remember that an ironic compute service can manage multiple"},{"line_number":271,"context_line":"                # nodes"},{"line_number":272,"context_line":"                compute_nodes \u003d objects.ComputeNodeList.get_all_by_host("},{"line_number":273,"context_line":"                    context, service.host)"},{"line_number":274,"context_line":"                for compute_node in compute_nodes:"},{"line_number":275,"context_line":"                    self.placementclient.delete_resource_provider("},{"line_number":276,"context_line":"                        context, compute_node, cascade\u003dTrue)"}],"source_content_type":"text/x-python","patch_set":2,"id":"dfbec78f_6acbf8a4","line":273,"range":{"start_line":273,"start_character":20,"end_line":273,"end_character":27},"updated":"2019-05-04 16:44:55.000000000","message":"I was just thinking how come this is still cell targeted. 
So that happens here: https://github.com/openstack/nova/blob/1388855be2a26c8dc47639bc974633868dfdf9b9/nova/compute/api.py#L5022 , same for L254 I guess.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"aadf6a92b2bf358425c5787e5aba213a515c2ce1","unresolved":false,"context_lines":[{"line_number":270,"context_line":"                # remember that an ironic compute service can manage multiple"},{"line_number":271,"context_line":"                # nodes"},{"line_number":272,"context_line":"                compute_nodes \u003d objects.ComputeNodeList.get_all_by_host("},{"line_number":273,"context_line":"                    context, service.host)"},{"line_number":274,"context_line":"                for compute_node in compute_nodes:"},{"line_number":275,"context_line":"                    self.placementclient.delete_resource_provider("},{"line_number":276,"context_line":"                        context, compute_node, cascade\u003dTrue)"}],"source_content_type":"text/x-python","patch_set":2,"id":"dfbec78f_dcabbfd3","line":273,"range":{"start_line":273,"start_character":20,"end_line":273,"end_character":27},"in_reply_to":"dfbec78f_6acbf8a4","updated":"2019-05-06 16:42:05.000000000","message":"Yup, it\u0027s a bit tricky.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"0229500b13c725722618413914b5b57bd97d1bc5","unresolved":false,"context_lines":[{"line_number":272,"context_line":"                compute_nodes \u003d objects.ComputeNodeList.get_all_by_host("},{"line_number":273,"context_line":"                    context, service.host)"},{"line_number":274,"context_line":"                for compute_node in compute_nodes:"},{"line_number":275,"context_line":"                    self.placementclient.delete_resource_provider("},{"line_number":276,"context_line":"                        context, compute_node, cascade\u003dTrue)"},{"line_number":277,"context_line":"                # remove the host_mapping of this host."},{"line_number":278,"context_line":"                try:"}],"source_content_type":"text/x-python","patch_set":2,"id":"9fb8cfa7_f3460e50","line":275,"updated":"2019-06-03 13:02:25.000000000","message":"Hmmm. It\u0027s rather unfortunate that delete_resource_provider neither returns nor raises an exception. 
You can\u0027t tell if a delete succeeded or failed or really anything at all.\n\nIf you could, you could repackage that error here to address you \"user could retry\" statement in the commit message.\n\nAs it stands, it looks you can pass right over this, delete some but not all of the nodes, and then disable the service.\n\nMeaning that if any of the nodes come back with the same name they may try to create a new resource provider with the same name but different uuid, and the uniq constraint on name may blow up.\n\n(I\u0027m not actually clear on how things will behave in the real world, but there may be tricksy bits here)","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b19ff61a50aaac5f7f4bf6d97888bca8ac7b764b","unresolved":false,"context_lines":[{"line_number":272,"context_line":"                compute_nodes \u003d objects.ComputeNodeList.get_all_by_host("},{"line_number":273,"context_line":"                    context, service.host)"},{"line_number":274,"context_line":"                for compute_node in compute_nodes:"},{"line_number":275,"context_line":"                    self.placementclient.delete_resource_provider("},{"line_number":276,"context_line":"                        context, compute_node, cascade\u003dTrue)"},{"line_number":277,"context_line":"                # remove the host_mapping of this host."},{"line_number":278,"context_line":"                try:"}],"source_content_type":"text/x-python","patch_set":2,"id":"9fb8cfa7_7d6dd202","line":275,"in_reply_to":"9fb8cfa7_f3460e50","updated":"2019-06-05 21:00:40.000000000","message":"Yup see comments in bug 1829479. I\u0027m going to write a functional test to recreate that issue.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"f0fc65b628c50cc82316235a8f84cb328bc2892d","unresolved":false,"context_lines":[{"line_number":273,"context_line":"                    context, service.host)"},{"line_number":274,"context_line":"                for compute_node in compute_nodes:"},{"line_number":275,"context_line":"                    self.placementclient.delete_resource_provider("},{"line_number":276,"context_line":"                        context, compute_node, cascade\u003dTrue)"},{"line_number":277,"context_line":"                # remove the host_mapping of this host."},{"line_number":278,"context_line":"                try:"},{"line_number":279,"context_line":"                    hm \u003d objects.HostMapping.get_by_host(context, service.host)"}],"source_content_type":"text/x-python","patch_set":2,"id":"dfbec78f_6a34584b","line":276,"range":{"start_line":276,"start_character":24,"end_line":276,"end_character":31},"updated":"2019-05-04 16:44:55.000000000","message":"this is again cell targeted for pulling instances and then deleting their allocations via REST API.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"f0fc65b628c50cc82316235a8f84cb328bc2892d","unresolved":false,"context_lines":[{"line_number":283,"context_line":"                    # delete it (maybe it was accidental?) 
before mapping it to"},{"line_number":284,"context_line":"                    # a cell using discover_hosts, so we just ignore this."},{"line_number":285,"context_line":"                    pass"},{"line_number":286,"context_line":"            service.destroy()"},{"line_number":287,"context_line":""},{"line_number":288,"context_line":"        except exception.ServiceNotFound:"},{"line_number":289,"context_line":"            explanation \u003d _(\"Service %s not found.\") % id"}],"source_content_type":"text/x-python","patch_set":2,"id":"dfbec78f_0a191c78","line":286,"updated":"2019-05-04 16:44:55.000000000","message":"and this query here https://github.com/openstack/nova/blob/1388855be2a26c8dc47639bc974633868dfdf9b9/nova/db/sqlalchemy/api.py#L395 ensures we soft delete all the compute_nodes associated with the hostname.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":268,"context_line":"                # remove the corresponding resource provider record from"},{"line_number":269,"context_line":"                # placement for the compute nodes managed by this service;"},{"line_number":270,"context_line":"                # remember that an ironic compute service can manage multiple"},{"line_number":271,"context_line":"                # nodes"},{"line_number":272,"context_line":"                compute_nodes \u003d objects.ComputeNodeList.get_all_by_host("},{"line_number":273,"context_line":"                    context, service.host)"},{"line_number":274,"context_line":"                for compute_node in compute_nodes:"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_103e5cc0","line":271,"updated":"2019-06-14 16:21:52.000000000","message":"Wouldn\u0027t object to some more commentary here (even including a TODO) about how the deletion is currently best-effort, failures ignored, etc., and how that makes things break if the host/nodes are resurrected under another service entry (link to those bugs?).","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d58e035743717d655463427411ec150c2c98bdfc","unresolved":false,"context_lines":[{"line_number":268,"context_line":"                # remove the corresponding resource provider record from"},{"line_number":269,"context_line":"                # placement for the compute nodes managed by this service;"},{"line_number":270,"context_line":"                # remember that an ironic compute service can manage multiple"},{"line_number":271,"context_line":"                # nodes"},{"line_number":272,"context_line":"                compute_nodes \u003d objects.ComputeNodeList.get_all_by_host("},{"line_number":273,"context_line":"                    context, service.host)"},{"line_number":274,"context_line":"                for compute_node in compute_nodes:"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_6190e796","line":271,"in_reply_to":"9fb8cfa7_103e5cc0","updated":"2019-06-19 14:43:09.000000000","message":"There is a note and related todo in delete_resource_provider 
itself:\n\nhttps://review.opendev.org/#/c/657070/3/nova/scheduler/client/report.py@2172","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"}],"nova/scheduler/client/report.py":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":682,"context_line":""},{"line_number":683,"context_line":"        return uuid"},{"line_number":684,"context_line":""},{"line_number":685,"context_line":"    @safe_connect"},{"line_number":686,"context_line":"    def _delete_provider(self, rp_uuid, global_request_id\u003dNone):"},{"line_number":687,"context_line":"        resp \u003d self.delete(\u0027/resource_providers/%s\u0027 % rp_uuid,"},{"line_number":688,"context_line":"                           global_request_id\u003dglobal_request_id)"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_6dd263d3","line":685,"range":{"start_line":685,"start_character":5,"end_line":685,"end_character":17},"updated":"2019-06-14 16:21:52.000000000","message":"die","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"b478e076bae1b0a913bec0a595f70110ef89fde6","unresolved":false,"context_lines":[{"line_number":682,"context_line":""},{"line_number":683,"context_line":"        return uuid"},{"line_number":684,"context_line":""},{"line_number":685,"context_line":"    @safe_connect"},{"line_number":686,"context_line":"    def _delete_provider(self, rp_uuid, global_request_id\u003dNone):"},{"line_number":687,"context_line":"        resp \u003d self.delete(\u0027/resource_providers/%s\u0027 % rp_uuid,"},{"line_number":688,"context_line":"                           global_request_id\u003dglobal_request_id)"}],"source_content_type":"text/x-python","patch_set":3,"id":"7faddb67_8dbd50b8","line":685,"range":{"start_line":685,"start_character":5,"end_line":685,"end_character":17},"in_reply_to":"9fb8cfa7_6dd263d3","updated":"2019-07-19 21:43:07.000000000","message":"https://review.opendev.org/671866","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":710,"context_line":"        }"},{"line_number":711,"context_line":"        LOG.error(msg, args)"},{"line_number":712,"context_line":"        # On conflict, the caller may wish to delete allocations and"},{"line_number":713,"context_line":"        # redrive.  (Note that this is not the same as a"},{"line_number":714,"context_line":"        # PlacementAPIConflict case.)"},{"line_number":715,"context_line":"        if resp.status_code \u003d\u003d 409:"},{"line_number":716,"context_line":"            raise exception.ResourceProviderInUse()"},{"line_number":717,"context_line":"        raise exception.ResourceProviderDeletionFailed(uuid\u003drp_uuid)"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_ad361b02","line":714,"range":{"start_line":713,"start_character":21,"end_line":714,"end_character":36},"updated":"2019-06-14 16:21:52.000000000","message":"Mm. And we decided not to implement a generation-based DELETE.\n\nWe should definitely implement this retry - but here, or at the caller? Further investigation needed.\n\n[Later] At the caller. 
This method should not try to delete allocations.","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":1984,"context_line":"                raise Retry(\u0027put_allocations\u0027, reason)"},{"line_number":1985,"context_line":"        return r.status_code \u003d\u003d 204"},{"line_number":1986,"context_line":""},{"line_number":1987,"context_line":"    @safe_connect"},{"line_number":1988,"context_line":"    def delete_allocation_for_instance(self, context, uuid,"},{"line_number":1989,"context_line":"                                       consumer_type\u003d\u0027instance\u0027):"},{"line_number":1990,"context_line":"        \"\"\"Delete the instance allocation from placement"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_cd758f33","line":1987,"range":{"start_line":1987,"start_character":4,"end_line":1987,"end_character":17},"updated":"2019-06-14 16:21:52.000000000","message":"die","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":2012,"context_line":"        # but returns an empty consumer instead. Putting an empty allocation to"},{"line_number":2013,"context_line":"        # that non-existing consumer won\u0027t be 404 or other error either."},{"line_number":2014,"context_line":"        r \u003d self.get(url, global_request_id\u003dcontext.global_id,"},{"line_number":2015,"context_line":"                     version\u003dCONSUMER_GENERATION_VERSION)"},{"line_number":2016,"context_line":"        if not r:"},{"line_number":2017,"context_line":"            # at the moment there is no way placement returns a failure so we"},{"line_number":2018,"context_line":"            # could even delete this code"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_2d7b0b28","line":2015,"range":{"start_line":2015,"start_character":29,"end_line":2015,"end_character":56},"updated":"2019-06-14 16:21:52.000000000","message":"This is using consumer generations, but without a generation conflict retry. So we could get failures on races. 
Pretty sure we\u0027ll want to account for those as we move forward with the cascading delete thing.","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":2016,"context_line":"        if not r:"},{"line_number":2017,"context_line":"            # at the moment there is no way placement returns a failure so we"},{"line_number":2018,"context_line":"            # could even delete this code"},{"line_number":2019,"context_line":"            LOG.warning(\u0027Unable to delete allocation for %(consumer_type)s \u0027"},{"line_number":2020,"context_line":"                        \u0027%(uuid)s: (%(code)i %(text)s)\u0027,"},{"line_number":2021,"context_line":"                        {\u0027consumer_type\u0027: consumer_type,"},{"line_number":2022,"context_line":"                         \u0027uuid\u0027: uuid,"},{"line_number":2023,"context_line":"                         \u0027code\u0027: r.status_code,"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_ed1ad3c3","line":2020,"range":{"start_line":2019,"start_character":25,"end_line":2020,"end_character":54},"updated":"2019-06-14 16:21:52.000000000","message":"This message should be different from the one on L2045 to indicate that the problem happened during the read.","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":2028,"context_line":"        if allocations[\u0027allocations\u0027] \u003d\u003d {}:"},{"line_number":2029,"context_line":"            # the consumer did not exist in the first place"},{"line_number":2030,"context_line":"            LOG.debug(\u0027Cannot delete allocation for %s consumer in placement \u0027"},{"line_number":2031,"context_line":"                      \u0027as consumer does not exists\u0027, uuid)"},{"line_number":2032,"context_line":"            return False"},{"line_number":2033,"context_line":""},{"line_number":2034,"context_line":"        # removing all resources from the allocation will auto delete the"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_ed709342","line":2031,"range":{"start_line":2031,"start_character":44,"end_line":2031,"end_character":50},"updated":"2019-06-14 16:21:52.000000000","message":"exist","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"b478e076bae1b0a913bec0a595f70110ef89fde6","unresolved":false,"context_lines":[{"line_number":2028,"context_line":"        if allocations[\u0027allocations\u0027] \u003d\u003d {}:"},{"line_number":2029,"context_line":"            # the consumer did not exist in the first place"},{"line_number":2030,"context_line":"            LOG.debug(\u0027Cannot delete allocation for %s consumer in placement \u0027"},{"line_number":2031,"context_line":"                      \u0027as consumer does not exists\u0027, uuid)"},{"line_number":2032,"context_line":"            return False"},{"line_number":2033,"context_line":""},{"line_number":2034,"context_line":"        # removing all resources from the allocation will auto delete 
the"}],"source_content_type":"text/x-python","patch_set":3,"id":"7faddb67_6d7b34f4","line":2031,"range":{"start_line":2031,"start_character":44,"end_line":2031,"end_character":50},"in_reply_to":"9fb8cfa7_ed709342","updated":"2019-07-19 21:43:07.000000000","message":"https://review.opendev.org/671866","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":2159,"context_line":"        :param compute_node: The nova.objects.ComputeNode object that is the"},{"line_number":2160,"context_line":"                             resource provider being deleted."},{"line_number":2161,"context_line":"        :param cascade: Boolean value that, when True, will first delete any"},{"line_number":2162,"context_line":"                        associated Allocation and Inventory records for the"},{"line_number":2163,"context_line":"                        compute node"},{"line_number":2164,"context_line":"        \"\"\""},{"line_number":2165,"context_line":"        nodename \u003d compute_node.hypervisor_hostname"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_ee6bae81","line":2162,"range":{"start_line":2162,"start_character":46,"end_line":2162,"end_character":59},"updated":"2019-06-14 16:21:52.000000000","message":"not true","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"b478e076bae1b0a913bec0a595f70110ef89fde6","unresolved":false,"context_lines":[{"line_number":2159,"context_line":"        :param compute_node: The nova.objects.ComputeNode object that is the"},{"line_number":2160,"context_line":"                             resource provider being deleted."},{"line_number":2161,"context_line":"        :param cascade: Boolean value that, when True, will first delete any"},{"line_number":2162,"context_line":"                        associated Allocation and Inventory records for the"},{"line_number":2163,"context_line":"                        compute node"},{"line_number":2164,"context_line":"        \"\"\""},{"line_number":2165,"context_line":"        nodename \u003d compute_node.hypervisor_hostname"}],"source_content_type":"text/x-python","patch_set":3,"id":"7faddb67_2d81bcc3","line":2162,"range":{"start_line":2162,"start_character":46,"end_line":2162,"end_character":59},"in_reply_to":"9fb8cfa7_ee6bae81","updated":"2019-07-19 21:43:07.000000000","message":"https://review.opendev.org/671866","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":2172,"context_line":"            instances \u003d objects.InstanceList.get_by_host_and_node(context,"},{"line_number":2173,"context_line":"                    host, nodename)"},{"line_number":2174,"context_line":"            for instance in instances:"},{"line_number":2175,"context_line":"                self.delete_allocation_for_instance(context, instance.uuid)"},{"line_number":2176,"context_line":"        try:"},{"line_number":2177,"context_line":"            self._delete_provider(rp_uuid, global_request_id\u003dcontext.global_id)"},{"line_number":2178,"context_line":"        except 
(exception.ResourceProviderInUse,"}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_4d819fc2","line":2175,"range":{"start_line":2175,"start_character":21,"end_line":2175,"end_character":51},"updated":"2019-06-14 16:21:52.000000000","message":"silently no-ops on ClientException due to @safe_connect","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":2177,"context_line":"            self._delete_provider(rp_uuid, global_request_id\u003dcontext.global_id)"},{"line_number":2178,"context_line":"        except (exception.ResourceProviderInUse,"},{"line_number":2179,"context_line":"                exception.ResourceProviderDeletionFailed):"},{"line_number":2180,"context_line":"            # TODO(efried): Raise these.  Right now this is being left a no-op"},{"line_number":2181,"context_line":"            # for backward compatibility."},{"line_number":2182,"context_line":"            pass"},{"line_number":2183,"context_line":""}],"source_content_type":"text/x-python","patch_set":3,"id":"9fb8cfa7_8d1c7778","line":2180,"range":{"start_line":2180,"start_character":28,"end_line":2180,"end_character":33},"updated":"2019-06-14 16:21:52.000000000","message":"For InUse we actually want to redrive this whole method (delete allocations + delete provider) *if* cascade\u003dTrue, which will be slightly weird (internal method conditionally decorated with retry?).\n\nDeletionFailed can raise.","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"}],"nova/tests/unit/api/openstack/compute/test_services.py":[{"author":{"_account_id":26936,"name":"Surya Seetharaman","email":"suryaseetharaman.9@gmail.com","username":"tssurya"},"change_message_id":"f0fc65b628c50cc82316235a8f84cb328bc2892d","unresolved":false,"context_lines":[{"line_number":739,"context_line":"            nodes \u003d cn_get_all_by_host.return_value"},{"line_number":740,"context_line":"            delete_resource_provider.assert_has_calls(["},{"line_number":741,"context_line":"                mock.call(ctxt, node, cascade\u003dTrue) for node in nodes"},{"line_number":742,"context_line":"            ], any_order\u003dTrue)"},{"line_number":743,"context_line":"            get_hm.assert_called_once_with(ctxt, \u0027host1\u0027)"},{"line_number":744,"context_line":"            service_delete.assert_called_once_with()"},{"line_number":745,"context_line":"        _test()"}],"source_content_type":"text/x-python","patch_set":2,"id":"dfbec78f_2a550051","line":742,"range":{"start_line":742,"start_character":15,"end_line":742,"end_character":24},"updated":"2019-05-04 16:44:55.000000000","message":"cool! didn\u0027t know this existed.","commit_id":"fbbe5579981b5045ce8b2f8e34da2c57ca86b6e7"}],"releasenotes/notes/bug-1811726-multi-node-delete-2ba17f02c6171fbb.yaml":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3777078ab77181a75ba3b0272ff4385024247a5d","unresolved":false,"context_lines":[{"line_number":5,"context_line":"    associated with each compute node record managed by a ``nova-compute``"},{"line_number":6,"context_line":"    service when that service is deleted via the"},{"line_number":7,"context_line":"    ``DELETE /os-services/{service_id}`` API. 
This is particularly important"},{"line_number":8,"context_line":"    for compute services managing ironic baremetal nodes."},{"line_number":9,"context_line":""},{"line_number":10,"context_line":"    .. _Bug 1811726: https://bugs.launchpad.net/nova/+bug/1811726"}],"source_content_type":"text/x-yaml","patch_set":3,"id":"9fb8cfa7_f05ce819","line":8,"updated":"2019-06-14 16:21:52.000000000","message":"worth mentioning that the other buggage still exists?","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"aba70a9f90c5cf2d338f1ffa907cf42fd6aa0448","unresolved":false,"context_lines":[{"line_number":5,"context_line":"    associated with each compute node record managed by a ``nova-compute``"},{"line_number":6,"context_line":"    service when that service is deleted via the"},{"line_number":7,"context_line":"    ``DELETE /os-services/{service_id}`` API. This is particularly important"},{"line_number":8,"context_line":"    for compute services managing ironic baremetal nodes."},{"line_number":9,"context_line":""},{"line_number":10,"context_line":"    .. _Bug 1811726: https://bugs.launchpad.net/nova/+bug/1811726"}],"source_content_type":"text/x-yaml","patch_set":3,"id":"9fb8cfa7_be203ab2","line":8,"in_reply_to":"9fb8cfa7_f05ce819","updated":"2019-06-19 14:33:31.000000000","message":"That could get messy if I mention something about how this doesn\u0027t fix some other bug, or if I add a known issue reno for the other bug and then end up landing something in the same release that addresses the other bug, then the release notes could be a confusing jumble of notes saying something isn\u0027t fixed and then it is, or is or is not a known issue.","commit_id":"650fe118d128f09f78552b82abc114bb4b84930e"}]}
