{"/COMMIT_MSG":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":6,"context_line":""},{"line_number":7,"context_line":"Add reshaper for PCPU"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Added upgrade code using reshape to report PCPU instead of VCPU in case"},{"line_number":10,"context_line":"the host is configured to use pinned CPUs. With this, when an existing"},{"line_number":11,"context_line":"compute node running guests which uses dedicated CPUs is upgraded to"},{"line_number":12,"context_line":"Train release, it will update allocation records of existing guest from"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"5faad753_5fc28087","line":9,"range":{"start_line":9,"start_character":36,"end_line":9,"end_character":42},"updated":"2019-09-06 22:29:00.000000000","message":"not really, see inline","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Added upgrade code using reshape to report PCPU instead of VCPU in case"},{"line_number":10,"context_line":"the host is configured to use pinned CPUs. With this, when an existing"},{"line_number":11,"context_line":"compute node running guests which uses dedicated CPUs is upgraded to"},{"line_number":12,"context_line":"Train release, it will update allocation records of existing guest from"},{"line_number":13,"context_line":"VCPU to PCPU using the reshape functionality."},{"line_number":14,"context_line":""},{"line_number":15,"context_line":"Part of blueprint cpu-resources"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"5faad753_ffbeccfd","line":12,"range":{"start_line":12,"start_character":23,"end_line":12,"end_character":48},"updated":"2019-09-06 22:29:00.000000000","message":"yeah, this.\n\n...but not inventories. 
Because that\u0027s already being done.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"}],"nova/tests/functional/libvirt/test_numa_servers.py":[{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"4040017f1429639530bc6fe8c7fd9318e0cca587","unresolved":false,"context_lines":[{"line_number":124,"context_line":"        self.assertEqual(2, len(inst.numa_topology.cells))"},{"line_number":125,"context_line":"        self.assertNotIn(\u0027cpu_topology\u0027, inst.numa_topology.cells[0])"},{"line_number":126,"context_line":"        self.assertNotIn(\u0027cpu_topology\u0027, inst.numa_topology.cells[1])"},{"line_number":127,"context_line":""},{"line_number":128,"context_line":"    def test_create_server_with_legacy_pinning_policy(self):"},{"line_number":129,"context_line":"        \"\"\"Create a server using the legacy \u0027hw:cpu_policy\u0027 extra spec."},{"line_number":130,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"7faddb67_9bdf95d5","line":127,"updated":"2019-08-09 10:05:46.000000000","message":"We could just checked resource allocations here?","commit_id":"5312a3e4898ff415571caaf8abc914a272db4a37"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"4040017f1429639530bc6fe8c7fd9318e0cca587","unresolved":false,"context_lines":[{"line_number":149,"context_line":"        inst \u003d objects.Instance.get_by_uuid(ctx, server[\u0027id\u0027])"},{"line_number":150,"context_line":"        self.assertEqual(1, len(inst.numa_topology.cells))"},{"line_number":151,"context_line":"        self.assertEqual(5, inst.numa_topology.cells[0].cpu_topology.cores)"},{"line_number":152,"context_line":""},{"line_number":153,"context_line":"    def test_create_server_with_pcpu(self):"},{"line_number":154,"context_line":"        \"\"\"Create a server using an explicit \u0027resources:PCPU\u0027 request."},{"line_number":155,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"7faddb67_3beea18b","line":152,"updated":"2019-08-09 10:05:46.000000000","message":"And here","commit_id":"5312a3e4898ff415571caaf8abc914a272db4a37"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"4040017f1429639530bc6fe8c7fd9318e0cca587","unresolved":false,"context_lines":[{"line_number":174,"context_line":"        self.assertEqual(1, len(inst.numa_topology.cells))"},{"line_number":175,"context_line":"        self.assertEqual(1, inst.numa_topology.cells[0].cpu_topology.cores)"},{"line_number":176,"context_line":"        self.assertEqual(2, inst.numa_topology.cells[0].cpu_topology.threads)"},{"line_number":177,"context_line":""},{"line_number":178,"context_line":"    def test_create_server_with_numa_fails(self):"},{"line_number":179,"context_line":"        \"\"\"Create a two NUMA node instance on a host with only one node."},{"line_number":180,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"7faddb67_5bf35d74","line":177,"updated":"2019-08-09 10:05:46.000000000","message":"And here","commit_id":"5312a3e4898ff415571caaf8abc914a272db4a37"},{"author":{"_account_id":15334,"name":"Stephen 
Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"4040017f1429639530bc6fe8c7fd9318e0cca587","unresolved":false,"context_lines":[{"line_number":191,"context_line":"        flavor_id \u003d self._create_flavor(extra_spec\u003dextra_spec)"},{"line_number":192,"context_line":""},{"line_number":193,"context_line":"        self._run_build_test(flavor_id, end_status\u003d\u0027ERROR\u0027)"},{"line_number":194,"context_line":""},{"line_number":195,"context_line":""},{"line_number":196,"context_line":"class NUMAServersWithNetworksTest(NUMAServersTestBase):"},{"line_number":197,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"7faddb67_9bf8f549","line":194,"updated":"2019-08-09 10:05:46.000000000","message":"And add the test with the workarounds option set here?","commit_id":"5312a3e4898ff415571caaf8abc914a272db4a37"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"4040017f1429639530bc6fe8c7fd9318e0cca587","unresolved":false,"context_lines":[{"line_number":538,"context_line":"        self.assertIn(\u0027No valid host\u0027, six.text_type(ex))"},{"line_number":539,"context_line":""},{"line_number":540,"context_line":""},{"line_number":541,"context_line":"class CPUPinningUpgradeTest(NUMAServersTestBase,"},{"line_number":542,"context_line":"                            integrated_helpers.ProviderUsageBaseTestCase):"},{"line_number":543,"context_line":"    compute_driver \u003d \u0027libvirt.LibvirtDriver\u0027"},{"line_number":544,"context_line":"    microversion \u003d \u0027latest\u0027"},{"line_number":545,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"7faddb67_dbd50db9","line":542,"range":{"start_line":541,"start_character":0,"end_line":542,"end_character":74},"updated":"2019-08-09 10:05:46.000000000","message":"Does it make sense to do this separately? 
Perhaps we could fold all of these into \u0027NUMAServersTest\u0027 as noted above?","commit_id":"5312a3e4898ff415571caaf8abc914a272db4a37"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":427,"context_line":""},{"line_number":428,"context_line":"class ReshapeForPCPUsTest(NUMAServersTestBase):"},{"line_number":429,"context_line":""},{"line_number":430,"context_line":"    def test_vcpu_to_pcpu_reshape(self):"},{"line_number":431,"context_line":"        \"\"\"Verify that VCPU to PCPU reshape works with libvirt driver"},{"line_number":432,"context_line":""},{"line_number":433,"context_line":"        1) create two pinned servers with an old tree where the compute"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_ff76cc3e","line":430,"range":{"start_line":430,"start_character":8,"end_line":430,"end_character":33},"updated":"2019-09-06 22:29:00.000000000","message":"lovely test","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":458,"context_line":""},{"line_number":459,"context_line":"        # ensure there is no PCPU inventory being reported"},{"line_number":460,"context_line":""},{"line_number":461,"context_line":"        compute_rp_uuid \u003d self.placement_api.get("},{"line_number":462,"context_line":"            \u0027/resource_providers?name\u003dcompute1\u0027).body["},{"line_number":463,"context_line":"                \u0027resource_providers\u0027][0][\u0027uuid\u0027]"},{"line_number":464,"context_line":"        compute_inventory \u003d self.placement_api.get("},{"line_number":465,"context_line":"            \u0027/resource_providers/%s/inventories\u0027 % compute_rp_uuid).body["},{"line_number":466,"context_line":"                \u0027inventories\u0027]"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_7fdc1c46","line":463,"range":{"start_line":461,"start_character":8,"end_line":463,"end_character":48},"updated":"2019-09-06 22:29:00.000000000","message":"helper (later)","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":461,"context_line":"        compute_rp_uuid \u003d self.placement_api.get("},{"line_number":462,"context_line":"            \u0027/resource_providers?name\u003dcompute1\u0027).body["},{"line_number":463,"context_line":"                \u0027resource_providers\u0027][0][\u0027uuid\u0027]"},{"line_number":464,"context_line":"        compute_inventory \u003d self.placement_api.get("},{"line_number":465,"context_line":"            \u0027/resource_providers/%s/inventories\u0027 % compute_rp_uuid).body["},{"line_number":466,"context_line":"                \u0027inventories\u0027]"},{"line_number":467,"context_line":"        self.assertEqual(8, compute_inventory[\u0027VCPU\u0027][\u0027total\u0027])"},{"line_number":468,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, 
compute_inventory)"},{"line_number":469,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_bfd29433","line":466,"range":{"start_line":464,"start_character":8,"end_line":466,"end_character":30},"updated":"2019-09-06 22:29:00.000000000","message":"self._get_provider_inventory(...)","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":465,"context_line":"            \u0027/resource_providers/%s/inventories\u0027 % compute_rp_uuid).body["},{"line_number":466,"context_line":"                \u0027inventories\u0027]"},{"line_number":467,"context_line":"        self.assertEqual(8, compute_inventory[\u0027VCPU\u0027][\u0027total\u0027])"},{"line_number":468,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, compute_inventory)"},{"line_number":469,"context_line":""},{"line_number":470,"context_line":"        # now we boot two servers with pinning these should boot even without"},{"line_number":471,"context_line":"        # PCPUs since we\u0027re not doing the translation yet"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_9fe1d801","line":468,"updated":"2019-09-06 22:29:00.000000000","message":"✔","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":467,"context_line":"        self.assertEqual(8, compute_inventory[\u0027VCPU\u0027][\u0027total\u0027])"},{"line_number":468,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, compute_inventory)"},{"line_number":469,"context_line":""},{"line_number":470,"context_line":"        # now we boot two servers with pinning these should boot even without"},{"line_number":471,"context_line":"        # PCPUs since we\u0027re not doing the translation yet"},{"line_number":472,"context_line":""},{"line_number":473,"context_line":"        extra_spec \u003d {\u0027hw:cpu_policy\u0027: \u0027dedicated\u0027}"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_3fe6a418","line":470,"range":{"start_line":470,"start_character":45,"end_line":470,"end_character":48},"updated":"2019-09-06 22:29:00.000000000","message":"punctuation missing here?","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"26317a0922271b9776e6cc45a40aae5bbf27ca99","unresolved":false,"context_lines":[{"line_number":467,"context_line":"        self.assertEqual(8, compute_inventory[\u0027VCPU\u0027][\u0027total\u0027])"},{"line_number":468,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, compute_inventory)"},{"line_number":469,"context_line":""},{"line_number":470,"context_line":"        # now we boot two servers with pinning these should boot even without"},{"line_number":471,"context_line":"        # PCPUs since we\u0027re not doing the translation yet"},{"line_number":472,"context_line":""},{"line_number":473,"context_line":"        extra_spec \u003d {\u0027hw:cpu_policy\u0027: 
\u0027dedicated\u0027}"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_dc30a80a","line":470,"range":{"start_line":470,"start_character":45,"end_line":470,"end_character":48},"in_reply_to":"5faad753_3fe6a418","updated":"2019-09-09 14:55:27.000000000","message":"Done","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":485,"context_line":"        # the reshape. Note that the value of 8 VCPUs is derived from"},{"line_number":486,"context_line":"        # fakelibvirt.HostInfo with our overridden values"},{"line_number":487,"context_line":""},{"line_number":488,"context_line":"        compute_inventory \u003d self.placement_api.get("},{"line_number":489,"context_line":"            \u0027/resource_providers/%s/inventories\u0027 % compute_rp_uuid).body["},{"line_number":490,"context_line":"                \u0027inventories\u0027]"},{"line_number":491,"context_line":"        self.assertEqual(8, compute_inventory[\u0027VCPU\u0027][\u0027total\u0027])"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_bfbb74e3","line":488,"updated":"2019-09-06 22:29:00.000000000","message":"_get_provider_inventory\n\nI\u0027ll stop calling out duplicates; similar below.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":489,"context_line":"            \u0027/resource_providers/%s/inventories\u0027 % compute_rp_uuid).body["},{"line_number":490,"context_line":"                \u0027inventories\u0027]"},{"line_number":491,"context_line":"        self.assertEqual(8, compute_inventory[\u0027VCPU\u0027][\u0027total\u0027])"},{"line_number":492,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, compute_inventory)"},{"line_number":493,"context_line":"        compute_usages \u003d self.placement_api.get("},{"line_number":494,"context_line":"            \u0027/resource_providers/%s/usages\u0027 % compute_rp_uuid).body["},{"line_number":495,"context_line":"                \u0027usages\u0027]"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_7faadc85","line":492,"updated":"2019-09-06 22:29:00.000000000","message":"no harm, but not sure this (checking VCPU \u0026 !PCPU inventories) is worth repeating since all you did was spawn.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":490,"context_line":"                \u0027inventories\u0027]"},{"line_number":491,"context_line":"        self.assertEqual(8, compute_inventory[\u0027VCPU\u0027][\u0027total\u0027])"},{"line_number":492,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, compute_inventory)"},{"line_number":493,"context_line":"        compute_usages \u003d self.placement_api.get("},{"line_number":494,"context_line":"            \u0027/resource_providers/%s/usages\u0027 % compute_rp_uuid).body["},{"line_number":495,"context_line":"                \u0027usages\u0027]"},{"line_number":496,"context_line":"        self.assertEqual(4, 
compute_usages[\u0027VCPU\u0027])"},{"line_number":497,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, compute_usages)"},{"line_number":498,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_df8e702d","line":495,"range":{"start_line":493,"start_character":25,"end_line":495,"end_character":25},"updated":"2019-09-06 22:29:00.000000000","message":"self._get_provider_usages","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":497,"context_line":"        self.assertNotIn(\u0027PCPU\u0027, compute_usages)"},{"line_number":498,"context_line":""},{"line_number":499,"context_line":"        for server in (server1, server2):"},{"line_number":500,"context_line":"            allocations \u003d self.placement_api.get("},{"line_number":501,"context_line":"                \u0027/allocations/%s\u0027 % server[\u0027id\u0027]).body[\u0027allocations\u0027]"},{"line_number":502,"context_line":"            # the flavor has disk\u003d10 and ephemeral\u003d10"},{"line_number":503,"context_line":"            self.assertEqual("},{"line_number":504,"context_line":"                {\u0027DISK_GB\u0027: 20, \u0027MEMORY_MB\u0027: 2048, \u0027VCPU\u0027: 2},"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_1f9ba872","line":501,"range":{"start_line":500,"start_character":26,"end_line":501,"end_character":69},"updated":"2019-09-06 22:29:00.000000000","message":"self._get_allocations_by_server_uuid","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":558,"context_line":"            {\u0027DISK_GB\u0027: 20, \u0027MEMORY_MB\u0027: 2048, \u0027PCPU\u0027: 2},"},{"line_number":559,"context_line":"            allocations[compute_rp_uuid][\u0027resources\u0027])"},{"line_number":560,"context_line":""},{"line_number":561,"context_line":"        self._delete_server(server1[\u0027id\u0027])"},{"line_number":562,"context_line":"        self._delete_server(server2[\u0027id\u0027])"},{"line_number":563,"context_line":"        self._delete_server(server3[\u0027id\u0027])"},{"line_number":564,"context_line":""},{"line_number":565,"context_line":""},{"line_number":566,"context_line":"class NUMAServersWithNetworksTest(NUMAServersTestBase):"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_3f7d441c","line":563,"range":{"start_line":561,"start_character":0,"end_line":563,"end_character":42},"updated":"2019-09-06 22:29:00.000000000","message":"This is harmless, I guess, other than test time/resource, but it\u0027s not necessary, right?","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"26317a0922271b9776e6cc45a40aae5bbf27ca99","unresolved":false,"context_lines":[{"line_number":558,"context_line":"            {\u0027DISK_GB\u0027: 20, \u0027MEMORY_MB\u0027: 2048, \u0027PCPU\u0027: 2},"},{"line_number":559,"context_line":"            
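These comments all converge on the same refactor: replacing the hand-rolled placement API calls with the shared helpers Eric names. A minimal before/after sketch, assuming the helper names suggested in the comments exist on the ProviderUsageBaseTestCase base class referenced above:

```python
# Before (as written in the patch): raw placement API calls.
compute_rp_uuid = self.placement_api.get(
    '/resource_providers?name=compute1').body[
        'resource_providers'][0]['uuid']
compute_inventory = self.placement_api.get(
    '/resource_providers/%s/inventories' % compute_rp_uuid).body[
        'inventories']
compute_usages = self.placement_api.get(
    '/resource_providers/%s/usages' % compute_rp_uuid).body['usages']

# After: the same lookups via the helpers suggested in the comments.
compute_inventory = self._get_provider_inventory(compute_rp_uuid)
compute_usages = self._get_provider_usages(compute_rp_uuid)
allocations = self._get_allocations_by_server_uuid(server['id'])
```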
Eric Fried (efried), patch set 28, lines 561-563 (the trailing self._delete_server() calls):
    This is harmless, I guess, other than test time/resource, but it's not necessary, right?

    Stephen Finucane (stephenfin), reply:
        It is necessary. If you don't do it, the placement client complains about not being able to delete inventory.

    Eric Fried (efried), reply:
        Where? This is the end of the test case, isn't it? Is some other test case inheriting from it? Is some tearDown somewhere in a superclass or fixture complaining?

        [Later] Okay, I pulled this down and tried it, and whereas it doesn't actually result in failure of the test case, I do see a bunch of exceptions. I'm not sure what's going on there, but for the sake of not distracting from the success of the test case with a bunch of spurious output, I'm happy to leave this in place.

    Matt Riedemann (mriedem), reply:
        https://github.com/openstack/nova/blob/ba3147420c0a6f8b17a46b1a493b89bcd67af6f1/nova/tests/functional/libvirt/base.py#L63 is the thing that triggers the fallout. That is set up to only allow at most 2 servers per test that uses the fixture, which is extremely fragile. Better to fix the fixture to be more dynamic than hack around it with stuff like this.
Matt Riedemann (mriedem), patch set 31, lines 550-551 (the mock.patch of LibvirtDriver._ensure_console_log_for_instance around the migrate action):
    Yuck, don't do this. Fix the source problem, which is the extremely brittle stubbing in the base class:

    https://github.com/openstack/nova/blob/ba3147420c0a6f8b17a46b1a493b89bcd67af6f1/nova/tests/functional/libvirt/base.py#L63

    Stephen Finucane (stephenfin), reply:
        Done

Alex Xu (xuhj), patch set 38, lines 634-635 ("and for the migration record of server2 (the one that has been migrated)"):
    I guess you forgot to check this.

Alex Xu (xuhj), patch set 38, line 729 (the allocation assertions for 'test_compute0'):
    It would be great to check the allocation record whose consumer UUID is the migration one.

File: nova/tests/unit/virt/libvirt/test_driver.py

Stephen Finucane (stephenfin), patch set 6, line 18945 (the mock.patch stack on the reshape test):
    We have 'nova/tests/functional/libvirt/test_reshape.py'. Would this make sense in there instead? There are some techniques from the vGPU test that you could probably steal for this too.

Stephen Finucane (stephenfin), patch set 6, line 18946 (@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_gpu_inventories')):
    You could pass a 'new' parameter to this...
Stephen Finucane (stephenfin), patch set 6, line 18959 (the mock_vgpu_inv parameter of test_update_provider_tree_for_cpus_reshape):
    ...and avoid this, since you're not using it.
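The suggestion is the standard mock idiom: when mock.patch is given new=..., it substitutes that object directly and injects no extra argument into the decorated test, so the unused parameter can be dropped. A minimal sketch (the empty-dict return value is a placeholder, not taken from the patch):

```python
from unittest import mock

@mock.patch('nova.objects.compute_node.ComputeNode.get_by_nodename')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_gpu_inventories',
            new=mock.Mock(return_value={}))  # {} is a placeholder value
def test_update_provider_tree_for_cpus_reshape(self, mock_get_cn):
    # With new=..., only the get_by_nodename patch injects a mock, so the
    # unused 'mock_vgpu_inv' parameter disappears from the signature.
    ...
```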
Eric Fried (efried), patch set 28, line 19313 (def test_update_provider_tree_for_pcpu_reshape):
    I kind of skimmed this, but didn't go too deep. What is it covering that the functional test isn't?

    Stephen Finucane (stephenfin), reply:
        Very little. It's just more innards focused.

File: nova/virt/libvirt/driver.py

Chris Dent (chdent), patch set 3, line 7331, on "raised" ("# then only Reshape is need so raised ReshapNeeded exception."):
    raise

Chris Dent (chdent), patch set 3, line 7342 (the TODO about doing the reshape unconditionally for hosts mixing pinned and unpinned instances):
    Yeah, this is the thought that drove me here to take a look.

    How can you know?

    Do we allow that?

    Or even if we don't encourage it, has it been possible?

    Stephen Finucane (stephenfin), reply:
        > How can you know?

        We have the 'Instance.numa_topology.cells[*].cpu_pinning' fields. If we can map that to a given allocation, we should be able to decide whether to transfer N VCPUs to N PCPUs or not.

        > Do we allow that? Or even if we don't encourage it, has it been possible?

        We don't stop people doing it. You'd be getting almost no benefit from using pinning if you were doing this, but nothing in the API or elsewhere prevents it.

    Chris Dent (chdent), reply:
        Mapping to a given allocation requires some kind of mapping between a device/thing here in nova land and some thing in placement land (a resource provider).

        If PCPU and VCPU are modelled as a resource class of inventory with total > 1 on a generic resource provider like a compute host or a NUMA node, when we allocate "1" of that total, placement doesn't know which "1", only that it is one of the several available.

        If the PCPU is modelled in placement as a nested resource, as inventory on a child NUMA node, and the NUMA node resource provider can be clearly associated with a reference in the numa_topology, then you can at least narrow down where the allocation should go.

        But looking earlier in the stack, that doesn't appear to be what's happening here: PCPU is not nested.

        Given that, if you want to avoid arbitrarily moving all VCPU allocations to PCPU, the piece of information you have that _might_ allow you to branch one way or another is the consumer_uuid, which is the instance_uuid. Presumably the instance knows whether it is pinned or not?
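A hypothetical sketch of the branch being discussed: since the allocation's consumer_uuid is the instance UUID, the instance's own NUMA topology can decide whether its VCPU allocation should move to PCPU. The helper name is illustrative, not from the patch:

```python
def _instance_uses_pinning(instance):
    """Illustrative helper: True if the instance runs with dedicated CPUs.

    cpu_pinning is only populated on the cells of pinned instances, per
    the Instance.numa_topology.cells[*].cpu_pinning fields named above.
    """
    if not instance.numa_topology:
        return False
    return any(cell.cpu_pinning for cell in instance.numa_topology.cells)
```

Only consumers whose instance passes such a check would have VCPU moved to PCPU; unpinned instances sharing the host would keep their VCPU allocations.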
\u0027"}],"source_content_type":"text/x-python","patch_set":6,"id":"7faddb67_3b77810b","line":7331,"range":{"start_line":7331,"start_character":43,"end_line":7331,"end_character":49},"in_reply_to":"7faddb67_3b2c6149","updated":"2019-08-09 10:05:46.000000000","message":"Actually, could you reword this? I\u0027m having a hard time understanding it /o\\","commit_id":"5312a3e4898ff415571caaf8abc914a272db4a37"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"8de98b34cc16d8ac90a3ea6d62600169e770f76d","unresolved":false,"context_lines":[{"line_number":6896,"context_line":"                \u0027step_size\u0027: 1,"},{"line_number":6897,"context_line":"                \u0027allocation_ratio\u0027: 1,"},{"line_number":6898,"context_line":"                \u0027reserved\u0027: 0,"},{"line_number":6899,"context_line":"            }"},{"line_number":6900,"context_line":""},{"line_number":6901,"context_line":"        # If a sharing DISK_GB provider exists in the provider tree, then our"},{"line_number":6902,"context_line":"        # storage is shared, and we should not report the DISK_GB inventory in"}],"source_content_type":"text/x-python","patch_set":7,"id":"7faddb67_9d0d02ea","line":6899,"updated":"2019-08-14 10:46:57.000000000","message":"Here","commit_id":"fd6a97efea477b55ddcd61e630bddf0c676ac25f"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"8de98b34cc16d8ac90a3ea6d62600169e770f76d","unresolved":false,"context_lines":[{"line_number":7321,"context_line":"        ctx \u003d nova_context.get_admin_context()"},{"line_number":7322,"context_line":"        compute_node \u003d objects.ComputeNode.get_by_nodename(ctx, nodename)"},{"line_number":7323,"context_line":"        root_node \u003d provider_tree.data(nodename)"},{"line_number":7324,"context_line":"        # If the PCPU inventories are reported then there is no need of"},{"line_number":7325,"context_line":"        # reshape."},{"line_number":7326,"context_line":"        if orc.PCPU in root_node.inventory:"},{"line_number":7327,"context_line":"            return"},{"line_number":7328,"context_line":""},{"line_number":7329,"context_line":"        if not compute_node.numa_topology:"},{"line_number":7330,"context_line":"            return"}],"source_content_type":"text/x-python","patch_set":7,"id":"7faddb67_7df286e5","line":7327,"range":{"start_line":7324,"start_character":0,"end_line":7327,"end_character":18},"updated":"2019-08-14 10:46:57.000000000","message":"So if I understand this correctly, this is checking if there are any PCPU resources registered against this node. Can we actually do that? I would assume not, since we\u0027ll have always reported PCPU inventory at [1] (as noted above). 
I\u0027m guessing this was taken from the VGPU reshaper, where it made sense since \u0027update_provider_tree\u0027 would have changed from reporting vGPU inventory against the root provider to reporting it against child providers.\n\n[1] https://review.opendev.org/#/c/674895/7/nova/virt/libvirt/driver.py@6891","commit_id":"fd6a97efea477b55ddcd61e630bddf0c676ac25f"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"8de98b34cc16d8ac90a3ea6d62600169e770f76d","unresolved":false,"context_lines":[{"line_number":7358,"context_line":"                        # Remove the VCPU allocation from compute node"},{"line_number":7359,"context_line":"                        # resource provider"},{"line_number":7360,"context_line":"                        del resources[orc.VCPU]"},{"line_number":7361,"context_line":""},{"line_number":7362,"context_line":"                        # Remove VCPU inventory from compute node"},{"line_number":7363,"context_line":"                        # resource provider and update the inventory"},{"line_number":7364,"context_line":"                        if orc.VCPU in root_node.inventory:"},{"line_number":7365,"context_line":"                            del root_node.inventory[orc.VCPU]"},{"line_number":7366,"context_line":"                            provider_tree.update_inventory("},{"line_number":7367,"context_line":"                                nodename, root_node.inventory)"},{"line_number":7368,"context_line":""},{"line_number":7369,"context_line":"    def get_available_resource(self, nodename):"},{"line_number":7370,"context_line":"        \"\"\"Retrieve resource information."}],"source_content_type":"text/x-python","patch_set":7,"id":"7faddb67_dd2cda4b","line":7367,"range":{"start_line":7361,"start_character":0,"end_line":7367,"end_character":62},"updated":"2019-08-14 10:46:57.000000000","message":"That also means this probably isn\u0027t needed.\n\nAm I reading that right?","commit_id":"fd6a97efea477b55ddcd61e630bddf0c676ac25f"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"479adb37633da7b5076c5e4d74348750a1a1856c","unresolved":false,"context_lines":[{"line_number":7365,"context_line":"        # Go figure out how many VCPUs to migrate to PCPUs. 
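For reference, the guard being questioned, reconstructed from the context quoted in these comments into a self-contained sketch (the function wrapper and its name are illustrative; orc is the os-resource-classes package):

```python
import os_resource_classes as orc

from nova import context as nova_context
from nova import objects


def _pcpu_reshape_needed(provider_tree, nodename):
    """Illustrative wrapper around the early-exit logic under review."""
    ctx = nova_context.get_admin_context()
    compute_node = objects.ComputeNode.get_by_nodename(ctx, nodename)
    root_node = provider_tree.data(nodename)
    # If PCPU inventory is already reported against the root provider,
    # the reshape has already happened; Stephen's question above is
    # whether this can ever be true, given PCPU is always reported.
    if orc.PCPU in root_node.inventory:
        return False
    # Hosts with no NUMA topology have nothing to reshape.
    if not compute_node.numa_topology:
        return False
    return True
```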
In theory we"},{"line_number":7366,"context_line":"        # shouldn\u0027t need to do this because we\u0027ve been telling people for years"},{"line_number":7367,"context_line":"        # *not* to mix pinned and unpinned instances, meaning we should be able"},{"line_number":7368,"context_line":"        # to move all PCPUs to VCPUs, but there\u0027s a chance someone didn\u0027t get"},{"line_number":7369,"context_line":"        # the memo"},{"line_number":7370,"context_line":""},{"line_number":7371,"context_line":"        instances_with_bad_allocations \u003d []"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_73ecdcba","line":7368,"range":{"start_line":7368,"start_character":22,"end_line":7368,"end_character":36},"updated":"2019-08-15 16:15:25.000000000","message":"VCPUs to PCPUs right?","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"cff236a033952a676fcc5ec030363525f0402d57","unresolved":false,"context_lines":[{"line_number":7365,"context_line":"        # Go figure out how many VCPUs to migrate to PCPUs. In theory we"},{"line_number":7366,"context_line":"        # shouldn\u0027t need to do this because we\u0027ve been telling people for years"},{"line_number":7367,"context_line":"        # *not* to mix pinned and unpinned instances, meaning we should be able"},{"line_number":7368,"context_line":"        # to move all PCPUs to VCPUs, but there\u0027s a chance someone didn\u0027t get"},{"line_number":7369,"context_line":"        # the memo"},{"line_number":7370,"context_line":""},{"line_number":7371,"context_line":"        instances_with_bad_allocations \u003d []"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_b3525466","line":7368,"range":{"start_line":7368,"start_character":22,"end_line":7368,"end_character":36},"in_reply_to":"7faddb67_73ecdcba","updated":"2019-08-15 16:22:57.000000000","message":"Whoops, yeah","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"799c33321af306d68b337322f5f79827b35e5cb4","unresolved":false,"context_lines":[{"line_number":7365,"context_line":"        # Go figure out how many VCPUs to migrate to PCPUs. 
In theory we"},{"line_number":7366,"context_line":"        # shouldn\u0027t need to do this because we\u0027ve been telling people for years"},{"line_number":7367,"context_line":"        # *not* to mix pinned and unpinned instances, meaning we should be able"},{"line_number":7368,"context_line":"        # to move all PCPUs to VCPUs, but there\u0027s a chance someone didn\u0027t get"},{"line_number":7369,"context_line":"        # the memo"},{"line_number":7370,"context_line":""},{"line_number":7371,"context_line":"        instances_with_bad_allocations \u003d []"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_cadea73d","line":7368,"range":{"start_line":7368,"start_character":22,"end_line":7368,"end_character":36},"in_reply_to":"7faddb67_b3525466","updated":"2019-08-23 16:42:42.000000000","message":"Done","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"479adb37633da7b5076c5e4d74348750a1a1856c","unresolved":false,"context_lines":[{"line_number":7370,"context_line":""},{"line_number":7371,"context_line":"        instances_with_bad_allocations \u003d []"},{"line_number":7372,"context_line":""},{"line_number":7373,"context_line":"        # TODO(stephenfin): Would \u0027get_by_host\u0027 be okay since libvirt has a 1:1"},{"line_number":7374,"context_line":"        # mapping of host:node"},{"line_number":7375,"context_line":"        instances \u003d objects.InstanceList.get_by_host_and_node("},{"line_number":7376,"context_line":"            ctx, compute_node.host, compute_node.hypervisor_hostname,"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_f3352c46","line":7373,"updated":"2019-08-15 16:15:25.000000000","message":"Yes, I\u0027m not sure how much it matters though.","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"799c33321af306d68b337322f5f79827b35e5cb4","unresolved":false,"context_lines":[{"line_number":7370,"context_line":""},{"line_number":7371,"context_line":"        instances_with_bad_allocations \u003d []"},{"line_number":7372,"context_line":""},{"line_number":7373,"context_line":"        # TODO(stephenfin): Would \u0027get_by_host\u0027 be okay since libvirt has a 1:1"},{"line_number":7374,"context_line":"        # mapping of host:node"},{"line_number":7375,"context_line":"        instances \u003d objects.InstanceList.get_by_host_and_node("},{"line_number":7376,"context_line":"            ctx, compute_node.host, compute_node.hypervisor_hostname,"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_cab787f5","line":7373,"in_reply_to":"7faddb67_f3352c46","updated":"2019-08-23 16:42:42.000000000","message":"Done","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"479adb37633da7b5076c5e4d74348750a1a1856c","unresolved":false,"context_lines":[{"line_number":7385,"context_line":"            instances_with_bad_allocations.append(instance.uuid)"},{"line_number":7386,"context_line":""},{"line_number":7387,"context_line":"        for instance_uuid in instances_with_bad_allocations:"},{"line_number":7388,"context_line":"            instance_allocations \u003d 
allocations[instance_uuid][\u0027allocations\u0027]"},{"line_number":7389,"context_line":"            # TODO(stephenfin): We can probably just check the allocations for"},{"line_number":7390,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) provider of"},{"line_number":7391,"context_line":"            # VCPU and PCPU resources"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_93271813","line":7388,"range":{"start_line":7388,"start_character":35,"end_line":7388,"end_character":61},"updated":"2019-08-15 16:15:25.000000000","message":"Is the instance_uuid guaranteed to be in the allocations?\n\nAlso, what happens if there are in-progress migrations, like a pending resize/cold migration, where there are allocations against this compute node but the consumer is the migration UUID for the old flavor?","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"056b62bfacf9de749e84cb367dc6de9c24ac9080","unresolved":false,"context_lines":[{"line_number":7385,"context_line":"            instances_with_bad_allocations.append(instance.uuid)"},{"line_number":7386,"context_line":""},{"line_number":7387,"context_line":"        for instance_uuid in instances_with_bad_allocations:"},{"line_number":7388,"context_line":"            instance_allocations \u003d allocations[instance_uuid][\u0027allocations\u0027]"},{"line_number":7389,"context_line":"            # TODO(stephenfin): We can probably just check the allocations for"},{"line_number":7390,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) provider of"},{"line_number":7391,"context_line":"            # VCPU and PCPU resources"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_b30a5423","line":7388,"range":{"start_line":7388,"start_character":35,"end_line":7388,"end_character":61},"in_reply_to":"7faddb67_1363281c","updated":"2019-08-15 16:26:21.000000000","message":"\u003e \u003e Is the instance_uuid guaranteed to be in the allocations?\n \u003e \n \u003e I mean, it should, right? If the instances is on this host, then it\n \u003e should be consuming inventory unless something is completely\n \u003e borked.\n\nYeah, it\u0027s the borked case I worry about. Admins could be mucking with allocations in placement directly to try and fix things, e.g. scheduler keeps kicking out this host, maybe it\u0027s placement. I\u0027ll delete all of the allocations for this host and then run the heal_allocations CLI to fix things up. That kind of stuff. So I\u0027d probably be defensive and check using \u0027in\u0027 or get() on the dict and log an error if allocations are missing for the instance on this node and continue.\n\n \u003e \n \u003e \u003e Also, what happens if there are in-progress migrations, like a\n \u003e \u003e pending resize/cold migration, where there are allocations\n \u003e against\n \u003e \u003e this compute node but the consumer is the migration UUID for the\n \u003e \u003e old flavor?\n \u003e \n \u003e Hmm, that I have no idea about. 
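Sketching mriedem's defensive-lookup suggestion from the comment above: only the .get() pattern and the log-an-error-and-continue behaviour come from his comment; the helper name and log wording are mine.

import logging

LOG = logging.getLogger(__name__)


def _get_consumer_allocations(allocations, instance_uuid, nodename):
    # Don't assume the instance UUID is present: an admin may have deleted
    # or rewritten allocations in placement directly, so log and carry on
    # rather than blowing up with a KeyError.
    instance_allocations = allocations.get(
        instance_uuid, {}).get('allocations', {})
    if not instance_allocations:
        LOG.error('Expected allocations for instance %s on node %s but '
                  'found none; skipping it during the reshape',
                  instance_uuid, nodename)
    return instance_allocations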
This will trigger on node startup\n \u003e after an upgrade so I\u0027d be _hoping_ there wouldn\u0027t be any\n \u003e in-progress migrations hanging about, but I don\u0027t know if I can be\n \u003e sure of this or not.\n\nYou could definitely upgrade computes that have instances with pending resizes, so the dest compute would have the instance consumer for the new flavor allocations but the source compute would have an essentially placeholder migration record as the consumer with the old flavor allocations.","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"cff236a033952a676fcc5ec030363525f0402d57","unresolved":false,"context_lines":[{"line_number":7385,"context_line":"            instances_with_bad_allocations.append(instance.uuid)"},{"line_number":7386,"context_line":""},{"line_number":7387,"context_line":"        for instance_uuid in instances_with_bad_allocations:"},{"line_number":7388,"context_line":"            instance_allocations \u003d allocations[instance_uuid][\u0027allocations\u0027]"},{"line_number":7389,"context_line":"            # TODO(stephenfin): We can probably just check the allocations for"},{"line_number":7390,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) provider of"},{"line_number":7391,"context_line":"            # VCPU and PCPU resources"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_1363281c","line":7388,"range":{"start_line":7388,"start_character":35,"end_line":7388,"end_character":61},"in_reply_to":"7faddb67_93271813","updated":"2019-08-15 16:22:57.000000000","message":"\u003e Is the instance_uuid guaranteed to be in the allocations?\n\nI mean, it should, right? If the instances is on this host, then it should be consuming inventory unless something is completely borked.\n\n \u003e Also, what happens if there are in-progress migrations, like a\n \u003e pending resize/cold migration, where there are allocations against\n \u003e this compute node but the consumer is the migration UUID for the\n \u003e old flavor?\n\nHmm, that I have no idea about. This will trigger on node startup after an upgrade so I\u0027d be _hoping_ there wouldn\u0027t be any in-progress migrations hanging about, but I don\u0027t know if I can be sure of this or not.","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"479adb37633da7b5076c5e4d74348750a1a1856c","unresolved":false,"context_lines":[{"line_number":7386,"context_line":""},{"line_number":7387,"context_line":"        for instance_uuid in instances_with_bad_allocations:"},{"line_number":7388,"context_line":"            instance_allocations \u003d allocations[instance_uuid][\u0027allocations\u0027]"},{"line_number":7389,"context_line":"            # TODO(stephenfin): We can probably just check the allocations for"},{"line_number":7390,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) 
provider of"},{"line_number":7391,"context_line":"            # VCPU and PCPU resources"},{"line_number":7392,"context_line":"            for rp_uuid in instance_allocations:"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_1362483f","line":7389,"updated":"2019-08-15 16:15:25.000000000","message":"You mean just ignore anything that\u0027s not rp_uuid !\u003d compute_node.uuid, right? Or even better:\n\nresources \u003d instance_allocations.get(compute_node.uuid).get(\u0027resources\u0027)\n\nYeah that\u0027s true for VCPU, I don\u0027t know about PCPU since I haven\u0027t been following this series or the spec. Is PCPU inventory reported on the root compute node resource provider or a nested provider like VGPU?","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"cff236a033952a676fcc5ec030363525f0402d57","unresolved":false,"context_lines":[{"line_number":7386,"context_line":""},{"line_number":7387,"context_line":"        for instance_uuid in instances_with_bad_allocations:"},{"line_number":7388,"context_line":"            instance_allocations \u003d allocations[instance_uuid][\u0027allocations\u0027]"},{"line_number":7389,"context_line":"            # TODO(stephenfin): We can probably just check the allocations for"},{"line_number":7390,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) provider of"},{"line_number":7391,"context_line":"            # VCPU and PCPU resources"},{"line_number":7392,"context_line":"            for rp_uuid in instance_allocations:"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_33f50458","line":7389,"in_reply_to":"7faddb67_1362483f","updated":"2019-08-15 16:22:57.000000000","message":"\u003e You mean just ignore anything that\u0027s not rp_uuid !\u003d\n \u003e compute_node.uuid, right? Or even better:\n \u003e \n \u003e resources \u003d instance_allocations.get(compute_node.uuid).get(\u0027resources\u0027)\n\nYup\n\n \u003e Yeah that\u0027s true for VCPU, I don\u0027t know about PCPU since I haven\u0027t\n \u003e been following this series or the spec. Is PCPU inventory reported\n \u003e on the root compute node resource provider or a nested provider\n \u003e like VGPU?\n\nThe former. PCPU inventory is almost identical to VCPU in how it\u0027s used and consumed, with the key difference being the lack of an overcommit ratio for the former. They\u0027ll both become nested once we model NUMA in placement.","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"799c33321af306d68b337322f5f79827b35e5cb4","unresolved":false,"context_lines":[{"line_number":7386,"context_line":""},{"line_number":7387,"context_line":"        for instance_uuid in instances_with_bad_allocations:"},{"line_number":7388,"context_line":"            instance_allocations \u003d allocations[instance_uuid][\u0027allocations\u0027]"},{"line_number":7389,"context_line":"            # TODO(stephenfin): We can probably just check the allocations for"},{"line_number":7390,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) 
provider of"},{"line_number":7391,"context_line":"            # VCPU and PCPU resources"},{"line_number":7392,"context_line":"            for rp_uuid in instance_allocations:"}],"source_content_type":"text/x-python","patch_set":8,"id":"7faddb67_6a7a1326","line":7389,"in_reply_to":"7faddb67_33f50458","updated":"2019-08-23 16:42:42.000000000","message":"I left this one TODO as it\u0027s Friday and I need to experiment with it. Will come back to it.","commit_id":"88cb3978a4063bc4503b5fbf7be3c1248ff33bf1"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"a23845c2e8ffde0b9dc1d8071506368f6b41bedb","unresolved":false,"context_lines":[{"line_number":7313,"context_line":"                del root_node.inventory[orc.VGPU]"},{"line_number":7314,"context_line":"                provider_tree.update_inventory(nodename, root_node.inventory)"},{"line_number":7315,"context_line":""},{"line_number":7316,"context_line":"    def _update_provider_tree_for_pcpu(self, provider_tree, nodename,"},{"line_number":7317,"context_line":"                                       allocations\u003dNone):"},{"line_number":7318,"context_line":"        \"\"\"Updates the provider tree for PCPU inventory."},{"line_number":7319,"context_line":""}],"source_content_type":"text/x-python","patch_set":11,"id":"7faddb67_9cd91957","line":7316,"range":{"start_line":7316,"start_character":9,"end_line":7316,"end_character":38},"updated":"2019-09-02 04:16:55.000000000","message":"the name of this method sounds wrong.","commit_id":"c5b27b1433c3d6ed43a68384176b0eab2e3aec2a"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"731ef723c8d654e6dcfd2670eccbc35e19d988bb","unresolved":false,"context_lines":[{"line_number":7313,"context_line":"                del root_node.inventory[orc.VGPU]"},{"line_number":7314,"context_line":"                provider_tree.update_inventory(nodename, root_node.inventory)"},{"line_number":7315,"context_line":""},{"line_number":7316,"context_line":"    def _update_provider_tree_for_pcpu(self, provider_tree, nodename,"},{"line_number":7317,"context_line":"                                       allocations\u003dNone):"},{"line_number":7318,"context_line":"        \"\"\"Updates the provider tree for PCPU inventory."},{"line_number":7319,"context_line":""}],"source_content_type":"text/x-python","patch_set":11,"id":"7faddb67_0b79962d","line":7316,"range":{"start_line":7316,"start_character":9,"end_line":7316,"end_character":38},"in_reply_to":"7faddb67_9cd91957","updated":"2019-09-03 09:03:59.000000000","message":"I\u0027ve just copied the name from the vGPU equivalent (_update_provider_tree_for_vgpu)","commit_id":"c5b27b1433c3d6ed43a68384176b0eab2e3aec2a"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"a23845c2e8ffde0b9dc1d8071506368f6b41bedb","unresolved":false,"context_lines":[{"line_number":7355,"context_line":""},{"line_number":7356,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"},{"line_number":7357,"context_line":"        # (yet)"},{"line_number":7358,"context_line":"        if not CONF.compute.cpu_dedicated_set:"},{"line_number":7359,"context_line":"            return"},{"line_number":7360,"context_line":""},{"line_number":7361,"context_line":"        # Similarly, if PCPU inventories are already reported then there is 
no"}],"source_content_type":"text/x-python","patch_set":22,"id":"7faddb67_d0d6dbc3","line":7358,"updated":"2019-09-02 04:16:55.000000000","message":"When I set the cpu_dedicated_set option for a host with shared numa instance, then the inventory update will fail, but the nova-compute is still running, if stop and start the instance, the instance is going to error status. So I think we should give a check on this, in case the operator set a wrong option for the host, then making the instance into danger status.","commit_id":"ef55a6b0bdfd8f314bcf662bd484989e72c9242b"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e8376f3234b77ded696e672c66a6441c8f2255f7","unresolved":false,"context_lines":[{"line_number":7355,"context_line":""},{"line_number":7356,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"},{"line_number":7357,"context_line":"        # (yet)"},{"line_number":7358,"context_line":"        if not CONF.compute.cpu_dedicated_set:"},{"line_number":7359,"context_line":"            return"},{"line_number":7360,"context_line":""},{"line_number":7361,"context_line":"        # Similarly, if PCPU inventories are already reported then there is no"}],"source_content_type":"text/x-python","patch_set":22,"id":"7faddb67_cb937bff","line":7358,"in_reply_to":"7faddb67_0b9e7653","updated":"2019-09-03 12:30:24.000000000","message":"Emm..I don\u0027t think they are same thing. The case I found is the reshape failed, then continue reporting VCPU, so the shared numa instance can be schedule to this host, but this host without cpu_shared_set, so the cpusets in xml is empty which leads to the error from libvirt. We can have check when reshape if there are VCPU consuming, then refuse to reshape and stop the nova-compute.","commit_id":"ef55a6b0bdfd8f314bcf662bd484989e72c9242b"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"e8467e9521ab899fae9097eee7879fd6f7687e51","unresolved":false,"context_lines":[{"line_number":7355,"context_line":""},{"line_number":7356,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"},{"line_number":7357,"context_line":"        # (yet)"},{"line_number":7358,"context_line":"        if not CONF.compute.cpu_dedicated_set:"},{"line_number":7359,"context_line":"            return"},{"line_number":7360,"context_line":""},{"line_number":7361,"context_line":"        # Similarly, if PCPU inventories are already reported then there is no"}],"source_content_type":"text/x-python","patch_set":22,"id":"7faddb67_0b9e7653","line":7358,"in_reply_to":"7faddb67_1aaa51d1","updated":"2019-09-03 09:09:07.000000000","message":"I suspect the exact same thing will happen on stable/stein if you boot NUMA-based instances with \u0027vcpu_pin_set\u0027 and then change \u0027vcpu_pin_set\u0027 to exclude some host CPUs that were previously included.\n\nI don\u0027t think there\u0027s anything we can do about this. I could add a startup check to ensure \u0027vcpu_pin_set\u0027/\u0027cpu_dedicated_set\u0027/\u0027cpu_shared_set\u0027 are correctly set given the cpuset of existing instances, but that will be very slow. 
Another thing we could do is just catch that particular error and suggest the user cold migrate the instance, but I\u0027m not sure if that\u0027s possible either since we\u0027d have to fine tune our exception handling in a way that libvirt might not support. I\u0027m open to other ideas too","commit_id":"ef55a6b0bdfd8f314bcf662bd484989e72c9242b"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"5ac5bdf9b5bb57267eee623c0314aa4fda93aa8d","unresolved":false,"context_lines":[{"line_number":7355,"context_line":""},{"line_number":7356,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"},{"line_number":7357,"context_line":"        # (yet)"},{"line_number":7358,"context_line":"        if not CONF.compute.cpu_dedicated_set:"},{"line_number":7359,"context_line":"            return"},{"line_number":7360,"context_line":""},{"line_number":7361,"context_line":"        # Similarly, if PCPU inventories are already reported then there is no"}],"source_content_type":"text/x-python","patch_set":22,"id":"7faddb67_fea9498e","line":7358,"in_reply_to":"7faddb67_aed08dc9","updated":"2019-09-04 03:58:07.000000000","message":"The steps as below:\n\n* Boot shared numa instance.\n* Upgrade the control plane and compute node\n* Update the cpu_dedicated_set which trigger the reshape but failed\n* Stop that existed shared numa instance\n* Start that existed shared numa instance but failed\n* Boot new shared numa instance, it is going to error status.\n* Fix the cpu_dedicated_set to cpu_shared_set, then the existed shared numa instance can be started.\n\nThe error log of reshape failed:\nSep 01 19:26:29 jfz1r04h09 nova-compute[42679]: ERROR nova.scheduler.client.report [None req-110d601c-d87b-4520-abd9-afe72fda5b04 None None] [req-a9d6eda2-bf12-41ce-97c6-b07b3e958152] Failed to update inventory to [{\u0027MEMORY_MB\u0027: {\u0027allocation_ratio\u0027: 1.5, \u0027total\u0027: 257851, \u0027reserved\u0027: 512, \u0027step_size\u0027: 1, \u0027min_unit\u0027: 1, \u0027max_unit\u0027: 257851}, \u0027PCPU\u0027: {\u0027allocation_ratio\u0027: 1, \u0027total\u0027: 72, \u0027reserved\u0027: 0, \u0027step_size\u0027: 1, \u0027min_unit\u0027: 1, \u0027max_unit\u0027: 72}, \u0027DISK_GB\u0027: {\u0027allocation_ratio\u0027: 1.0, \u0027total\u0027: 438, \u0027reserved\u0027: 0, \u0027step_size\u0027: 1, \u0027min_unit\u0027: 1, \u0027max_unit\u0027: 438}}] for resource provider with UUID 651de9ee-26a7-4e5a-8ea1-b0ec2076b1a3.  Got 409: {\"errors\": [{\"status\": 409, \"request_id\": \"req-a9d6eda2-bf12-41ce-97c6-b07b3e958152\", \"code\": \"placement.inventory.inuse\", \"detail\": \"There was a conflict when trying to complete your request.\\n\\n update conflict: Inventory for \u0027VCPU\u0027 on resource provider \u0027651de9ee-26a7-4e5a-8ea1-b0ec2076b1a3\u0027 in use.  
\", \"title\": \"Conflict\"}]}\n\nThe start the existing shared numa errored info:\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]: ERROR oslo_messaging.rpc.server   File \"/usr/local/lib/python2.7/dist-packages/libvirt.py\", line 3698, in defineXML\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]: ERROR oslo_messaging.rpc.server     if ret is None:raise libvirtError(\u0027virDomainDefineXML() failed\u0027, conn\u003dself)\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]: ERROR oslo_messaging.rpc.server libvirtError: invalid argument: Failed to parse bitmap \u0027\u0027","commit_id":"ef55a6b0bdfd8f314bcf662bd484989e72c9242b"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"ba78556592468359c9a447f5c29fc59914be5714","unresolved":false,"context_lines":[{"line_number":7355,"context_line":""},{"line_number":7356,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"},{"line_number":7357,"context_line":"        # (yet)"},{"line_number":7358,"context_line":"        if not CONF.compute.cpu_dedicated_set:"},{"line_number":7359,"context_line":"            return"},{"line_number":7360,"context_line":""},{"line_number":7361,"context_line":"        # Similarly, if PCPU inventories are already reported then there is no"}],"source_content_type":"text/x-python","patch_set":22,"id":"7faddb67_aed08dc9","line":7358,"in_reply_to":"7faddb67_cb937bff","updated":"2019-09-03 13:27:04.000000000","message":"Oh, sorry, I misunderstood you. Can you provide reproduction steps and logs so I know where to start looking?","commit_id":"ef55a6b0bdfd8f314bcf662bd484989e72c9242b"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0892fa013ae410d3ed3b8ea2cbf27ff462f9cb4e","unresolved":false,"context_lines":[{"line_number":7355,"context_line":""},{"line_number":7356,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"},{"line_number":7357,"context_line":"        # (yet)"},{"line_number":7358,"context_line":"        if not CONF.compute.cpu_dedicated_set:"},{"line_number":7359,"context_line":"            return"},{"line_number":7360,"context_line":""},{"line_number":7361,"context_line":"        # Similarly, if PCPU inventories are already reported then there is no"}],"source_content_type":"text/x-python","patch_set":22,"id":"7faddb67_1aaa51d1","line":7358,"in_reply_to":"7faddb67_d0d6dbc3","updated":"2019-09-02 06:21:49.000000000","message":"Debug that, due to I set the CONF.compute.cpu_dedicated_set on a host with shared numa instance. The inventory update will fail due to there are usage on the VCPU.\n\nThe nova-compute is still running. The libvirt virt driver will report 0 shared vcpu available due to CONF.compute.cpu_dedicated_set has value. After restart a VM, the VM\u0027s vcpu can\u0027t pin to the shared cpus. 
The xml as below:\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:   \u003ccputune\u003e\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:     \u003cshares\u003e4096\u003c/shares\u003e\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:     \u003cemulatorpin cpuset\u003d\"\"/\u003e\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:     \u003cvcpupin vcpu\u003d\"0\" cpuset\u003d\"\"/\u003e\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:     \u003cvcpupin vcpu\u003d\"1\" cpuset\u003d\"\"/\u003e\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:     \u003cvcpupin vcpu\u003d\"2\" cpuset\u003d\"\"/\u003e\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:     \u003cvcpupin vcpu\u003d\"3\" cpuset\u003d\"\"/\u003e\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]:   \u003c/cputune\u003e\n\n\nDue to cpuset\u003d\"\", then get error as below:\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]: ERROR oslo_messaging.rpc.server     if ret is None:raise libvirtError(\u0027virDomainDefineXML() failed\u0027, conn\u003dself)\nSep 01 19:34:25 jfz1r04h09 nova-compute[42679]: ERROR oslo_messaging.rpc.server libvirtError: invalid argument: Failed to parse bitmap \u0027\u0027","commit_id":"ef55a6b0bdfd8f314bcf662bd484989e72c9242b"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"ff7a1340fd788a34ef7f864508356362404ee781","unresolved":false,"context_lines":[{"line_number":7355,"context_line":""},{"line_number":7356,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"},{"line_number":7357,"context_line":"        # (yet)"},{"line_number":7358,"context_line":"        if not CONF.compute.cpu_dedicated_set:"},{"line_number":7359,"context_line":"            return"},{"line_number":7360,"context_line":""},{"line_number":7361,"context_line":"        # Similarly, if PCPU inventories are already reported then there is no"}],"source_content_type":"text/x-python","patch_set":22,"id":"7faddb67_5112aa59","line":7358,"in_reply_to":"7faddb67_fea9498e","updated":"2019-09-05 13:43:47.000000000","message":"Okay, so the crucial thing was that you set aside all host CPUs for pinned instances when there were actually unpinned instances on the host. I\u0027ve added [1] to the series which should prevent you from doing this. 
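Change [1] (https://review.opendev.org/#/c/680107/) is not included in this dump, so as a rough, hypothetical sketch of the guard being described (all names here are mine, not the actual patch): refuse to start when cpu_dedicated_set would leave existing unpinned instances with no shared CPUs to float on, which is what produced the empty cpuset and the libvirt "Failed to parse bitmap ''" error above.

def check_cpu_sets(cpu_shared_set, cpu_dedicated_set, has_unpinned_instances):
    # cpu_*_set are sets of host CPU ids; has_unpinned_instances says
    # whether any instance on this host still uses floating (shared) CPUs.
    if cpu_dedicated_set and has_unpinned_instances and not cpu_shared_set:
        raise RuntimeError(
            'cpu_dedicated_set is configured but this host still has '
            'unpinned instances and no cpu_shared_set; their vcpupin '
            'cpusets would be empty')


# Example: every host CPU dedicated while an unpinned instance remains.
# check_cpu_sets(set(), {0, 1, 2, 3}, has_unpinned_instances=True)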
Would you mind retesting to see if it resolves your issue?\n\n[1] https://review.opendev.org/#/c/680107/","commit_id":"ef55a6b0bdfd8f314bcf662bd484989e72c9242b"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":6910,"context_line":"                \u0027reserved\u0027: CONF.reserved_host_cpus,"},{"line_number":6911,"context_line":"            }"},{"line_number":6912,"context_line":""},{"line_number":6913,"context_line":"        if pcpus:"},{"line_number":6914,"context_line":"            result[orc.PCPU] \u003d {"},{"line_number":6915,"context_line":"                \u0027total\u0027: pcpus,"},{"line_number":6916,"context_line":"                \u0027min_unit\u0027: 1,"},{"line_number":6917,"context_line":"                \u0027max_unit\u0027: pcpus,"},{"line_number":6918,"context_line":"                \u0027step_size\u0027: 1,"},{"line_number":6919,"context_line":"                \u0027allocation_ratio\u0027: 1,"},{"line_number":6920,"context_line":"                \u0027reserved\u0027: 0,"},{"line_number":6921,"context_line":"            }"},{"line_number":6922,"context_line":""},{"line_number":6923,"context_line":"        # If a sharing DISK_GB provider exists in the provider tree, then our"},{"line_number":6924,"context_line":"        # storage is shared, and we should not report the DISK_GB inventory in"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_3fc5448f","line":6921,"range":{"start_line":6913,"start_character":0,"end_line":6921,"end_character":13},"updated":"2019-09-06 22:29:00.000000000","message":"here","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":7302,"context_line":"                del root_node.inventory[orc.VGPU]"},{"line_number":7303,"context_line":"                provider_tree.update_inventory(nodename, root_node.inventory)"},{"line_number":7304,"context_line":""},{"line_number":7305,"context_line":"    def _update_provider_tree_for_pcpu(self, provider_tree, nodename,"},{"line_number":7306,"context_line":"                                       allocations\u003dNone):"},{"line_number":7307,"context_line":"        \"\"\"Updates the provider tree for PCPU inventory."},{"line_number":7308,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_5fd92081","line":7305,"range":{"start_line":7305,"start_character":8,"end_line":7305,"end_character":38},"updated":"2019-09-06 22:29:00.000000000","message":"Normally a reshaper would update inventories as well as allocations. 
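For reference, the inventory half of the story lives in the parent update_provider_tree flow (the hunk Eric tagged "here"), which is why this reshaper only has to rewrite allocations. Restated from that patch context as a small helper (the function wrapper is mine, the dict is verbatim):

import os_resource_classes as orc


def pcpu_inventory(pcpus):
    # Dedicated CPUs are never overcommitted, hence the fixed
    # allocation_ratio of 1 and reserved of 0, unlike the VCPU inventory,
    # which honours cpu_allocation_ratio and reserved_host_cpus.
    return {
        orc.PCPU: {
            'total': pcpus,
            'min_unit': 1,
            'max_unit': pcpus,
            'step_size': 1,
            'allocation_ratio': 1,
            'reserved': 0,
        },
    }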
The reason that\u0027s not needed in here is that you\u0027ve done the \"duplicate reporting\" thing earlier in the series, yah?\n\nMight be worth calling that out in the commit message.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"26317a0922271b9776e6cc45a40aae5bbf27ca99","unresolved":false,"context_lines":[{"line_number":7302,"context_line":"                del root_node.inventory[orc.VGPU]"},{"line_number":7303,"context_line":"                provider_tree.update_inventory(nodename, root_node.inventory)"},{"line_number":7304,"context_line":""},{"line_number":7305,"context_line":"    def _update_provider_tree_for_pcpu(self, provider_tree, nodename,"},{"line_number":7306,"context_line":"                                       allocations\u003dNone):"},{"line_number":7307,"context_line":"        \"\"\"Updates the provider tree for PCPU inventory."},{"line_number":7308,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_f7033f0c","line":7305,"range":{"start_line":7305,"start_character":8,"end_line":7305,"end_character":38},"in_reply_to":"5faad753_5fd92081","updated":"2019-09-09 14:55:27.000000000","message":"No, it\u0027s because we handle inventories in the parent method (your \"here\" comment above)","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"1dd93da797ff50938e60303f74abfd9355dce1e3","unresolved":false,"context_lines":[{"line_number":7302,"context_line":"                del root_node.inventory[orc.VGPU]"},{"line_number":7303,"context_line":"                provider_tree.update_inventory(nodename, root_node.inventory)"},{"line_number":7304,"context_line":""},{"line_number":7305,"context_line":"    def _update_provider_tree_for_pcpu(self, provider_tree, nodename,"},{"line_number":7306,"context_line":"                                       allocations\u003dNone):"},{"line_number":7307,"context_line":"        \"\"\"Updates the provider tree for PCPU inventory."},{"line_number":7308,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_ab21db4f","line":7305,"range":{"start_line":7305,"start_character":8,"end_line":7305,"end_character":38},"in_reply_to":"5faad753_f7033f0c","updated":"2019-09-09 15:34:09.000000000","message":"Yeah, we\u0027re saying the same thing.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":7344,"context_line":"        :raises: nova.exception.ReshapeFailed if the requested tree reshape"},{"line_number":7345,"context_line":"            fails for whatever reason."},{"line_number":7346,"context_line":"        \"\"\""},{"line_number":7347,"context_line":"        ctx \u003d nova_context.get_admin_context()"},{"line_number":7348,"context_line":"        compute_node \u003d objects.ComputeNode.get_by_nodename(ctx, nodename)"},{"line_number":7349,"context_line":"        root_node \u003d provider_tree.data(nodename)"},{"line_number":7350,"context_line":""},{"line_number":7351,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry 
about"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_3fcb0454","line":7348,"range":{"start_line":7347,"start_character":0,"end_line":7348,"end_character":73},"updated":"2019-09-06 22:29:00.000000000","message":"Do this after the `PCPU in inventory` condition, because you won\u0027t need it in the common case, and it (db call) isn\u0027t cheap.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"26317a0922271b9776e6cc45a40aae5bbf27ca99","unresolved":false,"context_lines":[{"line_number":7344,"context_line":"        :raises: nova.exception.ReshapeFailed if the requested tree reshape"},{"line_number":7345,"context_line":"            fails for whatever reason."},{"line_number":7346,"context_line":"        \"\"\""},{"line_number":7347,"context_line":"        ctx \u003d nova_context.get_admin_context()"},{"line_number":7348,"context_line":"        compute_node \u003d objects.ComputeNode.get_by_nodename(ctx, nodename)"},{"line_number":7349,"context_line":"        root_node \u003d provider_tree.data(nodename)"},{"line_number":7350,"context_line":""},{"line_number":7351,"context_line":"        # If we\u0027re not configuring PCPUs, then we\u0027ve nothing to worry about"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_171cdb68","line":7348,"range":{"start_line":7347,"start_character":0,"end_line":7348,"end_character":73},"in_reply_to":"5faad753_3fcb0454","updated":"2019-09-09 14:55:27.000000000","message":"Done","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":7382,"context_line":"                \u0027provider %s\u0027, nodename)"},{"line_number":7383,"context_line":"            raise exception.ReshapeNeeded()"},{"line_number":7384,"context_line":""},{"line_number":7385,"context_line":"        # Go figure out how many VCPUs to migrate to PCPUs. In theory we"},{"line_number":7386,"context_line":"        # shouldn\u0027t need to do this because we\u0027ve been telling people for years"},{"line_number":7387,"context_line":"        # *not* to mix pinned and unpinned instances, meaning we should be able"},{"line_number":7388,"context_line":"        # to move all VCPUs to PCPUs, but there\u0027s a chance someone didn\u0027t get"},{"line_number":7389,"context_line":"        # the memo"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_3f40e435","line":7386,"range":{"start_line":7385,"start_character":60,"end_line":7386,"end_character":35},"updated":"2019-09-06 22:29:00.000000000","message":"Is it still just a theory since [1]? Is there a way, e.g. 
because of [2], where this can still happen?\n\n[1] https://review.opendev.org/#/c/680107/\n[2] https://review.opendev.org/#/c/680107/3/nova/compute/manager.py@858","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"26317a0922271b9776e6cc45a40aae5bbf27ca99","unresolved":false,"context_lines":[{"line_number":7382,"context_line":"                \u0027provider %s\u0027, nodename)"},{"line_number":7383,"context_line":"            raise exception.ReshapeNeeded()"},{"line_number":7384,"context_line":""},{"line_number":7385,"context_line":"        # Go figure out how many VCPUs to migrate to PCPUs. In theory we"},{"line_number":7386,"context_line":"        # shouldn\u0027t need to do this because we\u0027ve been telling people for years"},{"line_number":7387,"context_line":"        # *not* to mix pinned and unpinned instances, meaning we should be able"},{"line_number":7388,"context_line":"        # to move all VCPUs to PCPUs, but there\u0027s a chance someone didn\u0027t get"},{"line_number":7389,"context_line":"        # the memo"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_1735bbda","line":7386,"range":{"start_line":7385,"start_character":60,"end_line":7386,"end_character":35},"in_reply_to":"5faad753_3f40e435","updated":"2019-09-09 14:55:27.000000000","message":"Not quite. Reworded. Hopefully this makes more sense.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"95e30e50a067684017da8b7b682e635b453c35a9","unresolved":false,"context_lines":[{"line_number":7390,"context_line":""},{"line_number":7391,"context_line":"        instances_with_bad_allocations \u003d []"},{"line_number":7392,"context_line":""},{"line_number":7393,"context_line":"        instances \u003d objects.InstanceList.get_by_host("},{"line_number":7394,"context_line":"            ctx, compute_node.host, expected_attrs\u003d[\u0027numa_topology\u0027])"},{"line_number":7395,"context_line":"        for instance in instances:"},{"line_number":7396,"context_line":"            if not instance.numa_topology:"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_8b5bd62c","line":7393,"updated":"2019-09-07 05:59:51.000000000","message":"an interesting case I found is I have instance does the resize.\n\nThe instance resize sucessful, and boot up on the destination host, and waiting for confirm resize.\n\nThen the instance have an allocation record on the source host. And that allocation record\u0027s consumer uuid is the migration uuid.\n\nso this reshape doesn\u0027t care about migration case. so the reshape failed in the end\n\nSep 06 22:55:04 jfz1r03h15 nova-compute[89374]: ERROR oslo_service.service     raise exception.ReshapeFailed(error\u003dresp.text)\nSep 06 22:55:04 jfz1r03h15 nova-compute[89374]: ERROR oslo_service.service ReshapeFailed: Resource provider inventory and allocation data migration failed: {\"errors\": [{\"status\": 409, \"request_id\": \"req-c22ba9c2-b39e-4adc-8899-e2bcf2311d21\", \"code\": \"placement.inventory.inuse\", \"detail\": \"There was a conflict when trying to complete your request.\\n\\n update conflict: Inventory for \u0027VCPU\u0027 on resource provider \u0027875c52ae-fcdd-405a-8f8b-07ea57fe3225\u0027 in use.  
\", \"title\": \"Conflict\"}]}","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"4a36426c2be0b579937c2a861c0ef8c009516902","unresolved":false,"context_lines":[{"line_number":7390,"context_line":""},{"line_number":7391,"context_line":"        instances_with_bad_allocations \u003d []"},{"line_number":7392,"context_line":""},{"line_number":7393,"context_line":"        instances \u003d objects.InstanceList.get_by_host("},{"line_number":7394,"context_line":"            ctx, compute_node.host, expected_attrs\u003d[\u0027numa_topology\u0027])"},{"line_number":7395,"context_line":"        for instance in instances:"},{"line_number":7396,"context_line":"            if not instance.numa_topology:"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_e69e2a06","line":7393,"in_reply_to":"5faad753_4bab9ed5","updated":"2019-09-09 16:27:59.000000000","message":"Good spot. Now that you bring it up, mriedem had noted this would be an issue in an earlier revision and I forgot to address it. Have fixed along with a(nother) significant revision of these test","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"966deff1f34e0d379e507c0433f58f0e143766d8","unresolved":false,"context_lines":[{"line_number":7390,"context_line":""},{"line_number":7391,"context_line":"        instances_with_bad_allocations \u003d []"},{"line_number":7392,"context_line":""},{"line_number":7393,"context_line":"        instances \u003d objects.InstanceList.get_by_host("},{"line_number":7394,"context_line":"            ctx, compute_node.host, expected_attrs\u003d[\u0027numa_topology\u0027])"},{"line_number":7395,"context_line":"        for instance in instances:"},{"line_number":7396,"context_line":"            if not instance.numa_topology:"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_4bab9ed5","line":7393,"in_reply_to":"5faad753_8b5bd62c","updated":"2019-09-07 06:09:32.000000000","message":"The reproduce steps:\n1. boot instance on hostA\n2. resize the instance, but no confirm\n3. upgrade the control plane and hostA\n4. set CONF.compute.cpu_dedicated_set for hostA\n5. hostA reshape failed.","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":7408,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) 
provider of"},{"line_number":7409,"context_line":"            # VCPU and PCPU resources"},{"line_number":7410,"context_line":"            for rp_uuid in instance_allocations:"},{"line_number":7411,"context_line":"                resources \u003d instance_allocations[rp_uuid][\u0027resources\u0027]"},{"line_number":7412,"context_line":""},{"line_number":7413,"context_line":"                if orc.PCPU in resources or orc.VCPU not in resources:"},{"line_number":7414,"context_line":"                    # Either this has been migrated or it\u0027s not a compute node"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_5f6600f0","line":7411,"range":{"start_line":7411,"start_character":28,"end_line":7411,"end_character":57},"updated":"2019-09-06 22:29:00.000000000","message":"could have used .items() above and already have this","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"26317a0922271b9776e6cc45a40aae5bbf27ca99","unresolved":false,"context_lines":[{"line_number":7408,"context_line":"            # ComputeNode.uuid since compute nodes are the only (?) provider of"},{"line_number":7409,"context_line":"            # VCPU and PCPU resources"},{"line_number":7410,"context_line":"            for rp_uuid in instance_allocations:"},{"line_number":7411,"context_line":"                resources \u003d instance_allocations[rp_uuid][\u0027resources\u0027]"},{"line_number":7412,"context_line":""},{"line_number":7413,"context_line":"                if orc.PCPU in resources or orc.VCPU not in resources:"},{"line_number":7414,"context_line":"                    # Either this has been migrated or it\u0027s not a compute node"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_f77a5f86","line":7411,"range":{"start_line":7411,"start_character":28,"end_line":7411,"end_character":57},"in_reply_to":"5faad753_5f6600f0","updated":"2019-09-09 14:55:27.000000000","message":"I had to get at the \u0027resources\u0027 subdict so in this particular instance it doesn\u0027t save me all that much","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e5d0ee2d44ee2089a5acf3248b94149e06c98b3c","unresolved":false,"context_lines":[{"line_number":7415,"context_line":"                    continue"},{"line_number":7416,"context_line":""},{"line_number":7417,"context_line":"                # Switch stuff around. We can do a straight swap since an"},{"line_number":7418,"context_line":"                # instance is either pinned or unpinned. 
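The swap itself, pulled out as a sketch: the guard and the two mutation lines are straight from the patch context above; the function wrapper is mine. Mutating the resources dict in place is what propagates the change back through the provided allocations dict to the resource tracker.

import os_resource_classes as orc


def _swap_vcpu_for_pcpu(consumer_allocations):
    # consumer_allocations maps resource provider UUID -> {'resources': ...}
    for alloc in consumer_allocations.values():
        resources = alloc['resources']
        if orc.PCPU in resources or orc.VCPU not in resources:
            # Either this has already been migrated or it's not a compute
            # node provider.
            continue
        # A straight swap is safe because an instance is either entirely
        # pinned or entirely unpinned.
        resources[orc.PCPU] = resources[orc.VCPU]
        del resources[orc.VCPU]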
By doing this, we\u0027re"},{"line_number":7419,"context_line":"                # modifying the provided \u0027allocations\u0027 dict, which will"},{"line_number":7420,"context_line":"                # eventually be used by the resource tracker to update"},{"line_number":7421,"context_line":"                # placement"}],"source_content_type":"text/x-python","patch_set":28,"id":"5faad753_bf9a94c2","line":7418,"range":{"start_line":7418,"start_character":18,"end_line":7418,"end_character":55},"updated":"2019-09-06 22:29:00.000000000","message":":(","commit_id":"07e4410474b9ee44015e15737fb1edb616a59b92"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"b0d2f25e6f4b37e49eb66480408c443e84109fd5","unresolved":false,"context_lines":[{"line_number":7413,"context_line":""},{"line_number":7414,"context_line":"            allocations_needing_reshape.append(instance.uuid)"},{"line_number":7415,"context_line":""},{"line_number":7416,"context_line":"        # ...and those for any migrations"},{"line_number":7417,"context_line":""},{"line_number":7418,"context_line":"        migrations \u003d objects.MigrationList.get_in_progress_by_host_and_node("},{"line_number":7419,"context_line":"            ctx, compute_node.host, compute_node.hypervisor_hostname)"}],"source_content_type":"text/x-python","patch_set":32,"id":"5faad753_d36de498","line":7416,"updated":"2019-09-10 06:08:53.000000000","message":"tested, it works.","commit_id":"dd3945bf0aed8eafd5a52dbba89a5c74e1df7615"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"3a81a66a37e6910480adf96f562b08334494d5af","unresolved":false,"context_lines":[{"line_number":7454,"context_line":"                # eventually be used by the resource tracker to update"},{"line_number":7455,"context_line":"                # placement"},{"line_number":7456,"context_line":"                resources[\u0027PCPU\u0027] \u003d resources[\u0027VCPU\u0027]"},{"line_number":7457,"context_line":"                del resources[orc.VCPU]"},{"line_number":7458,"context_line":""},{"line_number":7459,"context_line":"    def get_available_resource(self, nodename):"},{"line_number":7460,"context_line":"        \"\"\"Retrieve resource information."}],"source_content_type":"text/x-python","patch_set":32,"id":"5faad753_cc00ed37","line":7457,"updated":"2019-09-09 18:00:26.000000000","message":"Hmm, so these instances will have no VCPU inventory? I guess that makes sense, but I thought we previously used the presence of VCPU inventory to detect which allocations were from nova (like as compared to from cinder or neutron). I\u0027m not sure that case really exists right now, but I should probably go look to make sure.\n\n\u003clater\u003e\n\nOkay I don\u0027t see what I was looking for -- this might have been in the old inventory doubling code which is now gone. It\u0027s possible that we\u0027d do something like that in the future, so this is something to watch for.\n\nNow, what about quotas? Presumably the old fields we\u0027re using to count things for quotas will still contain the sum of the two things, right? 
I guess the code to do counting quotas via placement will need to be sure to handle adding VCPU and PCPU allocation elements together in its calculus whenever that happens...","commit_id":"dd3945bf0aed8eafd5a52dbba89a5c74e1df7615"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d194706007661bf66babfa814603631ba901f317","unresolved":false,"context_lines":[{"line_number":7454,"context_line":"                # eventually be used by the resource tracker to update"},{"line_number":7455,"context_line":"                # placement"},{"line_number":7456,"context_line":"                resources[\u0027PCPU\u0027] \u003d resources[\u0027VCPU\u0027]"},{"line_number":7457,"context_line":"                del resources[orc.VCPU]"},{"line_number":7458,"context_line":""},{"line_number":7459,"context_line":"    def get_available_resource(self, nodename):"},{"line_number":7460,"context_line":"        \"\"\"Retrieve resource information."}],"source_content_type":"text/x-python","patch_set":32,"id":"5faad753_b5b35a4d","line":7457,"in_reply_to":"5faad753_cc00ed37","updated":"2019-09-10 19:17:31.000000000","message":"\u003e Hmm, so these instances will have no VCPU inventory? I guess that\n \u003e makes sense, but I thought we previously used the presence of VCPU\n \u003e inventory to detect which allocations were from nova (like as\n \u003e compared to from cinder or neutron). I\u0027m not sure that case really\n \u003e exists right now, but I should probably go look to make sure.\n \u003e \n \u003e \u003clater\u003e\n \u003e \n \u003e Okay I don\u0027t see what I was looking for -- this might have been in\n \u003e the old inventory doubling code which is now gone. It\u0027s possible\n \u003e that we\u0027d do something like that in the future, so this is\n \u003e something to watch for.\n\nWe used to detect compute node resource providers via VCPU inventory in a nova-status upgrade check that has since been removed:\n\nhttps://github.com/openstack/nova/blob/stable/rocky/nova/cmd/status.py#L235\n\nAnd we still do doubling of allocations for same-host resize but that\u0027s allocations not inventory.","commit_id":"dd3945bf0aed8eafd5a52dbba89a5c74e1df7615"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"b0d2f25e6f4b37e49eb66480408c443e84109fd5","unresolved":false,"context_lines":[{"line_number":7454,"context_line":"                # eventually be used by the resource tracker to update"},{"line_number":7455,"context_line":"                # placement"},{"line_number":7456,"context_line":"                resources[\u0027PCPU\u0027] \u003d resources[\u0027VCPU\u0027]"},{"line_number":7457,"context_line":"                del resources[orc.VCPU]"},{"line_number":7458,"context_line":""},{"line_number":7459,"context_line":"    def get_available_resource(self, nodename):"},{"line_number":7460,"context_line":"        \"\"\"Retrieve resource information."}],"source_content_type":"text/x-python","patch_set":32,"id":"5faad753_f3fd80b1","line":7457,"in_reply_to":"5faad753_cc00ed37","updated":"2019-09-10 06:08:53.000000000","message":"btw, I tested the quota thing, the PCPU will lose the quota limit when I enable CONF.quota.count_usage_from_placement. 
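The wrinkle being reported here: once allocations move to PCPU, any cores-quota counting that only sums VCPU usage from placement undercounts. A one-line sketch of the fix the thread is pointing at; the usages dict stands in for the per-project usage data placement returns, and the actual nova plumbing is not part of this change.

def count_cores(usages):
    # Pinned instances consume PCPU, unpinned ones VCPU; the 'cores' quota
    # has to cover both or pinned instances stop counting against it.
    return usages.get('VCPU', 0) + usages.get('PCPU', 0)


# e.g. one unpinned 2-vCPU instance plus one pinned 4-vCPU instance:
assert count_cores({'VCPU': 2, 'PCPU': 4}) == 6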
But the quota still works when CONF.quota.count_usage_from_placement is False.","commit_id":"dd3945bf0aed8eafd5a52dbba89a5c74e1df7615"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d194706007661bf66babfa814603631ba901f317","unresolved":false,"context_lines":[{"line_number":7454,"context_line":"                # eventually be used by the resource tracker to update"},{"line_number":7455,"context_line":"                # placement"},{"line_number":7456,"context_line":"                resources[\u0027PCPU\u0027] \u003d resources[\u0027VCPU\u0027]"},{"line_number":7457,"context_line":"                del resources[orc.VCPU]"},{"line_number":7458,"context_line":""},{"line_number":7459,"context_line":"    def get_available_resource(self, nodename):"},{"line_number":7460,"context_line":"        \"\"\"Retrieve resource information."}],"source_content_type":"text/x-python","patch_set":32,"id":"5faad753_b5ba7a07","line":7457,"in_reply_to":"5faad753_f3fd80b1","updated":"2019-09-10 19:17:31.000000000","message":"\u003e But the quota still works when CONF.quota.count_usage_from_placement is False.\n\nThis is because that is counting from the instance.vcpus which from the flavor:\n\nhttps://github.com/openstack/nova/blob/f4ca3e70852c0a7ed7904a9f2d7177c9118d3d1c/nova/objects/instance.py#L1470\n\nCounting quota usage from placement is disabled by default but CERN uses it to avoid quota relying on counting from down cells. Either way we need to strive for a consistent experience from the end user perspective in the API, even though we have some known wrinkles when you\u0027re using one configuration over the other which are documented in the config option itself:\n\nhttps://docs.openstack.org/nova/latest/configuration/config.html#quota.count_usage_from_placement\n\nWe probably don\u0027t want to add to that list of differences by saying something like, \"oh by the way if you\u0027re using PCPU your cores quota no longer counts\".","commit_id":"dd3945bf0aed8eafd5a52dbba89a5c74e1df7615"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0b10890d4aaae6db3c5836a8c70a9b5912a24754","unresolved":false,"context_lines":[{"line_number":7638,"context_line":"        migrations \u003d objects.MigrationList.get_in_progress_by_host_and_node("},{"line_number":7639,"context_line":"            ctx, compute_node.host, compute_node.hypervisor_hostname)"},{"line_number":7640,"context_line":"        for migration in migrations:"},{"line_number":7641,"context_line":"            # we don\u0027t care about migrations that have landed here, since we"},{"line_number":7642,"context_line":"            # already have those instances above"},{"line_number":7643,"context_line":"            if not migration.dest_compute or ("},{"line_number":7644,"context_line":"                    migration.dest_compute \u003d\u003d compute_node.host):"},{"line_number":7645,"context_line":"                continue"},{"line_number":7646,"context_line":""},{"line_number":7647,"context_line":"            instance \u003d objects.Instance.get_by_uuid("},{"line_number":7648,"context_line":"                ctx, migration.instance_uuid, expected_attrs\u003d[\u0027numa_topology\u0027])"}],"source_content_type":"text/x-python","patch_set":38,"id":"5faad753_1ee58aa8","line":7645,"range":{"start_line":7641,"start_character":12,"end_line":7645,"end_character":24},"updated":"2019-09-12 05:39:58.000000000","message":"I 
don\u0027t think this check is needed.\n\nWe create the migration object and claim against placement for the dest host in nova-conductor first; at that point, instance.host is still the source host. instance.host only switches to the dest host after the resize finishes.\n\nSo I think we should remove this check and count every migration.instance_uuid, then check the allocations after line 7658: if there is an allocation record, reshape it.\n\n\u003e\u003e\u003e\n\nLater: but yes, the operator shouldn\u0027t restart nova-compute in the middle of a user operating on the instance. I think we have a way to disable any action on a specific node for a moment? Anyway, this is probably a case I\u0027m overthinking.","commit_id":"b65f4ef8a6544037ff4cfcd8f4dc60a2cf49e66e"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0b10890d4aaae6db3c5836a8c70a9b5912a24754","unresolved":false,"context_lines":[{"line_number":7655,"context_line":""},{"line_number":7656,"context_line":"            allocations_needing_reshape.append(migration.uuid)"},{"line_number":7657,"context_line":""},{"line_number":7658,"context_line":"        for allocation_uuid in allocations_needing_reshape:"},{"line_number":7659,"context_line":"            consumer_allocations \u003d allocations.get(allocation_uuid, {}).get("},{"line_number":7660,"context_line":"                \u0027allocations\u0027, {})"},{"line_number":7661,"context_line":"            # TODO(stephenfin): We can probably just check the allocations for"}],"source_content_type":"text/x-python","patch_set":38,"id":"5faad753_5e04624a","line":7658,"range":{"start_line":7658,"start_character":12,"end_line":7658,"end_character":21},"updated":"2019-09-12 05:39:58.000000000","message":"This should be consumer_uuid; there is no allocation_uuid.","commit_id":"b65f4ef8a6544037ff4cfcd8f4dc60a2cf49e66e"}]}
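Tying the thread together: the later patch sets extend the reshape to migration consumers (the pending-resize case Alex reproduced), and Alex's last comment asks for the loop variable to say what it holds, a consumer UUID. A sketch of the combined flow, reusing the helpers sketched earlier; only MigrationList.get_in_progress_by_host_and_node, the dest_compute skip, and the defensive .get() chain appear in the patch context, the rest of the naming is mine.

from nova import context as nova_context
from nova import objects


def _reshape_cpu_allocations(allocations, compute_node):
    ctx = nova_context.get_admin_context()

    # Instances on this node whose allocations still say VCPU...
    consumers_needing_reshape = _find_instances_needing_reshape(compute_node)

    # ...and in-progress migrations, whose source-side allocations are held
    # by the migration UUID rather than the instance UUID.
    migrations = objects.MigrationList.get_in_progress_by_host_and_node(
        ctx, compute_node.host, compute_node.hypervisor_hostname)
    for migration in migrations:
        # Migrations that landed here are already covered by the instance
        # loop; we only want ones holding old-flavor allocations against
        # this node. (The real patch also checks the migrating instance's
        # NUMA topology at this point.)
        if not migration.dest_compute or (
                migration.dest_compute == compute_node.host):
            continue
        consumers_needing_reshape.append(migration.uuid)

    for consumer_uuid in consumers_needing_reshape:
        consumer_allocations = allocations.get(
            consumer_uuid, {}).get('allocations', {})
        _swap_vcpu_for_pcpu(consumer_allocations)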
