)]}'
{"/PATCHSET_LEVEL":[{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"f24c2c26fb4b695e45314289d0ebe4ee6e32badf","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":6,"id":"d3a90c3a_d9c1544c","updated":"2022-08-25 07:56:03.000000000","message":"recheck nova-next fails in the post test script with some quota related issue. ","commit_id":"822e1848f7c4e1e810daf07b4972196fd11f35ca"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"deb56ba64a5977708aa4b53df5b11acb8b543aa5","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":7,"id":"151cefc2_c77857f3","updated":"2022-08-31 10:28:19.000000000","message":"Good test coverage. All LGTM","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"cd8fe3009cbfad38fb934a417f03da311c3c1747","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":7,"id":"4bf28702_cf7eaa58","updated":"2022-08-26 15:36:23.000000000","message":"I need to respin this due to same host resize needs special handling during healing. I will do the respin today.","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"302e33eb76d721da9ebe32486ce3b1a3258b24b8","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":7,"id":"c86c44af_6e70f18a","updated":"2022-08-29 19:40:31.000000000","message":"this looks pretty good in terms of the resize testing\nboth revert and confim are tested. \nsame host resize is adressed later in the seriese","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"30501cd0bf83025babac3f0160bb4e080ad8cccb","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":7,"id":"cb71dc6d_1a7a5918","in_reply_to":"4bf28702_cf7eaa58","updated":"2022-08-26 17:57:56.000000000","message":"done, add a new patch above instead","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"}],"doc/source/admin/pci-passthrough.rst":[{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"deb56ba64a5977708aa4b53df5b11acb8b543aa5","unresolved":true,"context_lines":[{"line_number":394,"context_line":"   (Zed) the nova-compute service will refuse to start with such configuration."},{"line_number":395,"context_line":"   It is suggested to use the PCI address of the device instead."},{"line_number":396,"context_line":""},{"line_number":397,"context_line":"The nova-compute service makes sure that already existing instances with PCI"},{"line_number":398,"context_line":"allocations in the nova DB will have a corresponding PCI allocation in"},{"line_number":399,"context_line":"placement. This allocation healing also acts on any new instances regardless of"},{"line_number":400,"context_line":"the status of the scheduling part of this feature to make sure that the nova"}],"source_content_type":"text/x-rst","patch_set":7,"id":"da912def_ada70edd","line":397,"range":{"start_line":397,"start_character":41,"end_line":397,"end_character":48},"updated":"2022-08-31 10:28:19.000000000","message":"nit: drop this, it\u0027s unnecessary","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"ee8bf9286c683e4c736cf41818796f189e581a0e","unresolved":false,"context_lines":[{"line_number":394,"context_line":"   (Zed) the nova-compute service will refuse to start with such configuration."},{"line_number":395,"context_line":"   It is suggested to use the PCI address of the device instead."},{"line_number":396,"context_line":""},{"line_number":397,"context_line":"The nova-compute service makes sure that already existing instances with PCI"},{"line_number":398,"context_line":"allocations in the nova DB will have a corresponding PCI allocation in"},{"line_number":399,"context_line":"placement. This allocation healing also acts on any new instances regardless of"},{"line_number":400,"context_line":"the status of the scheduling part of this feature to make sure that the nova"}],"source_content_type":"text/x-rst","patch_set":7,"id":"019834e5_1b8f7f88","line":397,"range":{"start_line":397,"start_character":41,"end_line":397,"end_character":48},"in_reply_to":"da912def_ada70edd","updated":"2022-09-02 12:25:25.000000000","message":"Done in a FUP","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"deb56ba64a5977708aa4b53df5b11acb8b543aa5","unresolved":true,"context_lines":[{"line_number":400,"context_line":"the status of the scheduling part of this feature to make sure that the nova"},{"line_number":401,"context_line":"DB and placement are in sync. There is one limitation of the healing logic."},{"line_number":402,"context_line":"It assumes that there is no in-progress migration when the nova-compute service"},{"line_number":403,"context_line":"is upgraded. If there is an in-progress migration, then the PCI allocation on"},{"line_number":404,"context_line":"the source host of the migration will not be healed. The placement view will be"},{"line_number":405,"context_line":"consistent after such migration is completed or reverted."},{"line_number":406,"context_line":""}],"source_content_type":"text/x-rst","patch_set":7,"id":"ed0a2892_638893cf","line":403,"range":{"start_line":403,"start_character":49,"end_line":403,"end_character":50},"updated":"2022-08-31 10:28:19.000000000","message":"nit: drop","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"ee8bf9286c683e4c736cf41818796f189e581a0e","unresolved":false,"context_lines":[{"line_number":400,"context_line":"the status of the scheduling part of this feature to make sure that the nova"},{"line_number":401,"context_line":"DB and placement are in sync. There is one limitation of the healing logic."},{"line_number":402,"context_line":"It assumes that there is no in-progress migration when the nova-compute service"},{"line_number":403,"context_line":"is upgraded. If there is an in-progress migration, then the PCI allocation on"},{"line_number":404,"context_line":"the source host of the migration will not be healed. The placement view will be"},{"line_number":405,"context_line":"consistent after such migration is completed or reverted."},{"line_number":406,"context_line":""}],"source_content_type":"text/x-rst","patch_set":7,"id":"f612fcc4_081accb2","line":403,"range":{"start_line":403,"start_character":49,"end_line":403,"end_character":50},"in_reply_to":"ed0a2892_638893cf","updated":"2022-09-02 12:25:25.000000000","message":"Done in a FUP","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"}],"nova/tests/functional/libvirt/test_pci_in_placement.py":[{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"302e33eb76d721da9ebe32486ce3b1a3258b24b8","unresolved":true,"context_lines":[{"line_number":794,"context_line":"    @staticmethod"},{"line_number":795,"context_line":"    def _move_allocation(allocations, from_uuid, to_uuid):"},{"line_number":796,"context_line":"        allocations[to_uuid] \u003d allocations[from_uuid]"},{"line_number":797,"context_line":"        del allocations[from_uuid]"},{"line_number":798,"context_line":""},{"line_number":799,"context_line":"    def _move_server_allocation(self, allocations, server_uuid, revert\u003dFalse):"},{"line_number":800,"context_line":"        migration_uuid \u003d self.get_migration_uuid_for_instance(server_uuid)"}],"source_content_type":"text/x-python","patch_set":7,"id":"86c26ad4_689fd784","line":797,"updated":"2022-08-29 19:40:31.000000000","message":"ok this should work.\nmodifying two keys in the same dict in as single statement then delteign is fine as long as you are not iterating over it at the same time","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"2cda909c5433c9725d824daad0521e04d9e6c090","unresolved":false,"context_lines":[{"line_number":794,"context_line":"    @staticmethod"},{"line_number":795,"context_line":"    def _move_allocation(allocations, from_uuid, to_uuid):"},{"line_number":796,"context_line":"        allocations[to_uuid] \u003d allocations[from_uuid]"},{"line_number":797,"context_line":"        del allocations[from_uuid]"},{"line_number":798,"context_line":""},{"line_number":799,"context_line":"    def _move_server_allocation(self, allocations, server_uuid, revert\u003dFalse):"},{"line_number":800,"context_line":"        migration_uuid \u003d self.get_migration_uuid_for_instance(server_uuid)"}],"source_content_type":"text/x-python","patch_set":7,"id":"74a53590_a0807d24","line":797,"in_reply_to":"86c26ad4_689fd784","updated":"2022-08-30 11:21:00.000000000","message":"Ack","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"302e33eb76d721da9ebe32486ce3b1a3258b24b8","unresolved":false,"context_lines":[{"line_number":1020,"context_line":"        extra_spec \u003d {\"pci_passthrough:alias\": \"a-vf:1\"}"},{"line_number":1021,"context_line":"        flavor_id \u003d self._create_flavor(extra_spec\u003dextra_spec)"},{"line_number":1022,"context_line":"        server_vf \u003d self._create_server(flavor_id\u003dflavor_id, networks\u003d[])"},{"line_number":1023,"context_line":"        self.assertPCIDeviceCounts(\"compute1\", total\u003d7, free\u003d6)"},{"line_number":1024,"context_line":"        # As scheduling does not support PCI in placement yet no allocation"},{"line_number":1025,"context_line":"        # is created for the PCI consumption by the scheduler. BUT the resource"},{"line_number":1026,"context_line":"        # tracker in the compute will heal the missing PCI allocation"}],"source_content_type":"text/x-python","patch_set":7,"id":"347d0665_e67fa06e","line":1023,"updated":"2022-08-29 19:40:31.000000000","message":"ack so this goes form 7 to 6 since the parent is not allowed by the dev spec\nwhich is also not allowed because the palcment reporting is enabeld\n\nthis is more a note to myself as i normally expect this to decresase by 2 the first time","commit_id":"98e9989cad0708c9254ec612a9eca249690b1b80"}]}
