)]}'
{"nova/tests/functional/test_cross_cell_migrate.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"6a1ad4a7167eb3571f4219a696ab9bf4bae18701","unresolved":false,"context_lines":[{"line_number":488,"context_line":"                self.admin_api, server, \u0027ERROR\u0027)"},{"line_number":489,"context_line":""},{"line_number":490,"context_line":"        # Assert a fault was recorded."},{"line_number":491,"context_line":"        self.assertIn(\u0027fault\u0027, server)"},{"line_number":492,"context_line":"        self.assertIn(\u0027Connection to the hypervisor is broken\u0027,"},{"line_number":493,"context_line":"                      server[\u0027fault\u0027][\u0027message\u0027])"},{"line_number":494,"context_line":"        # The migration should be in \u0027error\u0027 status."}],"source_content_type":"text/x-python","patch_set":24,"id":"7faddb67_1a82edc9","line":491,"updated":"2019-07-03 20:55:48.000000000","message":"This is racy - the fault is recorded after the server goes to ERROR status. The migration status is changed to error after the fault is recorded so we should wait for the migration status change first.","commit_id":"16a728c1e68acecf9b523178fd53356ce05cc918"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"5043868c6d3f5d8e2d23cb6c36cf48a8e9f3b11d","unresolved":false,"context_lines":[{"line_number":488,"context_line":"                self.admin_api, server, \u0027ERROR\u0027)"},{"line_number":489,"context_line":""},{"line_number":490,"context_line":"        # Assert a fault was recorded."},{"line_number":491,"context_line":"        self.assertIn(\u0027fault\u0027, server)"},{"line_number":492,"context_line":"        self.assertIn(\u0027Connection to the hypervisor is broken\u0027,"},{"line_number":493,"context_line":"                      server[\u0027fault\u0027][\u0027message\u0027])"},{"line_number":494,"context_line":"        # The migration should be in \u0027error\u0027 status."}],"source_content_type":"text/x-python","patch_set":24,"id":"7faddb67_1a9e6d07","line":491,"in_reply_to":"7faddb67_1a82edc9","updated":"2019-07-03 21:00:21.000000000","message":"Done","commit_id":"16a728c1e68acecf9b523178fd53356ce05cc918"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"6fcf4b44b2c3406a01bc8787470c846a53e9dda2","unresolved":false,"context_lines":[{"line_number":488,"context_line":"                self.admin_api, server, \u0027ERROR\u0027)"},{"line_number":489,"context_line":""},{"line_number":490,"context_line":"        # Assert a fault was recorded."},{"line_number":491,"context_line":"        self.assertIn(\u0027fault\u0027, server)"},{"line_number":492,"context_line":"        self.assertIn(\u0027Connection to the hypervisor is broken\u0027,"},{"line_number":493,"context_line":"                      server[\u0027fault\u0027][\u0027message\u0027])"},{"line_number":494,"context_line":"        # The migration should be in \u0027error\u0027 status."}],"source_content_type":"text/x-python","patch_set":24,"id":"7faddb67_d5f746d4","line":491,"in_reply_to":"7faddb67_1a9e6d07","updated":"2019-07-03 21:35:31.000000000","message":"Actually even if we wait for the migration status to change, we could have a stale copy of the server here in the test. _wait_for_state_change should wait for the ERROR status and the task_state to be None.","commit_id":"16a728c1e68acecf9b523178fd53356ce05cc918"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"12186795de1ee679fb1e4244b7e15f3a707c813f","unresolved":false,"context_lines":[{"line_number":505,"context_line":"        # the one in the target cell was deleted."},{"line_number":506,"context_line":"        self.assertEqual(1, self._count_volume_attachments(server[\u0027id\u0027]),"},{"line_number":507,"context_line":"                         self.cinder.volume_to_attachment)"},{"line_number":508,"context_line":""},{"line_number":509,"context_line":"        # Now hard reboot the server in the source cell and it should go back"},{"line_number":510,"context_line":"        # to ACTIVE."},{"line_number":511,"context_line":"        self.api.post_server_action(server[\u0027id\u0027], {\u0027reboot\u0027: {\u0027type\u0027: \u0027HARD\u0027}})"}],"source_content_type":"text/x-python","patch_set":24,"id":"9fb8cfa7_10fbbc78","line":508,"updated":"2019-07-01 17:52:44.000000000","message":"Per the discussion here https://review.opendev.org/#/c/634832/36/nova/compute/manager.py@4599 we should probably assert the allocations were cleaned up as expected.","commit_id":"16a728c1e68acecf9b523178fd53356ce05cc918"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"5043868c6d3f5d8e2d23cb6c36cf48a8e9f3b11d","unresolved":false,"context_lines":[{"line_number":505,"context_line":"        # the one in the target cell was deleted."},{"line_number":506,"context_line":"        self.assertEqual(1, self._count_volume_attachments(server[\u0027id\u0027]),"},{"line_number":507,"context_line":"                         self.cinder.volume_to_attachment)"},{"line_number":508,"context_line":""},{"line_number":509,"context_line":"        # Now hard reboot the server in the source cell and it should go back"},{"line_number":510,"context_line":"        # to ACTIVE."},{"line_number":511,"context_line":"        self.api.post_server_action(server[\u0027id\u0027], {\u0027reboot\u0027: {\u0027type\u0027: \u0027HARD\u0027}})"}],"source_content_type":"text/x-python","patch_set":24,"id":"7faddb67_3aa32952","line":508,"in_reply_to":"9fb8cfa7_10fbbc78","updated":"2019-07-03 21:00:21.000000000","message":"Done","commit_id":"16a728c1e68acecf9b523178fd53356ce05cc918"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"193d4520b8926739a2e105fd836206c7e93fea00","unresolved":false,"context_lines":[{"line_number":508,"context_line":"        # Assert that migration-based allocations were properly reverted."},{"line_number":509,"context_line":"        mig_uuid \u003d self.get_migration_uuid_for_instance(server[\u0027id\u0027])"},{"line_number":510,"context_line":"        mig_allocs \u003d self._get_allocations_by_server_uuid(mig_uuid)"},{"line_number":511,"context_line":"        self.assertEqual({}, mig_allocs)"},{"line_number":512,"context_line":"        source_rp_uuid \u003d self._get_provider_uuid_by_host("},{"line_number":513,"context_line":"            server[\u0027OS-EXT-SRV-ATTR:host\u0027])"},{"line_number":514,"context_line":"        server_allocs \u003d self._get_allocations_by_server_uuid(server[\u0027id\u0027])"}],"source_content_type":"text/x-python","patch_set":25,"id":"7faddb67_e47152e2","line":511,"updated":"2019-07-05 18:49:30.000000000","message":"Something is racing here:\n\nhttp://logs.openstack.org/51/643451/25/check/nova-tox-functional/e8b697a/testr_results.html.gz","commit_id":"d1373d66f408e9cb337b781fcd296f1992fb2ca3"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"26fdfd13618a65636e52ba745d52d74dfc2e66eb","unresolved":false,"context_lines":[{"line_number":508,"context_line":"        # Assert that migration-based allocations were properly reverted."},{"line_number":509,"context_line":"        mig_uuid \u003d self.get_migration_uuid_for_instance(server[\u0027id\u0027])"},{"line_number":510,"context_line":"        mig_allocs \u003d self._get_allocations_by_server_uuid(mig_uuid)"},{"line_number":511,"context_line":"        self.assertEqual({}, mig_allocs)"},{"line_number":512,"context_line":"        source_rp_uuid \u003d self._get_provider_uuid_by_host("},{"line_number":513,"context_line":"            server[\u0027OS-EXT-SRV-ATTR:host\u0027])"},{"line_number":514,"context_line":"        server_allocs \u003d self._get_allocations_by_server_uuid(server[\u0027id\u0027])"}],"source_content_type":"text/x-python","patch_set":25,"id":"7faddb67_f4ed8125","line":511,"in_reply_to":"7faddb67_e47152e2","updated":"2019-07-05 21:43:19.000000000","message":"The problem is finish_snapshot_based_resize_at_dest no longer reverts the allocation before raising back to conductor, and we\u0027re not waiting long enough to hit the MigrationTask.rollback code which will revert the allocation. So we need to wait longer, which likely means waiting for the notification that conductor sends here:\n\nhttps://github.com/openstack/nova/blob/713288286f3bc3d6db53d51d0450d37514d6a86e/nova/conductor/manager.py#L361","commit_id":"d1373d66f408e9cb337b781fcd296f1992fb2ca3"}]}
