{"/COMMIT_MSG":[{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"c0a14fb9830961a622b29563567da16db5d92084","unresolved":false,"context_lines":[{"line_number":25,"context_line":"duplication with the scheduler is to have conductor call the"},{"line_number":26,"context_line":"scheduler even when force\u003dTrue but pass a flag to the scheduler"},{"line_number":27,"context_line":"so it skips the filters but still makes the claim on the destination"},{"line_number":28,"context_line":"node."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Finally, some comments are left in the live_migrate method in the"},{"line_number":31,"context_line":"compute API code since this is all tightly-coupled between the"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":3,"id":"7f287b81_6fcfa27f","line":28,"updated":"2017-08-23 13:28:02.000000000","message":"le sigh. forced\u003dcrazypants.","commit_id":"5d3a11b9c9a6a5aecd46ad7ecc635215184d930e"}],"nova/conductor/tasks/live_migrate.py":[{"author":{"_account_id":8768,"name":"Chris Friesen","email":"chris.friesen@windriver.com","username":"cbf123"},"change_message_id":"9b0766599bb9470a7e268ff62791e1e7e2e83e40","unresolved":false,"context_lines":[{"line_number":91,"context_line":"    def _check_requested_destination(self):"},{"line_number":92,"context_line":"        \"\"\"Performs pre-live migration checks on the forced destination host\"\"\""},{"line_number":93,"context_line":"        self._check_destination_is_not_source()"},{"line_number":94,"context_line":"        # TODO(mriedem): Remove when calling the scheduler since ComputeFilter"},{"line_number":95,"context_line":"        # takes care of this check for us."},{"line_number":96,"context_line":"        self._check_host_is_up(self.destination)"},{"line_number":97,"context_line":"        # TODO(mriedem): Remove when calling the scheduler since Placement"},{"line_number":98,"context_line":"        # and/or the RamFilter takes care of this check for us."}],"source_content_type":"text/x-python","patch_set":1,"id":"7f287b81_ff2000b3","line":95,"range":{"start_line":94,"start_character":0,"end_line":95,"end_character":42},"updated":"2017-08-22 16:27:51.000000000","message":"not true if we\u0027re going to do the thing to tell the scheduler to skip the filters","commit_id":"4c58bf182b488548046ecd2fffcc0da7432d12b7"},{"author":{"_account_id":8768,"name":"Chris Friesen","email":"chris.friesen@windriver.com","username":"cbf123"},"change_message_id":"9b0766599bb9470a7e268ff62791e1e7e2e83e40","unresolved":false,"context_lines":[{"line_number":94,"context_line":"        # TODO(mriedem): Remove when calling the scheduler since ComputeFilter"},{"line_number":95,"context_line":"        # takes care of this check for us."},{"line_number":96,"context_line":"        self._check_host_is_up(self.destination)"},{"line_number":97,"context_line":"        # TODO(mriedem): Remove when calling the scheduler since Placement"},{"line_number":98,"context_line":"        # and/or the RamFilter takes care of this check for us."},{"line_number":99,"context_line":"        self._check_destination_has_enough_memory()"},{"line_number":100,"context_line":""},{"line_number":101,"context_line":"        # Run the forced host through the scheduler\u0027s 
select_destinations"}],"source_content_type":"text/x-python","patch_set":1,"id":"7f287b81_5f139467","line":98,"range":{"start_line":97,"start_character":0,"end_line":98,"end_character":63},"updated":"2017-08-22 16:27:51.000000000","message":"not true if we\u0027re going to tell the scheduler to skip the filters...but it\u0027s redundant since we\u0027ll be claiming resources on the destination.","commit_id":"4c58bf182b488548046ecd2fffcc0da7432d12b7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"3aa608fe7e4597f4f02d70614a2064a08bf174e1","unresolved":false,"context_lines":[{"line_number":102,"context_line":"        # routine which will do a few things for us:"},{"line_number":103,"context_line":"        # 1. Make sure the forced host is in the same cell as the instance."},{"line_number":104,"context_line":"        # 2. Create an allocation in Placement for the target host."},{"line_number":105,"context_line":"        # 3. Make sure the forced host has vcpu/ram/disk capacity in the"},{"line_number":106,"context_line":"        #    Placement service."},{"line_number":107,"context_line":"        # 4. Run configured filters on the host to make sure it\u0027s otherwise"},{"line_number":108,"context_line":"        #    OK to take the instance."},{"line_number":109,"context_line":""},{"line_number":110,"context_line":"        # Get the instance mapping to get the cell mapping"},{"line_number":111,"context_line":"        # TODO(mriedem): Will InstanceMappingNotFound be handled?"}],"source_content_type":"text/x-python","patch_set":1,"id":"9f436f4f_87303f09","line":108,"range":{"start_line":105,"start_character":8,"end_line":108,"end_character":37},"updated":"2017-08-22 01:02:26.000000000","message":"These break the contract on the force flag in the API. The latter could be solved with a skip_filters flag to select_destinations, but given we call placement for the allocation candidates long before we get to the filters, that\u0027s going to be a problem.","commit_id":"4c58bf182b488548046ecd2fffcc0da7432d12b7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"3b6f369655160b0351eea600ea3982dd5d0c6235","unresolved":false,"context_lines":[{"line_number":105,"context_line":"        # 3. Make sure the forced host has vcpu/ram/disk capacity in the"},{"line_number":106,"context_line":"        #    Placement service."},{"line_number":107,"context_line":"        # 4. Run configured filters on the host to make sure it\u0027s otherwise"},{"line_number":108,"context_line":"        #    OK to take the instance."},{"line_number":109,"context_line":""},{"line_number":110,"context_line":"        # Get the instance mapping to get the cell mapping"},{"line_number":111,"context_line":"        # TODO(mriedem): Will InstanceMappingNotFound be handled?"}],"source_content_type":"text/x-python","patch_set":1,"id":"7f287b81_2b888e1d","line":108,"in_reply_to":"7f287b81_a84487b8","updated":"2017-08-22 14:37:17.000000000","message":"Correct. Similar with the force_hosts/force_nodes stuff in the request spec, those are set when you\u0027re telling the scheduler where something should go. 
The \"force\" flag really means \"skip the filters\" and it was just always doing that by completely bypassing the scheduler altogether.","commit_id":"4c58bf182b488548046ecd2fffcc0da7432d12b7"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"3d79f9725c4d672ccfb33e6b5b39bc000118a8e1","unresolved":false,"context_lines":[{"line_number":105,"context_line":"        # 3. Make sure the forced host has vcpu/ram/disk capacity in the"},{"line_number":106,"context_line":"        #    Placement service."},{"line_number":107,"context_line":"        # 4. Run configured filters on the host to make sure it\u0027s otherwise"},{"line_number":108,"context_line":"        #    OK to take the instance."},{"line_number":109,"context_line":""},{"line_number":110,"context_line":"        # Get the instance mapping to get the cell mapping"},{"line_number":111,"context_line":"        # TODO(mriedem): Will InstanceMappingNotFound be handled?"}],"source_content_type":"text/x-python","patch_set":1,"id":"7f287b81_a84487b8","line":108,"in_reply_to":"9f436f4f_87303f09","updated":"2017-08-22 12:27:05.000000000","message":"I guess the presence of \u0027requested_destination\u0027 is insufficient to be that flag, because it will be there when a destination is requested, but not forced?","commit_id":"4c58bf182b488548046ecd2fffcc0da7432d12b7"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"3aa608fe7e4597f4f02d70614a2064a08bf174e1","unresolved":false,"context_lines":[{"line_number":107,"context_line":"        # 4. Run configured filters on the host to make sure it\u0027s otherwise"},{"line_number":108,"context_line":"        #    OK to take the instance."},{"line_number":109,"context_line":""},{"line_number":110,"context_line":"        # Get the instance mapping to get the cell mapping"},{"line_number":111,"context_line":"        # TODO(mriedem): Will InstanceMappingNotFound be handled?"},{"line_number":112,"context_line":"        instance_mapping \u003d objects.InstanceMapping.get_by_instance_uuid("},{"line_number":113,"context_line":"            self.context, self.instance.uuid)"},{"line_number":114,"context_line":"        LOG.debug(\u0027Requesting cell %(cell)s while migrating\u0027,"},{"line_number":115,"context_line":"                  {\u0027cell\u0027: instance_mapping.cell_mapping.identity},"},{"line_number":116,"context_line":"                  instance\u003dself.instance)"},{"line_number":117,"context_line":"        # Get the compute node to get the host/node; note the context is"},{"line_number":118,"context_line":"        # already targeted from the API"},{"line_number":119,"context_line":"        # TODO(mriedem): Will ComputeHostNotFound be handled?"}],"source_content_type":"text/x-python","patch_set":1,"id":"9f436f4f_473a57e7","line":116,"range":{"start_line":110,"start_character":8,"end_line":116,"end_character":41},"updated":"2017-08-22 01:02:26.000000000","message":"Should probably leave this for a separate fix for bug 1712210.","commit_id":"4c58bf182b488548046ecd2fffcc0da7432d12b7"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"447b75a18f53c2ff5c03d67efcd14ddadca79d5a","unresolved":false,"context_lines":[{"line_number":120,"context_line":"        return source_node, dest_node"},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"    def 
Dan Smith (danms), patch set 2, line 123 (2017-08-22 23:11):

    Context:

            return source_node, dest_node

        def _claim_resources_on_destination(self, source_node, dest_node):
            """Copies allocations from source node to dest node in Placement

            :param source_node: source ComputeNode where the instance currently
                                lives

    > Well, this is confusing. I had several comments drawn up about how this
    > is actually moving the allocations, not copying and duplicating them.
    > Then I saw your test, and remembered that claim_resources() has that
    > doubling behavior if it decides it's a move.

Dan Smith (danms), patch set 2, line 143 (2017-08-22 23:11):

    Context:

                                'uuid': dest_node.uuid
                            },
                            'resources': source_node_allocations
                        }
                    ]
                }
                if reportclient.claim_resources(

    > I think a comment is warranted above this, or above the call to
    > claim_resources(), reminding the reader that claim_resources() will
    > double the allocation, effectively adding the above to what is already
    > there if an allocation exists.

    Reply, Matt Riedemann (mriedem), 2017-08-22 23:14:

    > Will do.
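For reference, here is the allocation request being discussed, pieced
together from the context above. The provider UUID and resource amounts are
made-up placeholders, and the claim_resources() call itself, whose argument
list is truncated in the context, is left out:

    # Placeholders; in the patch these come from the destination ComputeNode
    # and the instance's existing allocations against the source node.
    dest_node_uuid = 'dest-node-uuid'
    source_node_allocations = {'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 20}

    alloc_request = {
        'allocations': [
            {
                'resource_provider': {'uuid': dest_node_uuid},
                'resources': source_node_allocations,
            }
        ]
    }
    # The comment Dan asked for: claim_resources() doubles the allocation
    # when it detects a move, adding the request above to the allocation that
    # already exists against the source node, so the instance holds resources
    # on both providers while the migration is in flight.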
Jay Pipes (jaypipes), patch set 3, line 173 (2017-08-23 13:28):

    Context:

                                'the host for the instance.') %
                              {'instance_uuid': self.instance.uuid,
                               'host': self.destination})
                    raise exception.MigrationPreCheckError(reason=reason)
            else:
                # This shouldn't happen, but it could be a case where there are
                # older (Ocata) computes still so the existing allocations are

    > I'm good with this, since it's reasonable and sensible. That said, this
    > *does* change the behaviour of Nova for the force-host case, and this
    > patch is intended for backporting, right?

    Reply, Matt Riedemann (mriedem), 2017-08-23 13:45:

    > Yes. Since we don't do claims on the compute during live migration, we
    > wouldn't have failed on a claim going over on the compute. But as the
    > comment says, other parts of the system depend on the placement data
    > being accurate, and the force flag in the API predates the scheduler
    > using placement. So meh.

Jay Pipes (jaypipes), patch set 3, line 186 (2017-08-23 13:28):

    Context:

                            'for destination node %(dest)s and assuming the '
                            'compute service will heal the allocations.',
                            {'source': source_node.uuid, 'dest': dest_node.uuid},
                            instance=self.instance)

        def _check_destination_is_not_source(self):
            if self.destination == self.source:

    > ++
nova/tests/functional/notification_sample_tests/test_instance.py
-----------------------------------------------------------------

Jay Pipes (jaypipes), patch set 3, line 46 (2017-08-23 13:28):

    Context:

            self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
            # server will boot on host1
            fake.set_nodes(['host2'])
            self.addCleanup(fake.restore_nodes)
            self.useFixture(fixtures.ConfPatcher(host='host2'))
            self.compute2 = self.start_service('compute', host='host2')

    > This is a bit curious; could you elaborate on why this change was
    > necessary? Was it because, without it, no destination host information
    > was gettable?

    Reply, Matt Riedemann (mriedem), 2017-08-23 13:46:

    > No. It's because when compute2 starts up it's not using a unique node,
    > so when we register the compute node resource provider in placement,
    > things don't work as we need them to.
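The same four lines from the test context, with the explanation from the
reply folded in as comments; the code itself is unchanged from the snippet
above:

    # Make the fake virt driver report 'host2' as its hypervisor node name.
    # Without this, compute2 would report the same node as the first compute
    # service, and their compute node resource provider registrations in
    # placement would collide.
    fake.set_nodes(['host2'])
    self.addCleanup(fake.restore_nodes)
    # Run the second compute service under its own host name.
    self.useFixture(fixtures.ConfPatcher(host='host2'))
    self.compute2 = self.start_service('compute', host='host2')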
