)]}'
{"nova/scheduler/manager.py":[{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"6f3d73fdca7b86b941adce1db0814ebcf8dc2f64","unresolved":false,"context_lines":[{"line_number":183,"context_line":"                    if retry:"},{"line_number":184,"context_line":"                        retry[\u0027hosts\u0027].append([host_state.host, node])"},{"line_number":185,"context_line":"                    filter_properties[\u0027limits\u0027] \u003d host_state.limits"},{"line_number":186,"context_line":"                    host \u003d host_state.host"},{"line_number":187,"context_line":"                else:"},{"line_number":188,"context_line":"                    host \u003d target_host"},{"line_number":189,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"AAAAPn%2F%2Fs0E%3D","line":186,"updated":"2013-06-24 23:16:10.000000000","message":"I feel like this is potentially incorrectly hoisting internal code from the filter scheduler into the manager to make the refactor easier.\n\nIs there any way this could/should be wrapped into select_hosts() or into another simpler method that you add to the scheduler driver that lets the filter scheduler do its needed retry work?","commit_id":"d157c8aee5dace2f678342447775b7ce93889f50"},{"author":{"_account_id":5441,"name":"Andrew Laski","email":"andrew@lascii.com","username":"alaski"},"change_message_id":"c2d9b1d7b9036c3b6e09c5cb842b8058ff360f47","unresolved":false,"context_lines":[{"line_number":183,"context_line":"                    if retry:"},{"line_number":184,"context_line":"                        retry[\u0027hosts\u0027].append([host_state.host, node])"},{"line_number":185,"context_line":"                    filter_properties[\u0027limits\u0027] \u003d host_state.limits"},{"line_number":186,"context_line":"                    host \u003d host_state.host"},{"line_number":187,"context_line":"                else:"},{"line_number":188,"context_line":"                    host \u003d target_host"},{"line_number":189,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"AAAAPn%2F%2Fp7I%3D","line":186,"in_reply_to":"AAAAPn%2F%2Fs0E%3D","updated":"2013-06-25 21:12:45.000000000","message":"I\u0027m not entirely sure what the right direction is for the \u0027retry\u0027 attribute of filter_properties, but somehow it needs to be updated after every call to select_hosts.  \n\nOne possibility would be to have select_hosts update filter_properties transparently in scheduler/rpcapi.  I tend to avoid updating args without an explicit return but it would work without the logic escaping the scheduler which is nice.  It is somewhat complicated in this case by not calling select_hosts through the rpcapi, which you will once this moves to conductor.  What I mean is something like \n\n    def select_hosts(..., filter_properties):\n        hosts \u003d self.call(...\u0027select_hosts\u0027...)\n        filter_properties[\u0027retry\u0027] \u003d \u003cupdated retry\u003e\n        return hosts\n\nAnother option I was looking at was adding some methods to scheduler.utils which could update filter_properties by passing in filter_properties and the target host.  It would return an updated filter_properties.  This moves retry under the control of the conductor, which really may be the best place for it.  But I would be interested in second opinions on this before moving that direction.\n\n\nAlso, I\u0027m not sure exactly what \u0027limits\u0027 is used for yet.  That needs similar consideration I think.","commit_id":"d157c8aee5dace2f678342447775b7ce93889f50"},{"author":{"_account_id":782,"name":"John Garbutt","email":"john@johngarbutt.com","username":"johngarbutt"},"change_message_id":"429410ce2fe21b45ab75e66b02209954c4ed9c51","unresolved":false,"context_lines":[{"line_number":183,"context_line":"                filter_properties.pop(\u0027context\u0027, None)"},{"line_number":184,"context_line":""},{"line_number":185,"context_line":"                (host, node) \u003d (host_state.host, host_state.nodename)"},{"line_number":186,"context_line":"                self.compute_rpcapi.prep_resize("},{"line_number":187,"context_line":"                    context, image, instance, instance_type, host,"},{"line_number":188,"context_line":"                    reservations, request_spec\u003drequest_spec,"},{"line_number":189,"context_line":"                    filter_properties\u003dfilter_properties, node\u003dnode)"}],"source_content_type":"text/x-python","patch_set":12,"id":"AAAAP3%2F%2FujQ%3D","line":186,"updated":"2013-07-12 10:48:36.000000000","message":"I guess we move this into the conductor eventually, but I like this small step.","commit_id":"b77438fef71881f44a5638064a80533d76453b4f"},{"author":{"_account_id":782,"name":"John Garbutt","email":"john@johngarbutt.com","username":"johngarbutt"},"change_message_id":"d9d5c414d8125a99ff48416788120b22127e9bb4","unresolved":false,"context_lines":[{"line_number":191,"context_line":"                    filter_properties\u003dfilter_properties, node\u003dnode)"},{"line_number":192,"context_line":""},{"line_number":193,"context_line":"            except exception.NoValidHost as ex:"},{"line_number":194,"context_line":"                self._set_vm_state_and_notify(\u0027prep_resize\u0027,"},{"line_number":195,"context_line":"                                             {\u0027vm_state\u0027: vm_states.ACTIVE,"},{"line_number":196,"context_line":"                                              \u0027task_state\u0027: None},"},{"line_number":197,"context_line":"                                             context, ex, request_spec)"}],"source_content_type":"text/x-python","patch_set":21,"id":"AAAAQn%2F%2F56I%3D","line":194,"updated":"2013-07-19 14:00:16.000000000","message":"Not sure we have coverage for these error cases, might be good to check that, but granted, you have tested the code you have changed.","commit_id":"9090c069a1a8e50df3200954c2aca50de1408b1b"},{"author":{"_account_id":782,"name":"John Garbutt","email":"john@johngarbutt.com","username":"johngarbutt"},"change_message_id":"046e5b9be7b49eea399a54117bf7ae9106b6c8eb","unresolved":false,"context_lines":[{"line_number":191,"context_line":"                    filter_properties\u003dfilter_properties, node\u003dnode)"},{"line_number":192,"context_line":""},{"line_number":193,"context_line":"            except exception.NoValidHost as ex:"},{"line_number":194,"context_line":"                self._set_vm_state_and_notify(\u0027prep_resize\u0027,"},{"line_number":195,"context_line":"                                             {\u0027vm_state\u0027: vm_states.ACTIVE,"},{"line_number":196,"context_line":"                                              \u0027task_state\u0027: None},"},{"line_number":197,"context_line":"                                             context, ex, request_spec)"}],"source_content_type":"text/x-python","patch_set":21,"id":"AAAAQn%2F%2F53c%3D","line":194,"in_reply_to":"AAAAQn%2F%2F56I%3D","updated":"2013-07-19 14:11:38.000000000","message":"My bad, I spotted its already covered, I think.","commit_id":"9090c069a1a8e50df3200954c2aca50de1408b1b"}]}
