)]}'
{"/COMMIT_MSG":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"58a0c2464c05371bb828d9f0e51a0becd4f02efe","unresolved":false,"context_lines":[{"line_number":22,"context_line":"Placement\u0027s allocation candidate functionality. Therefore it cannot"},{"line_number":23,"context_line":"handle the situation when there is more than one RP on in the compute"},{"line_number":24,"context_line":"tree which provides the required traits for a port. In this situation"},{"line_number":25,"context_line":"deciding which RP to use would require the in_tree allocation candidate"},{"line_number":26,"context_line":"support from placement which is not available yet and 2) information"},{"line_number":27,"context_line":"about which PCI PF an SRIOV port is allocated from its VF and which RP"},{"line_number":28,"context_line":"represents that PCI device in placement. This information is only"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":9,"id":"5fc1f717_0ec11e55","line":25,"range":{"start_line":25,"start_character":43,"end_line":25,"end_character":50},"updated":"2019-03-07 08:28:31.000000000","message":"Is now (assuming you\u0027re planning to finish this in Train at this point?)","commit_id":"5c5c02bee98e7044c221fcd5f89f70bb254bdaa3"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"44583cb8be04238cff594767471242904ee75841","unresolved":false,"context_lines":[{"line_number":22,"context_line":"Placement\u0027s allocation candidate functionality. Therefore it cannot"},{"line_number":23,"context_line":"handle the situation when there is more than one RP on in the compute"},{"line_number":24,"context_line":"tree which provides the required traits for a port. In this situation"},{"line_number":25,"context_line":"deciding which RP to use would require the in_tree allocation candidate"},{"line_number":26,"context_line":"support from placement which is not available yet and 2) information"},{"line_number":27,"context_line":"about which PCI PF an SRIOV port is allocated from its VF and which RP"},{"line_number":28,"context_line":"represents that PCI device in placement. This information is only"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":9,"id":"5fc1f717_d329c889","line":25,"range":{"start_line":25,"start_character":43,"end_line":25,"end_character":50},"in_reply_to":"5fc1f717_0ec11e55","updated":"2019-03-28 10:48:35.000000000","message":"We agreed with Matt that we would like to backport this solution to Stein if possible. So I\u0027d like to keep it backportable.","commit_id":"5c5c02bee98e7044c221fcd5f89f70bb254bdaa3"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":7,"context_line":"nova-manage: heal port allocations"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Before I97f06d0ec34cbd75c182caaa686b8de5c777a576 it was possible to"},{"line_number":10,"context_line":"create servers with neutron ports had resource_request (e.g. a port with"},{"line_number":11,"context_line":"QoS minimum bandwidth policy rule) without allocated the requested"},{"line_number":12,"context_line":"resources in placement. So there could be servers which allocation needs"},{"line_number":13,"context_line":"to be healed in placement."}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_d4cd425d","line":10,"range":{"start_line":10,"start_character":34,"end_line":10,"end_character":37},"updated":"2019-06-27 21:36:12.000000000","message":"which had","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":7,"context_line":"nova-manage: heal port allocations"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Before I97f06d0ec34cbd75c182caaa686b8de5c777a576 it was possible to"},{"line_number":10,"context_line":"create servers with neutron ports had resource_request (e.g. a port with"},{"line_number":11,"context_line":"QoS minimum bandwidth policy rule) without allocated the requested"},{"line_number":12,"context_line":"resources in placement. So there could be servers which allocation needs"},{"line_number":13,"context_line":"to be healed in placement."}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_d303f30b","line":10,"range":{"start_line":10,"start_character":34,"end_line":10,"end_character":37},"in_reply_to":"9fb8cfa7_d4cd425d","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Before I97f06d0ec34cbd75c182caaa686b8de5c777a576 it was possible to"},{"line_number":10,"context_line":"create servers with neutron ports had resource_request (e.g. a port with"},{"line_number":11,"context_line":"QoS minimum bandwidth policy rule) without allocated the requested"},{"line_number":12,"context_line":"resources in placement. So there could be servers which allocation needs"},{"line_number":13,"context_line":"to be healed in placement."},{"line_number":14,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_94d34a01","line":11,"range":{"start_line":11,"start_character":43,"end_line":11,"end_character":52},"updated":"2019-06-27 21:36:12.000000000","message":"allocating","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Before I97f06d0ec34cbd75c182caaa686b8de5c777a576 it was possible to"},{"line_number":10,"context_line":"create servers with neutron ports had resource_request (e.g. a port with"},{"line_number":11,"context_line":"QoS minimum bandwidth policy rule) without allocated the requested"},{"line_number":12,"context_line":"resources in placement. So there could be servers which allocation needs"},{"line_number":13,"context_line":"to be healed in placement."},{"line_number":14,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_7306c71e","line":11,"range":{"start_line":11,"start_character":43,"end_line":11,"end_character":52},"in_reply_to":"9fb8cfa7_94d34a01","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"There are known limiations of this patch. It does not try to reimplement"},{"line_number":20,"context_line":"Placement\u0027s allocation candidate functionality. Therefore it cannot"},{"line_number":21,"context_line":"handle the situation when there is more than one RP on in the compute"},{"line_number":22,"context_line":"tree which provides the required traits for a port. In this situation"},{"line_number":23,"context_line":"deciding which RP to use would require the in_tree allocation candidate"},{"line_number":24,"context_line":"support from placement which is not available yet and 2) information"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_540b928c","line":21,"range":{"start_line":21,"start_character":52,"end_line":21,"end_character":54},"updated":"2019-06-27 21:36:12.000000000","message":"nix","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"There are known limiations of this patch. It does not try to reimplement"},{"line_number":20,"context_line":"Placement\u0027s allocation candidate functionality. Therefore it cannot"},{"line_number":21,"context_line":"handle the situation when there is more than one RP on in the compute"},{"line_number":22,"context_line":"tree which provides the required traits for a port. In this situation"},{"line_number":23,"context_line":"deciding which RP to use would require the in_tree allocation candidate"},{"line_number":24,"context_line":"support from placement which is not available yet and 2) information"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_9309fb2b","line":21,"range":{"start_line":21,"start_character":52,"end_line":21,"end_character":54},"in_reply_to":"9fb8cfa7_540b928c","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":20,"context_line":"Placement\u0027s allocation candidate functionality. Therefore it cannot"},{"line_number":21,"context_line":"handle the situation when there is more than one RP on in the compute"},{"line_number":22,"context_line":"tree which provides the required traits for a port. In this situation"},{"line_number":23,"context_line":"deciding which RP to use would require the in_tree allocation candidate"},{"line_number":24,"context_line":"support from placement which is not available yet and 2) information"},{"line_number":25,"context_line":"about which PCI PF an SRIOV port is allocated from its VF and which RP"},{"line_number":26,"context_line":"represents that PCI device in placement. This information is only"},{"line_number":27,"context_line":"available on the compute hosts."}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_b4fbee76","line":24,"range":{"start_line":23,"start_character":25,"end_line":24,"end_character":49},"updated":"2019-06-27 21:36:12.000000000","message":"You\u0027re talking about 1.31 right?\n\nhttps://docs.openstack.org/placement/latest/placement-api-microversion-history.html#add-in-tree-queryparam-on-get-allocation-candidates\n\nSo yes it\u0027s available since Stein. Would we use it? Or is that a follow up? Or is that a moot point because of your next issue (needing information from the compute host)?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4f74de86d0df58f4f064c227871dd5a13fbe9635","unresolved":false,"context_lines":[{"line_number":20,"context_line":"Placement\u0027s allocation candidate functionality. Therefore it cannot"},{"line_number":21,"context_line":"handle the situation when there is more than one RP on in the compute"},{"line_number":22,"context_line":"tree which provides the required traits for a port. In this situation"},{"line_number":23,"context_line":"deciding which RP to use would require the in_tree allocation candidate"},{"line_number":24,"context_line":"support from placement which is not available yet and 2) information"},{"line_number":25,"context_line":"about which PCI PF an SRIOV port is allocated from its VF and which RP"},{"line_number":26,"context_line":"represents that PCI device in placement. This information is only"},{"line_number":27,"context_line":"available on the compute hosts."}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_116522b5","line":24,"range":{"start_line":23,"start_character":25,"end_line":24,"end_character":49},"in_reply_to":"9fb8cfa7_b4fbee76","updated":"2019-06-28 12:38:06.000000000","message":"Besides the missing information from the compute node I still has the intention to backport this to stein.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":26,"context_line":"represents that PCI device in placement. This information is only"},{"line_number":27,"context_line":"available on the compute hosts."},{"line_number":28,"context_line":""},{"line_number":29,"context_line":"For the unsupported cases the command will fail gracefully. As soon as"},{"line_number":30,"context_line":"migration support for such server are implemented the admin can heal the"},{"line_number":31,"context_line":"allocation of such servers by migrating them."},{"line_number":32,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_74f1f694","line":29,"updated":"2019-06-27 21:36:12.000000000","message":"OK this seems like a reasonable trade-off.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":28,"context_line":""},{"line_number":29,"context_line":"For the unsupported cases the command will fail gracefully. As soon as"},{"line_number":30,"context_line":"migration support for such server are implemented the admin can heal the"},{"line_number":31,"context_line":"allocation of such servers by migrating them."},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"There is one edge case that needs manual steps to recover from. If the"},{"line_number":34,"context_line":"allocation in placement is healed successfully for an instance but the"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_94ecaabb","line":31,"updated":"2019-06-27 21:36:12.000000000","message":"You might want to refer to blueprint support-move-ops-with-qos-ports here.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":28,"context_line":""},{"line_number":29,"context_line":"For the unsupported cases the command will fail gracefully. As soon as"},{"line_number":30,"context_line":"migration support for such server are implemented the admin can heal the"},{"line_number":31,"context_line":"allocation of such servers by migrating them."},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"There is one edge case that needs manual steps to recover from. If the"},{"line_number":34,"context_line":"allocation in placement is healed successfully for an instance but the"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_93225ba2","line":31,"in_reply_to":"9fb8cfa7_94ecaabb","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":33,"context_line":"There is one edge case that needs manual steps to recover from. If the"},{"line_number":34,"context_line":"allocation in placement is healed successfully for an instance but the"},{"line_number":35,"context_line":"binding:profile.allocation key of any of the ports of the instance"},{"line_number":36,"context_line":"cannot be updated in neutron then the script does not try to rollback"},{"line_number":37,"context_line":"the placement allocation. The script will print out a detailed error"},{"line_number":38,"context_line":"message and a list of OpenStack CLI commands asking the admin to run"},{"line_number":39,"context_line":"them before continue healing allocatons. If the healing is run again"},{"line_number":40,"context_line":"without this step the placement allocations for the ports of this"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_34161eaa","line":37,"range":{"start_line":36,"start_character":45,"end_line":37,"end_character":25},"updated":"2019-06-27 21:36:12.000000000","message":"The manual recovery sounds pretty nasty, and doubling the allocations sounds even worse. How hard would it be to rollback? We talked a bit about this in IRC today and I think you said it would be possible but we\u0027d have to keep a copy of the allocations before they are updated to make sure we remove the diff (whatever came from the port resource request). I don\u0027t know how hard that would be but I\u0027d think it would involve knowing which providers and resource classes are involved in the port resource request allocation.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4f74de86d0df58f4f064c227871dd5a13fbe9635","unresolved":false,"context_lines":[{"line_number":33,"context_line":"There is one edge case that needs manual steps to recover from. If the"},{"line_number":34,"context_line":"allocation in placement is healed successfully for an instance but the"},{"line_number":35,"context_line":"binding:profile.allocation key of any of the ports of the instance"},{"line_number":36,"context_line":"cannot be updated in neutron then the script does not try to rollback"},{"line_number":37,"context_line":"the placement allocation. The script will print out a detailed error"},{"line_number":38,"context_line":"message and a list of OpenStack CLI commands asking the admin to run"},{"line_number":39,"context_line":"them before continue healing allocatons. If the healing is run again"},{"line_number":40,"context_line":"without this step the placement allocations for the ports of this"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_f4f15c6c","line":37,"range":{"start_line":36,"start_character":45,"end_line":37,"end_character":25},"in_reply_to":"9fb8cfa7_34161eaa","updated":"2019-06-28 12:38:06.000000000","message":"I will write up something for the rollback. I feel it will be hard to make it right for all fault cases.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":36,"context_line":"cannot be updated in neutron then the script does not try to rollback"},{"line_number":37,"context_line":"the placement allocation. The script will print out a detailed error"},{"line_number":38,"context_line":"message and a list of OpenStack CLI commands asking the admin to run"},{"line_number":39,"context_line":"them before continue healing allocatons. If the healing is run again"},{"line_number":40,"context_line":"without this step the placement allocations for the ports of this"},{"line_number":41,"context_line":"instance are doubled."},{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Closes-Bug: #1819923"},{"line_number":44,"context_line":"Change-Id: I4b2b1688822eb2f0174df0c8c6c16d554781af85"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_f40ba679","line":41,"range":{"start_line":39,"start_character":41,"end_line":41,"end_character":21},"updated":"2019-06-27 21:36:12.000000000","message":"Could we detect this and fail instead? Or is it some kind of situation where we don\u0027t have enough information to detect and make a decision?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4f74de86d0df58f4f064c227871dd5a13fbe9635","unresolved":false,"context_lines":[{"line_number":36,"context_line":"cannot be updated in neutron then the script does not try to rollback"},{"line_number":37,"context_line":"the placement allocation. The script will print out a detailed error"},{"line_number":38,"context_line":"message and a list of OpenStack CLI commands asking the admin to run"},{"line_number":39,"context_line":"them before continue healing allocatons. If the healing is run again"},{"line_number":40,"context_line":"without this step the placement allocations for the ports of this"},{"line_number":41,"context_line":"instance are doubled."},{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Closes-Bug: #1819923"},{"line_number":44,"context_line":"Change-Id: I4b2b1688822eb2f0174df0c8c6c16d554781af85"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":28,"id":"9fb8cfa7_740fac36","line":41,"range":{"start_line":39,"start_character":41,"end_line":41,"end_character":21},"in_reply_to":"9fb8cfa7_f40ba679","updated":"2019-06-28 12:38:06.000000000","message":"If we updated placement successfully but cannot update the ports then the re-run will detect that there are port without allocation information in neutron. If in the same time we see that the instance allocation has neutron related resource classes (e.g. Bandwidth) then we can fail to prevent allocation duplication. But this also become complicated if some of the neutron ports of the instance are updated but some doesn\u0027t. Then we have to see if there are extra bandwidth allocation on top of amount that is connected to ports with proper allocation key. \n\nEven if we detect this, recovering from it automatically seem really complicated (e.g. matching up the existing allocation with handled ports, and trying to see whats left for unhandled ports)","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Before I97f06d0ec34cbd75c182caaa686b8de5c777a576 it was possible to"},{"line_number":10,"context_line":"create servers with neutron ports which had resource_request (e.g. a"},{"line_number":11,"context_line":"port with QoS minimum bandwidth policy rule) without allocating the"},{"line_number":12,"context_line":"requested resources in placement. So there could be servers which"},{"line_number":13,"context_line":"allocation needs to be healed in placement."},{"line_number":14,"context_line":""},{"line_number":15,"context_line":"This patch extends the nova-manage heal_allocation CLI to create the"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_516a11b3","line":12,"range":{"start_line":12,"start_character":60,"end_line":12,"end_character":65},"updated":"2019-07-11 20:04:17.000000000","message":"\"whose\" or \"for which the\"","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Before I97f06d0ec34cbd75c182caaa686b8de5c777a576 it was possible to"},{"line_number":10,"context_line":"create servers with neutron ports which had resource_request (e.g. a"},{"line_number":11,"context_line":"port with QoS minimum bandwidth policy rule) without allocating the"},{"line_number":12,"context_line":"requested resources in placement. So there could be servers which"},{"line_number":13,"context_line":"allocation needs to be healed in placement."},{"line_number":14,"context_line":""},{"line_number":15,"context_line":"This patch extends the nova-manage heal_allocation CLI to create the"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_ef444d49","line":12,"range":{"start_line":12,"start_character":60,"end_line":12,"end_character":65},"in_reply_to":"7faddb67_516a11b3","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":27,"context_line":"available on the compute hosts."},{"line_number":28,"context_line":""},{"line_number":29,"context_line":"For the unsupported cases the command will fail gracefully. As soon as"},{"line_number":30,"context_line":"migration support for such server are implemented in the blueprint"},{"line_number":31,"context_line":"support-move-ops-with-qos-ports the admin can heal the allocation of"},{"line_number":32,"context_line":"such servers by migrating them."},{"line_number":33,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_91a5e906","line":30,"range":{"start_line":30,"start_character":27,"end_line":30,"end_character":33},"updated":"2019-07-11 20:04:17.000000000","message":"servers","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":27,"context_line":"available on the compute hosts."},{"line_number":28,"context_line":""},{"line_number":29,"context_line":"For the unsupported cases the command will fail gracefully. As soon as"},{"line_number":30,"context_line":"migration support for such server are implemented in the blueprint"},{"line_number":31,"context_line":"support-move-ops-with-qos-ports the admin can heal the allocation of"},{"line_number":32,"context_line":"such servers by migrating them."},{"line_number":33,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_6f729daf","line":30,"range":{"start_line":30,"start_character":27,"end_line":30,"end_character":33},"in_reply_to":"7faddb67_91a5e906","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":32,"context_line":"such servers by migrating them."},{"line_number":33,"context_line":""},{"line_number":34,"context_line":"During healing both placement and neutron need to be updated. If any of"},{"line_number":35,"context_line":"those updates fail the code try to roll back the previous updates for"},{"line_number":36,"context_line":"the instance to make sure that the healing can be re-run later without"},{"line_number":37,"context_line":"issue. However if the rollback fails then the script will terminate with"},{"line_number":38,"context_line":"an error message pointing to a documentation that describe how to"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_51af71e2","line":35,"range":{"start_line":35,"start_character":28,"end_line":35,"end_character":31},"updated":"2019-07-11 20:04:17.000000000","message":"tries","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":32,"context_line":"such servers by migrating them."},{"line_number":33,"context_line":""},{"line_number":34,"context_line":"During healing both placement and neutron need to be updated. If any of"},{"line_number":35,"context_line":"those updates fail the code try to roll back the previous updates for"},{"line_number":36,"context_line":"the instance to make sure that the healing can be re-run later without"},{"line_number":37,"context_line":"issue. However if the rollback fails then the script will terminate with"},{"line_number":38,"context_line":"an error message pointing to a documentation that describe how to"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_0f6be9b6","line":35,"range":{"start_line":35,"start_character":28,"end_line":35,"end_character":31},"in_reply_to":"7faddb67_51af71e2","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":35,"context_line":"those updates fail the code try to roll back the previous updates for"},{"line_number":36,"context_line":"the instance to make sure that the healing can be re-run later without"},{"line_number":37,"context_line":"issue. However if the rollback fails then the script will terminate with"},{"line_number":38,"context_line":"an error message pointing to a documentation that describe how to"},{"line_number":39,"context_line":"recover from such a partially healed situation manually."},{"line_number":40,"context_line":""},{"line_number":41,"context_line":"Closes-Bug: #1819923"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_11b9f92c","line":38,"range":{"start_line":38,"start_character":50,"end_line":38,"end_character":58},"updated":"2019-07-11 20:04:17.000000000","message":"describes","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":35,"context_line":"those updates fail the code try to roll back the previous updates for"},{"line_number":36,"context_line":"the instance to make sure that the healing can be re-run later without"},{"line_number":37,"context_line":"issue. However if the rollback fails then the script will terminate with"},{"line_number":38,"context_line":"an error message pointing to a documentation that describe how to"},{"line_number":39,"context_line":"recover from such a partially healed situation manually."},{"line_number":40,"context_line":""},{"line_number":41,"context_line":"Closes-Bug: #1819923"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_71aaadd2","line":38,"range":{"start_line":38,"start_character":29,"end_line":38,"end_character":30},"updated":"2019-07-11 20:04:17.000000000","message":"strike","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":35,"context_line":"those updates fail the code try to roll back the previous updates for"},{"line_number":36,"context_line":"the instance to make sure that the healing can be re-run later without"},{"line_number":37,"context_line":"issue. However if the rollback fails then the script will terminate with"},{"line_number":38,"context_line":"an error message pointing to a documentation that describe how to"},{"line_number":39,"context_line":"recover from such a partially healed situation manually."},{"line_number":40,"context_line":""},{"line_number":41,"context_line":"Closes-Bug: #1819923"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_2f6825b7","line":38,"range":{"start_line":38,"start_character":50,"end_line":38,"end_character":58},"in_reply_to":"7faddb67_11b9f92c","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":35,"context_line":"those updates fail the code try to roll back the previous updates for"},{"line_number":36,"context_line":"the instance to make sure that the healing can be re-run later without"},{"line_number":37,"context_line":"issue. 
However if the rollback fails then the script will terminate with"},{"line_number":38,"context_line":"an error message pointing to a documentation that describe how to"},{"line_number":39,"context_line":"recover from such a partially healed situation manually."},{"line_number":40,"context_line":""},{"line_number":41,"context_line":"Closes-Bug: #1819923"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":35,"id":"7faddb67_cf6071d2","line":38,"range":{"start_line":38,"start_character":29,"end_line":38,"end_character":30},"in_reply_to":"7faddb67_71aaadd2","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"}],"doc/source/cli/nova-manage.rst":[{"author":{"_account_id":7634,"name":"Takashi Natsume","email":"takanattie@gmail.com","username":"natsumet"},"change_message_id":"437e4755c8b64008f12d57c656e1ff78cea88b93","unresolved":false,"context_lines":[{"line_number":336,"context_line":"    associated with the instance."},{"line_number":337,"context_line":""},{"line_number":338,"context_line":"    Also if the instance has any port attached that has resource request"},{"line_number":339,"context_line":"    (e.g. port has `QoS minimum bandwidth rule`_) but the corresponding"},{"line_number":340,"context_line":"    allocation is not found then the allocation is created against the"},{"line_number":341,"context_line":"    network device resource providers according to the resource request of"},{"line_number":342,"context_line":"    given port. 
It is possible that the missing allocation cannot be created"}],"source_content_type":"text/x-rst","patch_set":22,"id":"dfbec78f_a58282c7","line":339,"range":{"start_line":339,"start_character":19,"end_line":339,"end_character":48},"updated":"2019-05-04 19:32:14.000000000","message":":neutron-doc: should be used.\nSee https://opendev.org/openstack/nova/raw/branch/master/doc/source/admin/port_with_resource_request.rst","commit_id":"d0be5b103c900a0d97b67e56e022dd3b13fa9da1"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"6a54df7138b916bfcd11ed886cd2aa8f76579fe7","unresolved":false,"context_lines":[{"line_number":336,"context_line":"    associated with the instance."},{"line_number":337,"context_line":""},{"line_number":338,"context_line":"    Also if the instance has any port attached that has resource request"},{"line_number":339,"context_line":"    (e.g. port has `QoS minimum bandwidth rule`_) but the corresponding"},{"line_number":340,"context_line":"    allocation is not found then the allocation is created against the"},{"line_number":341,"context_line":"    network device resource providers according to the resource request of"},{"line_number":342,"context_line":"    given port. 
It is possible that the missing allocation cannot be created"}],"source_content_type":"text/x-rst","patch_set":22,"id":"dfbec78f_ba561906","line":339,"range":{"start_line":339,"start_character":19,"end_line":339,"end_character":48},"in_reply_to":"dfbec78f_a58282c7","updated":"2019-05-05 12:18:53.000000000","message":"Done","commit_id":"d0be5b103c900a0d97b67e56e022dd3b13fa9da1"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"144d0737a3cb6c6cd8f9312e37708dd0bfb1e17a","unresolved":false,"context_lines":[{"line_number":344,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":345,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":346,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":347,"context_line":"    given port needs to be detached. When nova will support migrating instances"},{"line_number":348,"context_line":"    with such port the migration will also heal the missing allocation for"},{"line_number":349,"context_line":"    these instance."},{"line_number":350,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":351,"context_line":"    port also needs to be update in neutron to refer to the resource provider"},{"line_number":352,"context_line":"    UUID which provides the requested resources. If the port update fails in"}],"source_content_type":"text/x-rst","patch_set":24,"id":"bfb3d3c7_87972791","line":349,"range":{"start_line":347,"start_character":37,"end_line":349,"end_character":19},"updated":"2019-05-21 11:31:46.000000000","message":"This sentence is grammatically incorrect but I don\u0027t have a good suggestion because I\u0027m not entirely sure what you\u0027re trying to say. 
Mabye:\n\nWhen nova supports migrating instances with guaranteed bandwidth ports, migration will heal missing allocations for these instances.","commit_id":"43429c03edb71e4d02ac593dfbe05b57f2ec82f9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1b61de28491c1eb55c2a169946be3f30b1e21b3a","unresolved":false,"context_lines":[{"line_number":344,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":345,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":346,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":347,"context_line":"    given port needs to be detached. When nova will support migrating instances"},{"line_number":348,"context_line":"    with such port the migration will also heal the missing allocation for"},{"line_number":349,"context_line":"    these instance."},{"line_number":350,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":351,"context_line":"    port also needs to be update in neutron to refer to the resource provider"},{"line_number":352,"context_line":"    UUID which provides the requested resources. 
If the port update fails in"}],"source_content_type":"text/x-rst","patch_set":24,"id":"bfb3d3c7_5ba200b0","line":349,"range":{"start_line":347,"start_character":37,"end_line":349,"end_character":19},"in_reply_to":"bfb3d3c7_87972791","updated":"2019-05-28 08:14:37.000000000","message":"Done","commit_id":"43429c03edb71e4d02ac593dfbe05b57f2ec82f9"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"144d0737a3cb6c6cd8f9312e37708dd0bfb1e17a","unresolved":false,"context_lines":[{"line_number":348,"context_line":"    with such port the migration will also heal the missing allocation for"},{"line_number":349,"context_line":"    these instance."},{"line_number":350,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":351,"context_line":"    port also needs to be update in neutron to refer to the resource provider"},{"line_number":352,"context_line":"    UUID which provides the requested resources. 
If the port update fails in"},{"line_number":353,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":354,"context_line":"    placement allocation but requires the admin to do the port update in"}],"source_content_type":"text/x-rst","patch_set":24,"id":"bfb3d3c7_47912f99","line":351,"range":{"start_line":351,"start_character":26,"end_line":351,"end_character":32},"updated":"2019-05-21 11:31:46.000000000","message":"updated","commit_id":"43429c03edb71e4d02ac593dfbe05b57f2ec82f9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1b61de28491c1eb55c2a169946be3f30b1e21b3a","unresolved":false,"context_lines":[{"line_number":348,"context_line":"    with such port the migration will also heal the missing allocation for"},{"line_number":349,"context_line":"    these instance."},{"line_number":350,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":351,"context_line":"    port also needs to be update in neutron to refer to the resource provider"},{"line_number":352,"context_line":"    UUID which provides the requested resources. 
If the port update fails in"},{"line_number":353,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":354,"context_line":"    placement allocation but requires the admin to do the port update in"}],"source_content_type":"text/x-rst","patch_set":24,"id":"bfb3d3c7_5bbd20d6","line":351,"range":{"start_line":351,"start_character":26,"end_line":351,"end_character":32},"in_reply_to":"bfb3d3c7_47912f99","updated":"2019-05-28 08:14:37.000000000","message":"Done","commit_id":"43429c03edb71e4d02ac593dfbe05b57f2ec82f9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":379,"context_line":"    \u003cadmin/config-qos-min-bw.html\u003e`) but the corresponding"},{"line_number":380,"context_line":"    allocation is not found then the allocation is created against the"},{"line_number":381,"context_line":"    network device resource providers according to the resource request of"},{"line_number":382,"context_line":"    given port. It is possible that the missing allocation cannot be created"},{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. 
In this case the instance needs to be manually deleted or the"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_b30f8835","line":382,"range":{"start_line":382,"start_character":4,"end_line":382,"end_character":9},"updated":"2019-06-27 18:50:06.000000000","message":"that","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":379,"context_line":"    \u003cadmin/config-qos-min-bw.html\u003e`) but the corresponding"},{"line_number":380,"context_line":"    allocation is not found then the allocation is created against the"},{"line_number":381,"context_line":"    network device resource providers according to the resource request of"},{"line_number":382,"context_line":"    given port. It is possible that the missing allocation cannot be created"},{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. In this case the instance needs to be manually deleted or the"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_bd672e25","line":382,"range":{"start_line":382,"start_character":4,"end_line":382,"end_character":9},"in_reply_to":"9fb8cfa7_b30f8835","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":382,"context_line":"    given port. 
It is possible that the missing allocation cannot be created"},{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":386,"context_line":"    given port needs to be detached.  When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_145d1a78","line":385,"range":{"start_line":385,"start_character":30,"end_line":385,"end_character":38},"updated":"2019-06-27 21:36:12.000000000","message":"The instance needs to be deleted or the allocations for the instance? I\u0027m guessing the former because if we don\u0027t have the information available to heal the allocations (the in_tree and compute host pci inventory restrictions mentioned in the commit message) then the only recourse is delete and recreate the server so it goes through scheduling and the compute parts or wait for the server migration support.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4f74de86d0df58f4f064c227871dd5a13fbe9635","unresolved":false,"context_lines":[{"line_number":382,"context_line":"    given port. 
It is possible that the missing allocation cannot be created"},{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":386,"context_line":"    given port needs to be detached.  When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_f74e3e6d","line":385,"range":{"start_line":385,"start_character":30,"end_line":385,"end_character":38},"in_reply_to":"9fb8cfa7_145d1a78","updated":"2019-06-28 12:38:06.000000000","message":"correct. The instance needs to be deleted / the port needs to be detached. Or later, when it is available, the instance needs to be migrated.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":386,"context_line":"    given port needs to be detached.  
When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_73059013","line":386,"range":{"start_line":386,"start_character":4,"end_line":386,"end_character":9},"updated":"2019-06-27 18:50:06.000000000","message":"strike","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":386,"context_line":"    given port needs to be detached.  
When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_fd7846c1","line":386,"range":{"start_line":386,"start_character":4,"end_line":386,"end_character":9},"in_reply_to":"9fb8cfa7_73059013","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":386,"context_line":"    given port needs to be detached.  When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":390,"context_line":"    port also needs to be updated in neutron to refer to the resource provider"},{"line_number":391,"context_line":"    UUID which provides the requested resources. 
If the port update fails in"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_54ccf233","line":388,"range":{"start_line":386,"start_character":38,"end_line":388,"end_character":24},"updated":"2019-06-27 21:36:12.000000000","message":"I would somehow link to https://specs.openstack.org/openstack/nova-specs/specs/train/approved/support-move-ops-with-qos-ports.html here.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":383,"context_line":"    either due to not having enough resource inventory on the host the instance"},{"line_number":384,"context_line":"    resides on or because more than one resource provider could fulfill the"},{"line_number":385,"context_line":"    request. In this case the instance needs to be manually deleted or the"},{"line_number":386,"context_line":"    given port needs to be detached.  When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":390,"context_line":"    port also needs to be updated in neutron to refer to the resource provider"},{"line_number":391,"context_line":"    UUID which provides the requested resources. 
If the port update fails in"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_d803100c","line":388,"range":{"start_line":386,"start_character":38,"end_line":388,"end_character":24},"in_reply_to":"9fb8cfa7_54ccf233","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":386,"context_line":"    given port needs to be detached.  When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":390,"context_line":"    port also needs to be updated in neutron to refer to the resource provider"},{"line_number":391,"context_line":"    UUID which provides the requested resources. If the port update fails in"},{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_6e3367f7","line":390,"range":{"start_line":389,"start_character":72,"end_line":390,"end_character":44},"updated":"2019-06-27 18:50:06.000000000","message":"This makes it sound as if the admin is responsible for doing the port update. 
Perhaps better phrased:\n\nAfter the allocation for a port is healed successfully in placement, nova-manage will also attempt to update the port in neutron to refer to the ...","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":386,"context_line":"    given port needs to be detached.  When nova supports migrating instances"},{"line_number":387,"context_line":"    with guaranteed bandwidth ports, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":390,"context_line":"    port also needs to be updated in neutron to refer to the resource provider"},{"line_number":391,"context_line":"    UUID which provides the requested resources. 
If the port update fails in"},{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_781f845a","line":390,"range":{"start_line":389,"start_character":72,"end_line":390,"end_character":44},"in_reply_to":"9fb8cfa7_6e3367f7","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":390,"context_line":"    port also needs to be updated in neutron to refer to the resource provider"},{"line_number":391,"context_line":"    UUID which provides the requested resources. If the port update fails in"},{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"},{"line_number":394,"context_line":"    neutron manually. 
To be able to do that the command prints out openstack"},{"line_number":395,"context_line":"    CLI commands for the admin."}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_ee06771c","line":392,"range":{"start_line":392,"start_character":61,"end_line":392,"end_character":69},"updated":"2019-06-27 18:50:06.000000000","message":"roll back","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":389,"context_line":"    After the allocation for a port is healed successfully in placement the"},{"line_number":390,"context_line":"    port also needs to be updated in neutron to refer to the resource provider"},{"line_number":391,"context_line":"    UUID which provides the requested resources. If the port update fails in"},{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"},{"line_number":394,"context_line":"    neutron manually. To be able to do that the command prints out openstack"},{"line_number":395,"context_line":"    CLI commands for the admin."}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_982278a2","line":392,"range":{"start_line":392,"start_character":61,"end_line":392,"end_character":69},"in_reply_to":"9fb8cfa7_ee06771c","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":391,"context_line":"    UUID which provides the requested resources. 
If the port update fails in"},{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"},{"line_number":394,"context_line":"    neutron manually. To be able to do that the command prints out openstack"},{"line_number":395,"context_line":"    CLI commands for the admin."},{"line_number":396,"context_line":""},{"line_number":397,"context_line":"    There is also a special case handled for instances that *do* have"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_4e522315","line":394,"range":{"start_line":394,"start_character":22,"end_line":394,"end_character":44},"updated":"2019-06-27 18:50:06.000000000","message":"In this scenario","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":391,"context_line":"    UUID which provides the requested resources. If the port update fails in"},{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"},{"line_number":394,"context_line":"    neutron manually. 
To be able to do that the command prints out openstack"},{"line_number":395,"context_line":"    CLI commands for the admin."},{"line_number":396,"context_line":""},{"line_number":397,"context_line":"    There is also a special case handled for instances that *do* have"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_383d8cc3","line":394,"range":{"start_line":394,"start_character":22,"end_line":394,"end_character":44},"in_reply_to":"9fb8cfa7_4e522315","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"},{"line_number":394,"context_line":"    neutron manually. To be able to do that the command prints out openstack"},{"line_number":395,"context_line":"    CLI commands for the admin."},{"line_number":396,"context_line":""},{"line_number":397,"context_line":"    There is also a special case handled for instances that *do* have"},{"line_number":398,"context_line":"    allocations created before Placement API microversion 1.8 where project_id"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_948b4abe","line":395,"updated":"2019-06-27 21:36:12.000000000","message":"This is really not so great for systems automating this healing like an ansible task or something. 
I\u0027m hoping we could either rollback or fail on a subsequent attempt where we would otherwise duplicate allocations (if we can detect that).","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4f74de86d0df58f4f064c227871dd5a13fbe9635","unresolved":false,"context_lines":[{"line_number":392,"context_line":"    neutron for any reason then this command does not try to rollback the"},{"line_number":393,"context_line":"    placement allocation but requires the admin to do the port update in"},{"line_number":394,"context_line":"    neutron manually. To be able to do that the command prints out openstack"},{"line_number":395,"context_line":"    CLI commands for the admin."},{"line_number":396,"context_line":""},{"line_number":397,"context_line":"    There is also a special case handled for instances that *do* have"},{"line_number":398,"context_line":"    allocations created before Placement API microversion 1.8 where project_id"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_3748565a","line":395,"in_reply_to":"9fb8cfa7_948b4abe","updated":"2019-06-28 12:38:06.000000000","message":"I will try to do something about it. 
I assumed that nova manage is called by a human operator.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":407,"context_line":"    Specify ``--verbose`` to get detailed progress output during execution."},{"line_number":408,"context_line":""},{"line_number":409,"context_line":"    Specify ``--skip-port-allocations`` if you want skip healing the port"},{"line_number":410,"context_line":"    allocations."},{"line_number":411,"context_line":""},{"line_number":412,"context_line":"    Specify ``--dry-run`` to print output but not commit any changes. The"},{"line_number":413,"context_line":"    return code should be 4. *(Since 20.0.0 Train)*"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_54f5d255","line":410,"updated":"2019-06-27 21:36:12.000000000","message":"Include \"*(Since 20.0.0 Train)*\" like --dry-run and --instance below.\n\nI would also mention that skipping healing port allocations will improve performance of the command if you know your deployment does not provide these types of ports to your users.\n\nWould it be simpler, from a rollback perspective, if we did the neutron port update first and then the update allocations call to placement so that if the latter fails, we can just remove the \u0027allocation\u0027 piece from the port\u0027s binding:profile?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":407,"context_line":"    Specify ``--verbose`` to get detailed progress output during execution."},{"line_number":408,"context_line":""},{"line_number":409,"context_line":"  
  Specify ``--skip-port-allocations`` if you want skip healing the port"},{"line_number":410,"context_line":"    allocations."},{"line_number":411,"context_line":""},{"line_number":412,"context_line":"    Specify ``--dry-run`` to print output but not commit any changes. The"},{"line_number":413,"context_line":"    return code should be 4. *(Since 20.0.0 Train)*"}],"source_content_type":"text/x-rst","patch_set":28,"id":"9fb8cfa7_586640ef","line":410,"in_reply_to":"9fb8cfa7_54f5d255","updated":"2019-07-01 14:49:20.000000000","message":"\u003e Include \"*(Since 20.0.0 Train)*\" like --dry-run and --instance\n \u003e below.\n\nDone.\n\n \u003e \n \u003e I would also mention that skipping healing port allocations will\n \u003e improve performance of the command if you know your deployment does\n \u003e not provide these types of ports to your users.\n \u003e \n\nDone.\n\n \u003e Would it be simpler, from a rollback perspective, if we did the\n \u003e neutron port update first and then the update allocations call to\n \u003e placement so that if the latter fails, we can just remove the\n \u003e \u0027allocation\u0027 piece from the port\u0027s binding:profile?\n\nYes. 
I will move to that direction in the rollback impl.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":387,"context_line":"    with guaranteed bandwidth ports`_, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":""},{"line_number":390,"context_line":"    After the allocation for a port is healed successfully in placement"},{"line_number":391,"context_line":"    nova-manage will also attempt to update the port in neutron to refer to"},{"line_number":392,"context_line":"    the resource provider UUID which provides the requested resources. If the"},{"line_number":393,"context_line":"    port update fails in neutron for any reason then this command does not try"},{"line_number":394,"context_line":"    to roll back the placement allocation but requires the admin to do the port"}],"source_content_type":"text/x-rst","patch_set":30,"id":"9fb8cfa7_e2d4f949","line":391,"range":{"start_line":390,"start_character":4,"end_line":391,"end_character":63},"updated":"2019-07-03 16:49:22.000000000","message":"Isn\u0027t this old now? 
I thought we agreed to try the port update first, then update allocations, and if updating allocations failed we\u0027d try to rollback the port update and failing that tell the operator to manually update the port\u0027s binding:profile to null out the allocation key.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":387,"context_line":"    with guaranteed bandwidth ports`_, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":""},{"line_number":390,"context_line":"    After the allocation for a port is healed successfully in placement"},{"line_number":391,"context_line":"    nova-manage will also attempt to update the port in neutron to refer to"},{"line_number":392,"context_line":"    the resource provider UUID which provides the requested resources. 
If the"},{"line_number":393,"context_line":"    port update fails in neutron for any reason then this command does not try"},{"line_number":394,"context_line":"    to roll back the placement allocation but requires the admin to do the port"}],"source_content_type":"text/x-rst","patch_set":30,"id":"7faddb67_94bb06e4","line":391,"range":{"start_line":390,"start_character":4,"end_line":391,"end_character":63},"in_reply_to":"9fb8cfa7_e2d4f949","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":417,"context_line":"    *(Since 20.0.0 Train)*"},{"line_number":418,"context_line":""},{"line_number":419,"context_line":"    Specify ``--skip-port-allocations`` to skip the healing of the resource"},{"line_number":420,"context_line":"    allocations of bound ports. E.g. healing bandwidth resource allocation for"},{"line_number":421,"context_line":"    ports having minimum QoS policy rules attached. If your deployment does"},{"line_number":422,"context_line":"    not use such feature then the performance impact of querying neutron ports"},{"line_number":423,"context_line":"    for each instance can be avoided with this flag."}],"source_content_type":"text/x-rst","patch_set":30,"id":"9fb8cfa7_62c0097e","line":420,"range":{"start_line":420,"start_character":30,"end_line":420,"end_character":36},"updated":"2019-07-03 16:49:22.000000000","message":"nit: \", e.g. 
healing...\"","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":417,"context_line":"    *(Since 20.0.0 Train)*"},{"line_number":418,"context_line":""},{"line_number":419,"context_line":"    Specify ``--skip-port-allocations`` to skip the healing of the resource"},{"line_number":420,"context_line":"    allocations of bound ports. E.g. healing bandwidth resource allocation for"},{"line_number":421,"context_line":"    ports having minimum QoS policy rules attached. If your deployment does"},{"line_number":422,"context_line":"    not use such feature then the performance impact of querying neutron ports"},{"line_number":423,"context_line":"    for each instance can be avoided with this flag."}],"source_content_type":"text/x-rst","patch_set":30,"id":"7faddb67_34cc9288","line":420,"range":{"start_line":420,"start_character":30,"end_line":420,"end_character":36},"in_reply_to":"9fb8cfa7_62c0097e","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":389,"context_line":""},{"line_number":390,"context_line":"    Before the allocation for the ports are persisted in placement nova-manage"},{"line_number":391,"context_line":"    tries to update each port in neutron to refer to the resource provider UUID"},{"line_number":392,"context_line":"    which provides the requested resources. 
If any of the port update fails in"},{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"},{"line_number":394,"context_line":"    roll back the partial updates of this instance. If the roll back fails"},{"line_number":395,"context_line":"    then the process stops with exit code ``7`` and the admin needs to do the"}],"source_content_type":"text/x-rst","patch_set":32,"id":"7faddb67_719c2a5d","line":392,"range":{"start_line":392,"start_character":63,"end_line":392,"end_character":75},"updated":"2019-07-08 21:47:42.000000000","message":"nit: \"updates fail\"","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":389,"context_line":""},{"line_number":390,"context_line":"    Before the allocation for the ports are persisted in placement nova-manage"},{"line_number":391,"context_line":"    tries to update each port in neutron to refer to the resource provider UUID"},{"line_number":392,"context_line":"    which provides the requested resources. If any of the port update fails in"},{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"},{"line_number":394,"context_line":"    roll back the partial updates of this instance. 
If the roll back fails"},{"line_number":395,"context_line":"    then the process stops with exit code ``7`` and the admin needs to do the"}],"source_content_type":"text/x-rst","patch_set":32,"id":"7faddb67_88edf38c","line":392,"range":{"start_line":392,"start_character":63,"end_line":392,"end_character":75},"in_reply_to":"7faddb67_719c2a5d","updated":"2019-07-09 07:50:12.000000000","message":"Done","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":391,"context_line":"    tries to update each port in neutron to refer to the resource provider UUID"},{"line_number":392,"context_line":"    which provides the requested resources. If any of the port update fails in"},{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"},{"line_number":394,"context_line":"    roll back the partial updates of this instance. If the roll back fails"},{"line_number":395,"context_line":"    then the process stops with exit code ``7`` and the admin needs to do the"},{"line_number":396,"context_line":"    rollback in neutron manually according to the description in exit code"},{"line_number":397,"context_line":"    section."}],"source_content_type":"text/x-rst","patch_set":32,"id":"7faddb67_f1a7baa6","line":394,"range":{"start_line":394,"start_character":37,"end_line":394,"end_character":50},"updated":"2019-07-08 21:47:42.000000000","message":"\"the ports\"? 
While I think I know what you\u0027re saying with \"this instance\" here (rollback the partial updates to the ports attached to the instance being processed), it could maybe be confused by someone else reading this.","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":391,"context_line":"    tries to update each port in neutron to refer to the resource provider UUID"},{"line_number":392,"context_line":"    which provides the requested resources. If any of the port update fails in"},{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"},{"line_number":394,"context_line":"    roll back the partial updates of this instance. If the roll back fails"},{"line_number":395,"context_line":"    then the process stops with exit code ``7`` and the admin needs to do the"},{"line_number":396,"context_line":"    rollback in neutron manually according to the description in exit code"},{"line_number":397,"context_line":"    section."}],"source_content_type":"text/x-rst","patch_set":32,"id":"7faddb67_a8954fe6","line":394,"range":{"start_line":394,"start_character":37,"end_line":394,"end_character":50},"in_reply_to":"7faddb67_f1a7baa6","updated":"2019-07-09 07:50:12.000000000","message":"Done","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"},{"line_number":394,"context_line":"    roll back the partial updates of this instance. 
If the roll back fails"},{"line_number":395,"context_line":"    then the process stops with exit code ``7`` and the admin needs to do the"},{"line_number":396,"context_line":"    rollback in neutron manually according to the description in exit code"},{"line_number":397,"context_line":"    section."},{"line_number":398,"context_line":""},{"line_number":399,"context_line":"    .. _supports migrating instances with guaranteed bandwidth ports: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/support-move-ops-with-qos-ports.html"}],"source_content_type":"text/x-rst","patch_set":32,"id":"7faddb67_11ccd668","line":396,"range":{"start_line":396,"start_character":62,"end_line":396,"end_character":64},"updated":"2019-07-08 21:47:42.000000000","message":"in the","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"},{"line_number":394,"context_line":"    roll back the partial updates of this instance. If the roll back fails"},{"line_number":395,"context_line":"    then the process stops with exit code ``7`` and the admin needs to do the"},{"line_number":396,"context_line":"    rollback in neutron manually according to the description in exit code"},{"line_number":397,"context_line":"    section."},{"line_number":398,"context_line":""},{"line_number":399,"context_line":"    .. 
_supports migrating instances with guaranteed bandwidth ports: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/support-move-ops-with-qos-ports.html"}],"source_content_type":"text/x-rst","patch_set":32,"id":"7faddb67_689f5703","line":396,"range":{"start_line":396,"start_character":62,"end_line":396,"end_character":64},"in_reply_to":"7faddb67_11ccd668","updated":"2019-07-09 07:50:12.000000000","message":"Done","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9d5e3b644a11eb055c47ed9f263f17306a172e6c","unresolved":false,"context_lines":[{"line_number":440,"context_line":"    * 6: Unable to update ports in neutron"},{"line_number":441,"context_line":"    * 7: Cannot roll back neutron port updates. Manual steps needed. The error"},{"line_number":442,"context_line":"      message will indicate which neutron ports need to be changed to clean up"},{"line_number":443,"context_line":"      the partial update. 
For such ports remove the ``allocation`` key from the"},{"line_number":444,"context_line":"      ``binding:profile`` of the port by first gathering the current keys in"},{"line_number":445,"context_line":"      the profile with::"},{"line_number":446,"context_line":""},{"line_number":447,"context_line":"        $ openstack port show \u003cport_uuid\u003e"},{"line_number":448,"context_line":""},{"line_number":449,"context_line":"      then pushing back the same profile data except the allocation key"},{"line_number":450,"context_line":"      itself::"},{"line_number":451,"context_line":""},{"line_number":452,"context_line":"        $ openstack port set \u003cport_uuid\u003e --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e \\"},{"line_number":453,"context_line":"          --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e ..."},{"line_number":454,"context_line":""},{"line_number":455,"context_line":"    * 127: Invalid input."},{"line_number":456,"context_line":""}],"source_content_type":"text/x-rst","patch_set":34,"id":"7faddb67_843b97e9","line":453,"range":{"start_line":443,"start_character":26,"end_line":453,"end_character":45},"updated":"2019-07-09 14:29:44.000000000","message":"This can be done simpler:\n  $ openstack port unset --binding-profile allocation \u003cport_uuid\u003e","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"fc28b6dabde6565f482f1cddfd00e2797b17205f","unresolved":false,"context_lines":[{"line_number":440,"context_line":"    * 6: Unable to update ports in neutron"},{"line_number":441,"context_line":"    * 7: Cannot roll back neutron port updates. Manual steps needed. The error"},{"line_number":442,"context_line":"      message will indicate which neutron ports need to be changed to clean up"},{"line_number":443,"context_line":"      the partial update. 
For such ports remove the ``allocation`` key from the"},{"line_number":444,"context_line":"      ``binding:profile`` of the port by first gathering the current keys in"},{"line_number":445,"context_line":"      the profile with::"},{"line_number":446,"context_line":""},{"line_number":447,"context_line":"        $ openstack port show \u003cport_uuid\u003e"},{"line_number":448,"context_line":""},{"line_number":449,"context_line":"      then pushing back the same profile data except the allocation key"},{"line_number":450,"context_line":"      itself::"},{"line_number":451,"context_line":""},{"line_number":452,"context_line":"        $ openstack port set \u003cport_uuid\u003e --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e \\"},{"line_number":453,"context_line":"          --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e ..."},{"line_number":454,"context_line":""},{"line_number":455,"context_line":"    * 127: Invalid input."},{"line_number":456,"context_line":""}],"source_content_type":"text/x-rst","patch_set":34,"id":"7faddb67_1fa85475","line":453,"range":{"start_line":443,"start_character":26,"end_line":453,"end_character":45},"in_reply_to":"7faddb67_843b97e9","updated":"2019-07-09 14:58:00.000000000","message":"Oooo yeah that\u0027s much better.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":387,"context_line":"    with guaranteed bandwidth ports`_, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":""},{"line_number":390,"context_line":"    Before the allocation for the ports are persisted in placement nova-manage"},{"line_number":391,"context_line":"    tries to update each port in neutron to refer to the resource provider 
UUID"},{"line_number":392,"context_line":"    which provides the requested resources. If any of the port updates fail in"},{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"}],"source_content_type":"text/x-rst","patch_set":35,"id":"7faddb67_917e4969","line":390,"range":{"start_line":390,"start_character":15,"end_line":390,"end_character":25},"updated":"2019-07-11 20:04:17.000000000","message":"allocations","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":387,"context_line":"    with guaranteed bandwidth ports`_, migration will heal missing allocations"},{"line_number":388,"context_line":"    for these instances."},{"line_number":389,"context_line":""},{"line_number":390,"context_line":"    Before the allocation for the ports are persisted in placement nova-manage"},{"line_number":391,"context_line":"    tries to update each port in neutron to refer to the resource provider UUID"},{"line_number":392,"context_line":"    which provides the requested resources. 
If any of the port updates fail in"},{"line_number":393,"context_line":"    neutron or the allocation update fails in placement the command tries to"}],"source_content_type":"text/x-rst","patch_set":35,"id":"7faddb67_b469ccf9","line":390,"range":{"start_line":390,"start_character":15,"end_line":390,"end_character":25},"in_reply_to":"7faddb67_917e4969","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":420,"context_line":"    Specify ``--skip-port-allocations`` to skip the healing of the resource"},{"line_number":421,"context_line":"    allocations of bound ports, e.g. healing bandwidth resource allocation for"},{"line_number":422,"context_line":"    ports having minimum QoS policy rules attached. If your deployment does"},{"line_number":423,"context_line":"    not use such feature then the performance impact of querying neutron ports"},{"line_number":424,"context_line":"    for each instance can be avoided with this flag."},{"line_number":425,"context_line":"    *(Since 20.0.0 Train)*"},{"line_number":426,"context_line":""}],"source_content_type":"text/x-rst","patch_set":35,"id":"7faddb67_b1a2654a","line":423,"range":{"start_line":423,"start_character":12,"end_line":423,"end_character":16},"updated":"2019-07-11 20:04:17.000000000","message":"such a","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":420,"context_line":"    Specify ``--skip-port-allocations`` to skip the healing of the resource"},{"line_number":421,"context_line":"    allocations of bound 
ports, e.g. healing bandwidth resource allocation for"},{"line_number":422,"context_line":"    ports having minimum QoS policy rules attached. If your deployment does"},{"line_number":423,"context_line":"    not use such feature then the performance impact of querying neutron ports"},{"line_number":424,"context_line":"    for each instance can be avoided with this flag."},{"line_number":425,"context_line":"    *(Since 20.0.0 Train)*"},{"line_number":426,"context_line":""}],"source_content_type":"text/x-rst","patch_set":35,"id":"7faddb67_546ad8ec","line":423,"range":{"start_line":423,"start_character":12,"end_line":423,"end_character":16},"in_reply_to":"7faddb67_b1a2654a","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"32b43dfe020ca30d655813af15a208d6ca70d66e","unresolved":false,"context_lines":[{"line_number":440,"context_line":"    * 6: Unable to update ports in neutron"},{"line_number":441,"context_line":"    * 7: Cannot roll back neutron port updates. Manual steps needed. The error"},{"line_number":442,"context_line":"      message will indicate which neutron ports need to be changed to clean up"},{"line_number":443,"context_line":"      the partial update. 
For such ports remove the ``allocation`` key from the"},{"line_number":444,"context_line":"      ``binding:profile`` of the port by first gathering the current keys in"},{"line_number":445,"context_line":"      the profile with::"},{"line_number":446,"context_line":""},{"line_number":447,"context_line":"        $ openstack port show \u003cport_uuid\u003e"}],"source_content_type":"text/x-rst","patch_set":35,"id":"7faddb67_74ae8a99","line":444,"range":{"start_line":443,"start_character":26,"end_line":444,"end_character":37},"updated":"2019-07-22 18:45:51.000000000","message":"nit: I think this could have still been useful but it\u0027s not a big deal. I left it in my FUP:\n\nhttps://review.opendev.org/#/c/670361/1/doc/source/cli/nova-manage.rst","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"452f52f3e1c8993feec5e0864bee49cd80785632","unresolved":false,"context_lines":[{"line_number":449,"context_line":"      then pushing back the same profile data except the allocation key"},{"line_number":450,"context_line":"      itself::"},{"line_number":451,"context_line":""},{"line_number":452,"context_line":"        $ openstack port set \u003cport_uuid\u003e --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e \\"},{"line_number":453,"context_line":"          --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e ..."},{"line_number":454,"context_line":""},{"line_number":455,"context_line":"    * 127: Invalid input."}],"source_content_type":"text/x-rst","patch_set":35,"id":"7faddb67_96eabf72","line":452,"updated":"2019-07-11 18:07:30.000000000","message":"Don\u0027t forget that you want to use \"port unset --binding-profile allocation\" here.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0a133ffe47442c3da6f04052f9b81fddbecdacee","unresolved":false,"context_lines":[{"line_number":449,"context_line":"      then pushing back the same profile data except the allocation key"},{"line_number":450,"context_line":"      itself::"},{"line_number":451,"context_line":""},{"line_number":452,"context_line":"        $ openstack port set \u003cport_uuid\u003e --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e \\"},{"line_number":453,"context_line":"          --binding-profile \u003ckey\u003e\u003d\u003cvalue\u003e ..."},{"line_number":454,"context_line":""},{"line_number":455,"context_line":"    * 127: Invalid input."}],"source_content_type":"text/x-rst","patch_set":35,"id":"7faddb67_762063b1","line":452,"in_reply_to":"7faddb67_96eabf72","updated":"2019-07-11 18:21:56.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"}],"nova/cmd/manage.py":[{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"c13693ebd7b8e059bd60d041f80a6092141e272d","unresolved":false,"context_lines":[{"line_number":1875,"context_line":""},{"line_number":1876,"context_line":"        # TODO(gibi): we might want to cache this while working on the same"},{"line_number":1877,"context_line":"        #  instance"},{"line_number":1878,"context_line":"        rps \u003d placement._get_providers_in_tree(ctxt, rp_uuid)"},{"line_number":1879,"context_line":"        rps_with_traits \u003d {"},{"line_number":1880,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1881,"context_line":"            for rp in rps}"}],"source_content_type":"text/x-python","patch_set":11,"id":"5fc1f717_10c6ef4a","line":1878,"updated":"2019-03-22 12:39:29.000000000","message":"If you\u0027re going to use this in a public way, probably should make it 
public?","commit_id":"ae69bf61eb9358f41709068d3c3932c7cd61c62d"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d4040a1c47c6ece6d379b634b5fd47074729691f","unresolved":false,"context_lines":[{"line_number":1875,"context_line":""},{"line_number":1876,"context_line":"        # TODO(gibi): we might want to cache this while working on the same"},{"line_number":1877,"context_line":"        #  instance"},{"line_number":1878,"context_line":"        rps \u003d placement._get_providers_in_tree(ctxt, rp_uuid)"},{"line_number":1879,"context_line":"        rps_with_traits \u003d {"},{"line_number":1880,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1881,"context_line":"            for rp in rps}"}],"source_content_type":"text/x-python","patch_set":11,"id":"5fc1f717_332e0c8c","line":1878,"in_reply_to":"5fc1f717_10c6ef4a","updated":"2019-03-28 10:49:41.000000000","message":"Good point.","commit_id":"ae69bf61eb9358f41709068d3c3932c7cd61c62d"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"15876297217f0db765711c6ab2c09562989dafcf","unresolved":false,"context_lines":[{"line_number":1875,"context_line":""},{"line_number":1876,"context_line":"        # TODO(gibi): we might want to cache this while working on the same"},{"line_number":1877,"context_line":"        #  instance"},{"line_number":1878,"context_line":"        rps \u003d placement._get_providers_in_tree(ctxt, rp_uuid)"},{"line_number":1879,"context_line":"        rps_with_traits \u003d {"},{"line_number":1880,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1881,"context_line":"            for rp in 
rps}"}],"source_content_type":"text/x-python","patch_set":11,"id":"5fc1f717_e560b5d1","line":1878,"in_reply_to":"5fc1f717_332e0c8c","updated":"2019-03-28 13:35:42.000000000","message":"Done","commit_id":"ae69bf61eb9358f41709068d3c3932c7cd61c62d"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"c13693ebd7b8e059bd60d041f80a6092141e272d","unresolved":false,"context_lines":[{"line_number":2097,"context_line":"                if port_allocations:"},{"line_number":2098,"context_line":"                    allocations[\u0027allocations\u0027] \u003d self._merge_allocations("},{"line_number":2099,"context_line":"                        allocations[\u0027allocations\u0027], port_allocations)"},{"line_number":2100,"context_line":"                    resp \u003d placement.put("},{"line_number":2101,"context_line":"                        \u0027/allocations/%s\u0027 % instance.uuid,"},{"line_number":2102,"context_line":"                        allocations,"},{"line_number":2103,"context_line":"                        version\u003dreport.CONSUMER_GENERATION_VERSION)"}],"source_content_type":"text/x-python","patch_set":11,"id":"5fc1f717_10390f3e","line":2100,"updated":"2019-03-22 12:39:29.000000000","message":"my previous comments about retry on rp-generation-based 409 may apply here too","commit_id":"ae69bf61eb9358f41709068d3c3932c7cd61c62d"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d4040a1c47c6ece6d379b634b5fd47074729691f","unresolved":false,"context_lines":[{"line_number":2097,"context_line":"                if port_allocations:"},{"line_number":2098,"context_line":"                    allocations[\u0027allocations\u0027] \u003d self._merge_allocations("},{"line_number":2099,"context_line":"                        allocations[\u0027allocations\u0027], 
port_allocations)"},{"line_number":2100,"context_line":"                    resp \u003d placement.put("},{"line_number":2101,"context_line":"                        \u0027/allocations/%s\u0027 % instance.uuid,"},{"line_number":2102,"context_line":"                        allocations,"},{"line_number":2103,"context_line":"                        version\u003dreport.CONSUMER_GENERATION_VERSION)"}],"source_content_type":"text/x-python","patch_set":11,"id":"5fc1f717_1333d0f7","line":2100,"in_reply_to":"5fc1f717_10390f3e","updated":"2019-03-28 10:49:41.000000000","message":"Done","commit_id":"ae69bf61eb9358f41709068d3c3932c7cd61c62d"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"6a54df7138b916bfcd11ed886cd2aa8f76579fe7","unresolved":false,"context_lines":[{"line_number":2053,"context_line":"                            \"binding_profile\": binding_cli})"},{"line_number":2054,"context_line":"        return \"\\n\".join(result)"},{"line_number":2055,"context_line":""},{"line_number":2056,"context_line":"    def _update_ports(self, ctxt, neutron, ports_to_update):"},{"line_number":2057,"context_line":"        try:"},{"line_number":2058,"context_line":"            for port in ports_to_update:"},{"line_number":2059,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003d{\u0027port\u0027: port})"}],"source_content_type":"text/x-python","patch_set":17,"id":"ffb9cba7_082d9142","line":2056,"range":{"start_line":2056,"start_character":28,"end_line":2056,"end_character":32},"updated":"2019-05-05 12:18:53.000000000","message":"unused","commit_id":"152419d4e0462ddc25709ed88e1e70a95c562c8c"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"c6d629c7ca294ba076b2cb25eaef8b1873051323","unresolved":false,"context_lines":[{"line_number":2056,"context_line":"    def 
_update_ports(self, ctxt, neutron, ports_to_update):"},{"line_number":2057,"context_line":"        try:"},{"line_number":2058,"context_line":"            for port in ports_to_update:"},{"line_number":2059,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003d{\u0027port\u0027: port})"},{"line_number":2060,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":2061,"context_line":"            raise UnableToUpdatePorts("},{"line_number":2062,"context_line":"                error\u003de.message,"}],"source_content_type":"text/x-python","patch_set":17,"id":"5fc1f717_3b458cb4","line":2059,"range":{"start_line":2059,"start_character":62,"end_line":2059,"end_character":66},"updated":"2019-04-10 17:01:37.000000000","message":"Cannot send the whole port as it contains read only fields like status. Only send binding:profile: {\u0027allocation\u0027: \u003crp_uud\u003e} to neutron.","commit_id":"152419d4e0462ddc25709ed88e1e70a95c562c8c"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"6a54df7138b916bfcd11ed886cd2aa8f76579fe7","unresolved":false,"context_lines":[{"line_number":2056,"context_line":"    def _update_ports(self, ctxt, neutron, ports_to_update):"},{"line_number":2057,"context_line":"        try:"},{"line_number":2058,"context_line":"            for port in ports_to_update:"},{"line_number":2059,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003d{\u0027port\u0027: port})"},{"line_number":2060,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":2061,"context_line":"            raise UnableToUpdatePorts("},{"line_number":2062,"context_line":"                
error\u003de.message,"}],"source_content_type":"text/x-python","patch_set":17,"id":"ffb9cba7_a8948545","line":2059,"range":{"start_line":2059,"start_character":62,"end_line":2059,"end_character":66},"in_reply_to":"5fc1f717_3b458cb4","updated":"2019-05-05 12:18:53.000000000","message":"Done","commit_id":"152419d4e0462ddc25709ed88e1e70a95c562c8c"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"c6d629c7ca294ba076b2cb25eaef8b1873051323","unresolved":false,"context_lines":[{"line_number":2291,"context_line":"    @action_description("},{"line_number":2292,"context_line":"        _(\"Iterates over non-cell0 cells looking for instances which do \""},{"line_number":2293,"context_line":"          \"not have allocations in the Placement service, or have incomplete \""},{"line_number":2294,"context_line":"          \"consumer project_id/user_id values in existing allocations, and \""},{"line_number":2295,"context_line":"          \"which are not undergoing a task state transition. 
For each \""},{"line_number":2296,"context_line":"          \"instance found, allocations are created (or updated) against the \""},{"line_number":2297,"context_line":"          \"compute node resource provider for that instance based on the \""}],"source_content_type":"text/x-python","patch_set":17,"id":"5fc1f717_00ea9d0d","line":2294,"updated":"2019-04-10 17:01:37.000000000","message":"or missing allocation related to attached ports.","commit_id":"152419d4e0462ddc25709ed88e1e70a95c562c8c"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"6a54df7138b916bfcd11ed886cd2aa8f76579fe7","unresolved":false,"context_lines":[{"line_number":2291,"context_line":"    @action_description("},{"line_number":2292,"context_line":"        _(\"Iterates over non-cell0 cells looking for instances which do \""},{"line_number":2293,"context_line":"          \"not have allocations in the Placement service, or have incomplete \""},{"line_number":2294,"context_line":"          \"consumer project_id/user_id values in existing allocations, and \""},{"line_number":2295,"context_line":"          \"which are not undergoing a task state transition. 
For each \""},{"line_number":2296,"context_line":"          \"instance found, allocations are created (or updated) against the \""},{"line_number":2297,"context_line":"          \"compute node resource provider for that instance based on the \""}],"source_content_type":"text/x-python","patch_set":17,"id":"ffb9cba7_88a9817c","line":2294,"in_reply_to":"5fc1f717_00ea9d0d","updated":"2019-05-05 12:18:53.000000000","message":"Done","commit_id":"152419d4e0462ddc25709ed88e1e70a95c562c8c"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"c6d629c7ca294ba076b2cb25eaef8b1873051323","unresolved":false,"context_lines":[{"line_number":2378,"context_line":""},{"line_number":2379,"context_line":"        neutron \u003d None"},{"line_number":2380,"context_line":"        if heal_port_allocations:"},{"line_number":2381,"context_line":"            neutron \u003d neutron_api.get_client(context, admin\u003dTrue)"},{"line_number":2382,"context_line":""},{"line_number":2383,"context_line":"        num_processed \u003d 0"},{"line_number":2384,"context_line":"        # TODO(mriedem): Use context.scatter_gather_skip_cell0."}],"source_content_type":"text/x-python","patch_set":17,"id":"5fc1f717_e040b108","line":2381,"range":{"start_line":2381,"start_character":45,"end_line":2381,"end_character":52},"updated":"2019-04-10 17:01:37.000000000","message":"this should be ctxt. 
I also have to find a way to catch such a problem in the unit/functional test.","commit_id":"152419d4e0462ddc25709ed88e1e70a95c562c8c"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"6a54df7138b916bfcd11ed886cd2aa8f76579fe7","unresolved":false,"context_lines":[{"line_number":2378,"context_line":""},{"line_number":2379,"context_line":"        neutron \u003d None"},{"line_number":2380,"context_line":"        if heal_port_allocations:"},{"line_number":2381,"context_line":"            neutron \u003d neutron_api.get_client(context, admin\u003dTrue)"},{"line_number":2382,"context_line":""},{"line_number":2383,"context_line":"        num_processed \u003d 0"},{"line_number":2384,"context_line":"        # TODO(mriedem): Use context.scatter_gather_skip_cell0."}],"source_content_type":"text/x-python","patch_set":17,"id":"ffb9cba7_28ddf513","line":2381,"range":{"start_line":2381,"start_character":45,"end_line":2381,"end_character":52},"in_reply_to":"5fc1f717_e040b108","updated":"2019-05-05 12:18:53.000000000","message":"Done","commit_id":"152419d4e0462ddc25709ed88e1e70a95c562c8c"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"d416b9c67d6648f4aea39226c94e61da8e9fd2e6","unresolved":false,"context_lines":[{"line_number":1623,"context_line":"        return 0"},{"line_number":1624,"context_line":""},{"line_number":1625,"context_line":""},{"line_number":1626,"context_line":"class MoreThanOnMatchingResourceProvidersToHealFrom(exception.NovaException):"},{"line_number":1627,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider %(rp_uuids)s are \""},{"line_number":1628,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":1629,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
This script \""}],"source_content_type":"text/x-python","patch_set":25,"id":"9fb8cfa7_4882c9ca","line":1626,"range":{"start_line":1626,"start_character":13,"end_line":1626,"end_character":19},"updated":"2019-06-03 12:48:17.000000000","message":"One.\n\nAlso, is putting the exceptions in here instead of the usual nova/exception just for convenience or something else.\n\nIf they are of a class of exception, do should they have a shared parents? It looks like they mostly exist for the sake of the message.","commit_id":"c43f1a796312f976b7c0df5f8fbfe4cd0a04e493"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"e6f5b57dfdbf23f1ff8d4b1a126d9e1e5a8f371a","unresolved":false,"context_lines":[{"line_number":1623,"context_line":"        return 0"},{"line_number":1624,"context_line":""},{"line_number":1625,"context_line":""},{"line_number":1626,"context_line":"class MoreThanOnMatchingResourceProvidersToHealFrom(exception.NovaException):"},{"line_number":1627,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider %(rp_uuids)s are \""},{"line_number":1628,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":1629,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
This script \""}],"source_content_type":"text/x-python","patch_set":25,"id":"9fb8cfa7_d8873461","line":1626,"range":{"start_line":1626,"start_character":13,"end_line":1626,"end_character":19},"in_reply_to":"9fb8cfa7_4882c9ca","updated":"2019-06-05 15:10:23.000000000","message":"typo fixed, exceptions moved to nova/exception, baseclass added.","commit_id":"c43f1a796312f976b7c0df5f8fbfe4cd0a04e493"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"d416b9c67d6648f4aea39226c94e61da8e9fd2e6","unresolved":false,"context_lines":[{"line_number":1822,"context_line":"                # that PCI PF in placement. When migration will be supported"},{"line_number":1823,"context_line":"                # with such servers then we can ask the admin to migrate these"},{"line_number":1824,"context_line":"                # servers instead to heal their allocation."},{"line_number":1825,"context_line":"                raise MoreThanOnMatchingResourceProvidersToHealFrom("},{"line_number":1826,"context_line":"                    rp_uuids\u003dmatching_rp_uuids,"},{"line_number":1827,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1828,"context_line":"                    instance_uuid\u003dinstance.uuid)"}],"source_content_type":"text/x-python","patch_set":25,"id":"9fb8cfa7_48d089ca","line":1825,"range":{"start_line":1825,"start_character":29,"end_line":1825,"end_character":35},"updated":"2019-06-03 12:48:17.000000000","message":"One","commit_id":"c43f1a796312f976b7c0df5f8fbfe4cd0a04e493"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"e6f5b57dfdbf23f1ff8d4b1a126d9e1e5a8f371a","unresolved":false,"context_lines":[{"line_number":1822,"context_line":"                # that PCI PF in placement. 
When migration will be supported"},{"line_number":1823,"context_line":"                # with such servers then we can ask the admin to migrate these"},{"line_number":1824,"context_line":"                # servers instead to heal their allocation."},{"line_number":1825,"context_line":"                raise MoreThanOnMatchingResourceProvidersToHealFrom("},{"line_number":1826,"context_line":"                    rp_uuids\u003dmatching_rp_uuids,"},{"line_number":1827,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1828,"context_line":"                    instance_uuid\u003dinstance.uuid)"}],"source_content_type":"text/x-python","patch_set":25,"id":"9fb8cfa7_787e486c","line":1825,"range":{"start_line":1825,"start_character":29,"end_line":1825,"end_character":35},"in_reply_to":"9fb8cfa7_48d089ca","updated":"2019-06-05 15:10:23.000000000","message":"Done","commit_id":"c43f1a796312f976b7c0df5f8fbfe4cd0a04e493"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1664,"context_line":"    def _get_ports(self, ctxt, instance, neutron):"},{"line_number":1665,"context_line":"        \"\"\"Return the ports that are bound to the instance"},{"line_number":1666,"context_line":""},{"line_number":1667,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1668,"context_line":"        :param instance: the instance to return the ports for"},{"line_number":1669,"context_line":"        :param neutron: nova.network.neutronv2.api.ClientWrapper to"},{"line_number":1670,"context_line":"            communicate with Neutron"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_f46ac6ea","line":1667,"range":{"start_line":1667,"start_character":21,"end_line":1667,"end_character":34},"updated":"2019-06-27 
21:36:12.000000000","message":"nit: this doesn\u0027t really matter for this method","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1664,"context_line":"    def _get_ports(self, ctxt, instance, neutron):"},{"line_number":1665,"context_line":"        \"\"\"Return the ports that are bound to the instance"},{"line_number":1666,"context_line":""},{"line_number":1667,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1668,"context_line":"        :param instance: the instance to return the ports for"},{"line_number":1669,"context_line":"        :param neutron: nova.network.neutronv2.api.ClientWrapper to"},{"line_number":1670,"context_line":"            communicate with Neutron"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_274011a5","line":1667,"range":{"start_line":1667,"start_character":21,"end_line":1667,"end_character":34},"in_reply_to":"9fb8cfa7_f46ac6ea","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1672,"context_line":"        :raise UnableToQueryPorts: If the neutron list ports query fails."},{"line_number":1673,"context_line":"        \"\"\""},{"line_number":1674,"context_line":"        try:"},{"line_number":1675,"context_line":"            return neutron.list_ports("},{"line_number":1676,"context_line":"                ctxt, device_id\u003dinstance.uuid)[\u0027ports\u0027]"},{"line_number":1677,"context_line":"        except neutron_client_exc.NeutronClientException 
as e:"},{"line_number":1678,"context_line":"            raise exception.UnableToQueryPorts("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_c8c7e695","line":1675,"updated":"2019-06-27 21:36:12.000000000","message":"I\u0027m not sure if it matters, but we could filter the fields in the response to just [\u0027id\u0027, \u0027binding:profile\u0027, \u0027resource_request\u0027] right? Similar to instance_has_port_with_resource_request.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1672,"context_line":"        :raise UnableToQueryPorts: If the neutron list ports query fails."},{"line_number":1673,"context_line":"        \"\"\""},{"line_number":1674,"context_line":"        try:"},{"line_number":1675,"context_line":"            return neutron.list_ports("},{"line_number":1676,"context_line":"                ctxt, device_id\u003dinstance.uuid)[\u0027ports\u0027]"},{"line_number":1677,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1678,"context_line":"            raise exception.UnableToQueryPorts("}],"source_content_type":"text/x-python","patch_set":28,"id":"7faddb67_464c1276","line":1675,"in_reply_to":"9fb8cfa7_8783fdca","updated":"2019-07-03 16:49:22.000000000","message":"You would pass the fields kwarg which is a list of fields to return, e.g.:\n\nhttps://github.com/openstack/nova/blob/3c5aec113c4f2e6e5811b3e9be333c80fb740ad8/nova/network/neutronv2/api.py#L2244","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1672,"context_line":"        :raise UnableToQueryPorts: If the neutron list ports query fails."},{"line_number":1673,"context_line":"        \"\"\""},{"line_number":1674,"context_line":"        try:"},{"line_number":1675,"context_line":"            return neutron.list_ports("},{"line_number":1676,"context_line":"                ctxt, device_id\u003dinstance.uuid)[\u0027ports\u0027]"},{"line_number":1677,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1678,"context_line":"            raise exception.UnableToQueryPorts("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_8783fdca","line":1675,"in_reply_to":"9fb8cfa7_c8c7e695","updated":"2019-07-01 14:49:20.000000000","message":"As far as I see list_ports() doesn\u0027t have a way to filter for fields.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1676,"context_line":"                ctxt, device_id\u003dinstance.uuid)[\u0027ports\u0027]"},{"line_number":1677,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1678,"context_line":"            raise exception.UnableToQueryPorts("},{"line_number":1679,"context_line":"                instance_uuid\u003dinstance.uuid, error\u003de.message)"},{"line_number":1680,"context_line":""},{"line_number":1681,"context_line":"    def _get_rps_in_tree_with_required_traits("},{"line_number":1682,"context_line":"            self, ctxt, rp_uuid, required_traits, 
placement):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_b461ae07","line":1679,"range":{"start_line":1679,"start_character":51,"end_line":1679,"end_character":60},"updated":"2019-06-27 21:36:12.000000000","message":"I\u0027d probably just use six.text_type(e) here. Is the message guaranteed to have substitution variables replaced in it? Probably safer to just to-string the thing with text_type.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1676,"context_line":"                ctxt, device_id\u003dinstance.uuid)[\u0027ports\u0027]"},{"line_number":1677,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1678,"context_line":"            raise exception.UnableToQueryPorts("},{"line_number":1679,"context_line":"                instance_uuid\u003dinstance.uuid, error\u003de.message)"},{"line_number":1680,"context_line":""},{"line_number":1681,"context_line":"    def _get_rps_in_tree_with_required_traits("},{"line_number":1682,"context_line":"            self, ctxt, rp_uuid, required_traits, placement):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_27dc91e2","line":1679,"range":{"start_line":1679,"start_character":51,"end_line":1679,"end_character":60},"in_reply_to":"9fb8cfa7_b461ae07","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1680,"context_line":""},{"line_number":1681,"context_line":"    def 
_get_rps_in_tree_with_required_traits("},{"line_number":1682,"context_line":"            self, ctxt, rp_uuid, required_traits, placement):"},{"line_number":1683,"context_line":"        \"\"\"Find the RPs that has all the required traits in the given rp tree."},{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_143c3adf","line":1683,"range":{"start_line":1683,"start_character":29,"end_line":1683,"end_character":32},"updated":"2019-06-27 21:36:12.000000000","message":"have","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1680,"context_line":""},{"line_number":1681,"context_line":"    def _get_rps_in_tree_with_required_traits("},{"line_number":1682,"context_line":"            self, ctxt, rp_uuid, required_traits, placement):"},{"line_number":1683,"context_line":"        \"\"\"Find the RPs that has all the required traits in the given rp tree."},{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_07ddcddb","line":1683,"range":{"start_line":1683,"start_character":29,"end_line":1683,"end_character":32},"in_reply_to":"9fb8cfa7_143c3adf","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1682,"context_line":"            self, ctxt, rp_uuid, required_traits, placement):"},{"line_number":1683,"context_line":"        \"\"\"Find the RPs that has all the required traits in the given rp tree."},{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"        :param required_traits: the traits that are need to be supported by"},{"line_number":1688,"context_line":"            the returned RP."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_d421c2b5","line":1685,"range":{"start_line":1685,"start_character":21,"end_line":1685,"end_character":34},"updated":"2019-06-27 21:36:12.000000000","message":"same - this doesn\u0027t matter for this method","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1682,"context_line":"            self, ctxt, rp_uuid, required_traits, placement):"},{"line_number":1683,"context_line":"        \"\"\"Find the RPs that has all the required traits in the given rp tree."},{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"        :param required_traits: the traits that are need to be supported by"},{"line_number":1688,"context_line":"       
     the returned RP."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_e7e1999c","line":1685,"range":{"start_line":1685,"start_character":21,"end_line":1685,"end_character":34},"in_reply_to":"9fb8cfa7_d421c2b5","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"        :param required_traits: the traits that are need to be supported by"},{"line_number":1688,"context_line":"            the returned RP."},{"line_number":1689,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1690,"context_line":"            to communicate with the Placement service API."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_9427cac7","line":1687,"range":{"start_line":1687,"start_character":48,"end_line":1687,"end_character":51},"updated":"2019-06-27 21:36:12.000000000","message":"nix","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: cell-targeted nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"  
      :param required_traits: the traits that are need to be supported by"},{"line_number":1688,"context_line":"            the returned RP."},{"line_number":1689,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1690,"context_line":"            to communicate with the Placement service API."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_c7f4955d","line":1687,"range":{"start_line":1687,"start_character":48,"end_line":1687,"end_character":51},"in_reply_to":"9fb8cfa7_9427cac7","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1697,"context_line":"        rps \u003d placement.get_providers_in_tree(ctxt, rp_uuid)"},{"line_number":1698,"context_line":"        rps_with_traits \u003d {"},{"line_number":1699,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1700,"context_line":"            for rp in rps}"},{"line_number":1701,"context_line":""},{"line_number":1702,"context_line":"        matching_rps \u003d ["},{"line_number":1703,"context_line":"            uuid for uuid, provided_traits in rps_with_traits.items()"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_08209e9a","line":1700,"range":{"start_line":1700,"start_character":22,"end_line":1700,"end_character":25},"updated":"2019-06-27 21:36:12.000000000","message":"Because get_providers_in_tree is using @safe_connect, technically rps here could be None and you\u0027d get a TypeError trying to iterate over it.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1697,"context_line":"        rps \u003d placement.get_providers_in_tree(ctxt, rp_uuid)"},{"line_number":1698,"context_line":"        rps_with_traits \u003d {"},{"line_number":1699,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1700,"context_line":"            for rp in rps}"},{"line_number":1701,"context_line":""},{"line_number":1702,"context_line":"        matching_rps \u003d ["},{"line_number":1703,"context_line":"            uuid for uuid, provided_traits in rps_with_traits.items()"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_3a94a48a","line":1700,"range":{"start_line":1700,"start_character":22,"end_line":1700,"end_character":25},"in_reply_to":"9fb8cfa7_08209e9a","updated":"2019-07-01 14:49:20.000000000","message":"Done. 
Plus get_provider_traits() can also raise exceptions that was unhandled before.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1699,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1700,"context_line":"            for rp in rps}"},{"line_number":1701,"context_line":""},{"line_number":1702,"context_line":"        matching_rps \u003d ["},{"line_number":1703,"context_line":"            uuid for uuid, provided_traits in rps_with_traits.items()"},{"line_number":1704,"context_line":"                if (set(required_traits) \u0026 set(provided_traits) \u003d\u003d"},{"line_number":1705,"context_line":"                    set(required_traits))]"},{"line_number":1706,"context_line":""},{"line_number":1707,"context_line":"        return matching_rps"},{"line_number":1708,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_88480e5c","line":1705,"range":{"start_line":1702,"start_character":8,"end_line":1705,"end_character":42},"updated":"2019-06-27 21:36:12.000000000","message":"Oh god this is breaking my brain - can we write this without the comprehension? 
And add a comment?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1699,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1700,"context_line":"            for rp in rps}"},{"line_number":1701,"context_line":""},{"line_number":1702,"context_line":"        matching_rps \u003d ["},{"line_number":1703,"context_line":"            uuid for uuid, provided_traits in rps_with_traits.items()"},{"line_number":1704,"context_line":"                if (set(required_traits) \u0026 set(provided_traits) \u003d\u003d"},{"line_number":1705,"context_line":"                    set(required_traits))]"},{"line_number":1706,"context_line":""},{"line_number":1707,"context_line":"        return matching_rps"},{"line_number":1708,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_3ac26486","line":1705,"range":{"start_line":1702,"start_character":8,"end_line":1705,"end_character":42},"in_reply_to":"9fb8cfa7_0bdbb84e","updated":"2019-07-01 14:49:20.000000000","message":"I changed to use issubset. 
@Matt: I hope that helps.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"5ceae3c6aec41d9925337a9b6facb6eb38af55e5","unresolved":false,"context_lines":[{"line_number":1699,"context_line":"            rp[\u0027uuid\u0027]: placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1700,"context_line":"            for rp in rps}"},{"line_number":1701,"context_line":""},{"line_number":1702,"context_line":"        matching_rps \u003d ["},{"line_number":1703,"context_line":"            uuid for uuid, provided_traits in rps_with_traits.items()"},{"line_number":1704,"context_line":"                if (set(required_traits) \u0026 set(provided_traits) \u003d\u003d"},{"line_number":1705,"context_line":"                    set(required_traits))]"},{"line_number":1706,"context_line":""},{"line_number":1707,"context_line":"        return matching_rps"},{"line_number":1708,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_0bdbb84e","line":1705,"range":{"start_line":1702,"start_character":8,"end_line":1705,"end_character":42},"in_reply_to":"9fb8cfa7_88480e5c","updated":"2019-06-27 22:15:11.000000000","message":"Maybe it would be clearer if you used set(provided_traits).issubset(required_traits) instead?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1706,"context_line":""},{"line_number":1707,"context_line":"        return matching_rps"},{"line_number":1708,"context_line":""},{"line_number":1709,"context_line":"    def _merge_allocations(self, alloc1, alloc2):"},{"line_number":1710,"context_line":"        \"\"\"Return a new allocation dict that contains the sum of 
alloc1 and"},{"line_number":1711,"context_line":"        alloc2."},{"line_number":1712,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_e8abcad2","line":1709,"updated":"2019-06-27 21:36:12.000000000","message":"This reminds me of the merge_resources utility method I removed here:\n\nI8c6b6c46b2587ee727653dafadbcb08b99ed7d35\n\nWe probably could have revived that except this doesn\u0027t care about the \"sign\" stuff.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1706,"context_line":""},{"line_number":1707,"context_line":"        return matching_rps"},{"line_number":1708,"context_line":""},{"line_number":1709,"context_line":"    def _merge_allocations(self, alloc1, alloc2):"},{"line_number":1710,"context_line":"        \"\"\"Return a new allocation dict that contains the sum of alloc1 and"},{"line_number":1711,"context_line":"        alloc2."},{"line_number":1712,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_3a47c41f","line":1709,"in_reply_to":"9fb8cfa7_e8abcad2","updated":"2019-07-01 14:49:20.000000000","message":"as far as I see merge_resources only merged a resource dicts, here we merge allocation dicts containing more than one resource dicts.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1721,"context_line":"        \"\"\""},{"line_number":1722,"context_line":""},{"line_number":1723,"context_line":"        allocations \u003d collections.defaultdict("},{"line_number":1724,"context_line":"            lambda: 
{\u0027resources\u0027: collections.defaultdict(lambda: 0)})"},{"line_number":1725,"context_line":""},{"line_number":1726,"context_line":"        for alloc in [alloc1, alloc2]:"},{"line_number":1727,"context_line":"            for rp_uuid in alloc:"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_8efb3bbc","line":1724,"range":{"start_line":1724,"start_character":58,"end_line":1724,"end_character":67},"updated":"2019-06-27 18:50:06.000000000","message":"or\n\n int","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1721,"context_line":"        \"\"\""},{"line_number":1722,"context_line":""},{"line_number":1723,"context_line":"        allocations \u003d collections.defaultdict("},{"line_number":1724,"context_line":"            lambda: {\u0027resources\u0027: collections.defaultdict(lambda: 0)})"},{"line_number":1725,"context_line":""},{"line_number":1726,"context_line":"        for alloc in [alloc1, alloc2]:"},{"line_number":1727,"context_line":"            for rp_uuid in alloc:"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_fa4ccc3f","line":1724,"range":{"start_line":1724,"start_character":58,"end_line":1724,"end_character":67},"in_reply_to":"9fb8cfa7_8efb3bbc","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1729,"context_line":"                    allocations[rp_uuid][\u0027resources\u0027][rc] +\u003d amount"},{"line_number":1730,"context_line":"        return 
allocations"},{"line_number":1731,"context_line":""},{"line_number":1732,"context_line":"    def _add_resources_to_allocation(self, allocation, rp_uuid, resources):"},{"line_number":1733,"context_line":"        \"\"\"Updates the passed in allocation dict with the resources allocated"},{"line_number":1734,"context_line":"        from the rp_uuid."},{"line_number":1735,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_6ec36739","line":1732,"range":{"start_line":1732,"start_character":8,"end_line":1732,"end_character":36},"updated":"2019-06-27 18:50:06.000000000","message":"This seems like a special case of _merge_allocations. DRYable?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1729,"context_line":"                    allocations[rp_uuid][\u0027resources\u0027][rc] +\u003d amount"},{"line_number":1730,"context_line":"        return allocations"},{"line_number":1731,"context_line":""},{"line_number":1732,"context_line":"    def _add_resources_to_allocation(self, allocation, rp_uuid, resources):"},{"line_number":1733,"context_line":"        \"\"\"Updates the passed in allocation dict with the resources allocated"},{"line_number":1734,"context_line":"        from the rp_uuid."},{"line_number":1735,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_fafa8cb7","line":1732,"range":{"start_line":1732,"start_character":8,"end_line":1732,"end_character":36},"in_reply_to":"9fb8cfa7_28e802b1","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1729,"context_line":"                    allocations[rp_uuid][\u0027resources\u0027][rc] +\u003d amount"},{"line_number":1730,"context_line":"        return allocations"},{"line_number":1731,"context_line":""},{"line_number":1732,"context_line":"    def _add_resources_to_allocation(self, allocation, rp_uuid, resources):"},{"line_number":1733,"context_line":"        \"\"\"Updates the passed in allocation dict with the resources allocated"},{"line_number":1734,"context_line":"        from the rp_uuid."},{"line_number":1735,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_28e802b1","line":1732,"range":{"start_line":1732,"start_character":8,"end_line":1732,"end_character":36},"in_reply_to":"9fb8cfa7_6ec36739","updated":"2019-06-27 21:36:12.000000000","message":"Agree.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1747,"context_line":"        \"\"\""},{"line_number":1748,"context_line":"        current_resources \u003d allocation.get("},{"line_number":1749,"context_line":"            rp_uuid,"},{"line_number":1750,"context_line":"            {\u0027resources\u0027: collections.defaultdict(lambda: 0)})[\u0027resources\u0027]"},{"line_number":1751,"context_line":""},{"line_number":1752,"context_line":"        for rc, amount in resources.items():"},{"line_number":1753,"context_line":"            current_resources[rc] +\u003d amount"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_ae963f4b","line":1750,"range":{"start_line":1750,"start_character":50,"end_line":1750,"end_character":59},"updated":"2019-06-27 
18:50:06.000000000","message":"or\n\n int","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1747,"context_line":"        \"\"\""},{"line_number":1748,"context_line":"        current_resources \u003d allocation.get("},{"line_number":1749,"context_line":"            rp_uuid,"},{"line_number":1750,"context_line":"            {\u0027resources\u0027: collections.defaultdict(lambda: 0)})[\u0027resources\u0027]"},{"line_number":1751,"context_line":""},{"line_number":1752,"context_line":"        for rc, amount in resources.items():"},{"line_number":1753,"context_line":"            current_resources[rc] +\u003d amount"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_9a2fd03d","line":1750,"range":{"start_line":1750,"start_character":50,"end_line":1750,"end_character":59},"in_reply_to":"9fb8cfa7_a8be32ab","updated":"2019-07-01 14:49:20.000000000","message":"removed.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1747,"context_line":"        \"\"\""},{"line_number":1748,"context_line":"        current_resources \u003d allocation.get("},{"line_number":1749,"context_line":"            rp_uuid,"},{"line_number":1750,"context_line":"            {\u0027resources\u0027: collections.defaultdict(lambda: 0)})[\u0027resources\u0027]"},{"line_number":1751,"context_line":""},{"line_number":1752,"context_line":"        for rc, amount in resources.items():"},{"line_number":1753,"context_line":"            current_resources[rc] +\u003d 
amount"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_a8be32ab","line":1750,"range":{"start_line":1750,"start_character":50,"end_line":1750,"end_character":59},"in_reply_to":"9fb8cfa7_ae963f4b","updated":"2019-06-27 21:36:12.000000000","message":"Agree - don\u0027t care for lambda\u0027s like this.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            values; this cache is updated if a new node is processed."},{"line_number":1765,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1766,"context_line":"            to communicate with the Placement service API."},{"line_number":1767,"context_line":"        :param heal_port_allocations: True if healing port allocation is"},{"line_number":1768,"context_line":"            requested, False otherwise."},{"line_number":1769,"context_line":"        :param neutron: nova.network.neutronv2.api.ClientWrapper to"},{"line_number":1770,"context_line":"            communicate with Neutron"},{"line_number":1771,"context_line":"        :return: A two tuple where the first item is a dict of resources keyed"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_88e48e3d","line":1768,"range":{"start_line":1767,"start_character":8,"end_line":1768,"end_character":39},"updated":"2019-06-27 21:36:12.000000000","message":"Couldn\u0027t we just not call the method if this is False?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"    
        values; this cache is updated if a new node is processed."},{"line_number":1765,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1766,"context_line":"            to communicate with the Placement service API."},{"line_number":1767,"context_line":"        :param heal_port_allocations: True if healing port allocation is"},{"line_number":1768,"context_line":"            requested, False otherwise."},{"line_number":1769,"context_line":"        :param neutron: nova.network.neutronv2.api.ClientWrapper to"},{"line_number":1770,"context_line":"            communicate with Neutron"},{"line_number":1771,"context_line":"        :return: A two tuple where the first item is a dict of resources keyed"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_fa6fec5c","line":1768,"range":{"start_line":1767,"start_character":8,"end_line":1768,"end_character":39},"in_reply_to":"9fb8cfa7_88e48e3d","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"5ceae3c6aec41d9925337a9b6facb6eb38af55e5","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            values; this cache is updated if a new node is processed."},{"line_number":1765,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1766,"context_line":"            to communicate with the Placement service API."},{"line_number":1767,"context_line":"        :param heal_port_allocations: True if healing port allocation is"},{"line_number":1768,"context_line":"            requested, False otherwise."},{"line_number":1769,"context_line":"        :param neutron: nova.network.neutronv2.api.ClientWrapper to"},{"line_number":1770,"context_line":"            communicate with 
Neutron"},{"line_number":1771,"context_line":"        :return: A two tuple where the first item is a dict of resources keyed"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_8bc7a864","line":1768,"range":{"start_line":1767,"start_character":8,"end_line":1768,"end_character":39},"in_reply_to":"9fb8cfa7_88e48e3d","updated":"2019-06-27 22:15:11.000000000","message":"fwiw, I had the same thought about this and a couple of other methods. But I figured I was already loading in enough comments.\n\nApparently Matt has no such compunctions.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            values; this cache is updated if a new node is processed."},{"line_number":1765,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1766,"context_line":"            to communicate with the Placement service API."},{"line_number":1767,"context_line":"        :param heal_port_allocations: True if healing port allocation is"},{"line_number":1768,"context_line":"            requested, False otherwise."},{"line_number":1769,"context_line":"        :param neutron: nova.network.neutronv2.api.ClientWrapper to"},{"line_number":1770,"context_line":"            communicate with Neutron"},{"line_number":1771,"context_line":"        :return: A two tuple where the first item is a dict of resources keyed"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_7a845c0b","line":1768,"range":{"start_line":1767,"start_character":8,"end_line":1768,"end_character":39},"in_reply_to":"9fb8cfa7_8bc7a864","updated":"2019-07-01 14:49:20.000000000","message":"\u003e fwiw, I had the same thought about this and a couple of other\n \u003e methods. 
But I figured I was already loading in enough comments.\n\n@Eric: What are the other methods that needs similar fixing?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"46628177a1d784678de551128c4df04dbf0e91fa","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            values; this cache is updated if a new node is processed."},{"line_number":1765,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1766,"context_line":"            to communicate with the Placement service API."},{"line_number":1767,"context_line":"        :param heal_port_allocations: True if healing port allocation is"},{"line_number":1768,"context_line":"            requested, False otherwise."},{"line_number":1769,"context_line":"        :param neutron: nova.network.neutronv2.api.ClientWrapper to"},{"line_number":1770,"context_line":"            communicate with Neutron"},{"line_number":1771,"context_line":"        :return: A two tuple where the first item is a dict of resources keyed"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_813309f6","line":1768,"range":{"start_line":1767,"start_character":8,"end_line":1768,"end_character":39},"in_reply_to":"9fb8cfa7_8bc7a864","updated":"2019-06-28 01:26:29.000000000","message":"I am a guiltless monster.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1775,"context_line":"        if not heal_port_allocations:"},{"line_number":1776,"context_line":"            return {}, []"},{"line_number":1777,"context_line":""},{"line_number":1778,"context_line":"        # We need to heal port allocations 
for ports that has resource_request"},{"line_number":1779,"context_line":"        # but does not have an RP uuid in the binding:profile.allocation field"},{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_48d45648","line":1778,"range":{"start_line":1778,"start_character":58,"end_line":1778,"end_character":61},"updated":"2019-06-27 21:36:12.000000000","message":"have","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1775,"context_line":"        if not heal_port_allocations:"},{"line_number":1776,"context_line":"            return {}, []"},{"line_number":1777,"context_line":""},{"line_number":1778,"context_line":"        # We need to heal port allocations for ports that has resource_request"},{"line_number":1779,"context_line":"        # but does not have an RP uuid in the binding:profile.allocation field"},{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_faa4cca4","line":1778,"range":{"start_line":1778,"start_character":58,"end_line":1778,"end_character":61},"in_reply_to":"9fb8cfa7_48d45648","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1776,"context_line":"            return {}, []"},{"line_number":1777,"context_line":""},{"line_number":1778,"context_line":"        # We need to heal port allocations for ports that has resource_request"},{"line_number":1779,"context_line":"        # but does not have an RP uuid in the binding:profile.allocation field"},{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1782,"context_line":"            if (port.get(\u0027resource_request\u0027) and"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_68d7da4c","line":1779,"range":{"start_line":1779,"start_character":14,"end_line":1779,"end_character":18},"updated":"2019-06-27 21:36:12.000000000","message":"do","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1776,"context_line":"            return {}, []"},{"line_number":1777,"context_line":""},{"line_number":1778,"context_line":"        # We need to heal port allocations for ports that has resource_request"},{"line_number":1779,"context_line":"        # but does not have an RP uuid in the binding:profile.allocation field"},{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1782,"context_line":"            if (port.get(\u0027resource_request\u0027) 
and"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_9ab910cd","line":1779,"range":{"start_line":1779,"start_character":14,"end_line":1779,"end_character":18},"in_reply_to":"9fb8cfa7_68d7da4c","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1778,"context_line":"        # We need to heal port allocations for ports that has resource_request"},{"line_number":1779,"context_line":"        # but does not have an RP uuid in the binding:profile.allocation field"},{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1782,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1783,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1784,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_e870aa74","line":1781,"updated":"2019-06-27 21:36:12.000000000","message":"Just a review FYI for posterity, gibi and I discussed the differences between hitting neutron directly vs checking the instance network info cache in IRC:\n\nhttp://eavesdrop.openstack.org/irclogs/%23openstack-nova/%23openstack-nova.2019-06-27.log.html#t2019-06-27T13:19:52\n\ntl;dr is that we have to hit neutron to see if there is a resource_request because the point of this change is to heal allocations for ports attached to servers before stein and those servers won\u0027t have the binding:profile.allocation in their vif cache.\n\nWe also could (but don\u0027t) update the instance network info cache binding:profile.allocation in 
this command but it should be healed by the _heal_instance_info_cache periodic in the compute service.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1778,"context_line":"        # We need to heal port allocations for ports that has resource_request"},{"line_number":1779,"context_line":"        # but does not have an RP uuid in the binding:profile.allocation field"},{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1782,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1783,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1784,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_7ae77ce4","line":1781,"in_reply_to":"9fb8cfa7_e870aa74","updated":"2019-07-01 14:49:20.000000000","message":"Added a comment about it.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"19304bdc594c141078f27ab5705b1238f87f85b4","unresolved":false,"context_lines":[{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1782,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1783,"context_line":"                not port.get(\u0027binding:profile\u0027, 
{}).get(\u0027allocation\u0027))]"},{"line_number":1784,"context_line":""},{"line_number":1785,"context_line":"        if not ports_to_heal:"},{"line_number":1786,"context_line":"            # nothing to do, return early"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_65cdb864","line":1783,"updated":"2019-06-28 14:44:58.000000000","message":"Do we also need to make sure the port is actually bound to a host? i.e. the port\u0027s binding:host_id is not None?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1782,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1783,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1784,"context_line":""},{"line_number":1785,"context_line":"        if not ports_to_heal:"},{"line_number":1786,"context_line":"            # nothing to do, return early"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_7a00dc66","line":1783,"in_reply_to":"9fb8cfa7_251fc0b7","updated":"2019-07-01 14:49:20.000000000","message":"added a comment.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"edc4fd1346cd73a1a109260c495fb7d88023dab9","unresolved":false,"context_lines":[{"line_number":1780,"context_line":"        ports_to_heal \u003d ["},{"line_number":1781,"context_line":"            port for port in self._get_ports(ctxt, instance, 
neutron)"},{"line_number":1782,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1783,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1784,"context_line":""},{"line_number":1785,"context_line":"        if not ports_to_heal:"},{"line_number":1786,"context_line":"            # nothing to do, return early"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_251fc0b7","line":1783,"in_reply_to":"9fb8cfa7_65cdb864","updated":"2019-06-28 14:54:01.000000000","message":"Thinking through this, you can have ports attached to an instance via the device_id field on the port and the ports can still be unbound because of shelved instances:\n\nhttps://github.com/openstack/nova/blob/324da0532f3b59aa16233a93a260d289e55860fb/nova/compute/manager.py#L5168\n\nHowever, looking deeper at that, it\u0027s not implemented for neutron:\n\nhttps://github.com/openstack/nova/blob/324da0532f3b59aa16233a93a260d289e55860fb/nova/network/neutronv2/api.py#L3183\n\nThough it probably doesn\u0027t matter for a shelved offloaded instance because we shouldn\u0027t process it anyway since the instance.node would be None:\n\nhttps://github.com/openstack/nova/blob/324da0532f3b59aa16233a93a260d289e55860fb/nova/compute/manager.py#L5204\n\nSo we\u0027d filter out that instance in _heal_allocations_for_instance.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1807,"context_line":"                # with such servers then we can ask the admin to migrate these"},{"line_number":1808,"context_line":"                # servers instead to heal their allocation."},{"line_number":1809,"context_line":"                raise 
exception.MoreThanOneResourceProvidersToHealFrom("},{"line_number":1810,"context_line":"                    rp_uuids\u003dmatching_rp_uuids,"},{"line_number":1811,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_88eeceea","line":1810,"range":{"start_line":1810,"start_character":29,"end_line":1810,"end_character":46},"updated":"2019-06-27 21:36:12.000000000","message":"nit: How will this render in the message? Maybe better to do:\n\nrp_uuids\u003d\u0027,\u0027.join(matching_rp_uuids)\n\n?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1807,"context_line":"                # with such servers then we can ask the admin to migrate these"},{"line_number":1808,"context_line":"                # servers instead to heal their allocation."},{"line_number":1809,"context_line":"                raise exception.MoreThanOneResourceProvidersToHealFrom("},{"line_number":1810,"context_line":"                    rp_uuids\u003dmatching_rp_uuids,"},{"line_number":1811,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_9a2cb002","line":1810,"range":{"start_line":1810,"start_character":29,"end_line":1810,"end_character":46},"in_reply_to":"9fb8cfa7_2bac7c9d","updated":"2019-07-01 
14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"5ceae3c6aec41d9925337a9b6facb6eb38af55e5","unresolved":false,"context_lines":[{"line_number":1807,"context_line":"                # with such servers then we can ask the admin to migrate these"},{"line_number":1808,"context_line":"                # servers instead to heal their allocation."},{"line_number":1809,"context_line":"                raise exception.MoreThanOneResourceProvidersToHealFrom("},{"line_number":1810,"context_line":"                    rp_uuids\u003dmatching_rp_uuids,"},{"line_number":1811,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_2bac7c9d","line":1810,"range":{"start_line":1810,"start_character":29,"end_line":1810,"end_character":46},"in_reply_to":"9fb8cfa7_88eeceea","updated":"2019-06-27 22:15:11.000000000","message":"It renders as a bracketed list of quoted elements\n\n ... [\u0027foo\u0027, \u0027bar\u0027] ...\n\nwhich is probably fine. 
Given the way the message is constructed, I would prefer that to losing the brackets.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1809,"context_line":"                raise exception.MoreThanOneResourceProvidersToHealFrom("},{"line_number":1810,"context_line":"                    rp_uuids\u003dmatching_rp_uuids,"},{"line_number":1811,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""},{"line_number":1814,"context_line":"            elif len(matching_rp_uuids) \u003d\u003d 0:"},{"line_number":1815,"context_line":"                raise exception.NoResourceProviderToHealFrom("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_8ee9fbe3","line":1812,"updated":"2019-06-27 18:50:06.000000000","message":"Gross. 
But yes.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1811,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""},{"line_number":1814,"context_line":"            elif len(matching_rp_uuids) \u003d\u003d 0:"},{"line_number":1815,"context_line":"                raise exception.NoResourceProviderToHealFrom("},{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    instance_uuid\u003dinstance.uuid,"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_2ebecfd4","line":1814,"range":{"start_line":1814,"start_character":12,"end_line":1814,"end_character":16},"updated":"2019-06-27 18:50:06.000000000","message":"nit: could be `if`","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1811,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""},{"line_number":1814,"context_line":"            elif len(matching_rp_uuids) \u003d\u003d 0:"},{"line_number":1815,"context_line":"                raise exception.NoResourceProviderToHealFrom("},{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    
instance_uuid\u003dinstance.uuid,"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_a8c2b251","line":1814,"range":{"start_line":1814,"start_character":12,"end_line":1814,"end_character":16},"in_reply_to":"9fb8cfa7_2ebecfd4","updated":"2019-06-27 21:36:12.000000000","message":"Agree - you\u0027ve already raised out of the conditional above so I\u0027d prefer just \u0027if\u0027 here. It\u0027s a style nit of mine.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1811,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""},{"line_number":1814,"context_line":"            elif len(matching_rp_uuids) \u003d\u003d 0:"},{"line_number":1815,"context_line":"                raise exception.NoResourceProviderToHealFrom("},{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    instance_uuid\u003dinstance.uuid,"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_3a002434","line":1814,"range":{"start_line":1814,"start_character":12,"end_line":1814,"end_character":16},"in_reply_to":"9fb8cfa7_a8c2b251","updated":"2019-07-01 14:49:20.000000000","message":"I moved the core of the loop to a separate function and change the if-elif-else construct to two guard clauses.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1812,"context_line":"             
       instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""},{"line_number":1814,"context_line":"            elif len(matching_rp_uuids) \u003d\u003d 0:"},{"line_number":1815,"context_line":"                raise exception.NoResourceProviderToHealFrom("},{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    instance_uuid\u003dinstance.uuid,"},{"line_number":1818,"context_line":"                    traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_2ea32fb4","line":1815,"range":{"start_line":1815,"start_character":32,"end_line":1815,"end_character":60},"updated":"2019-06-27 18:50:06.000000000","message":"hm.\n\nIs this really worth an exception? Or should we just warn and skip it?\n\nWhat does it mean? That the provider for the port has been deleted? Under what circumstances would that happen?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""},{"line_number":1814,"context_line":"            elif len(matching_rp_uuids) \u003d\u003d 0:"},{"line_number":1815,"context_line":"                raise exception.NoResourceProviderToHealFrom("},{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    instance_uuid\u003dinstance.uuid,"},{"line_number":1818,"context_line":"                    
traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_e89f4a4d","line":1815,"range":{"start_line":1815,"start_character":32,"end_line":1815,"end_character":60},"in_reply_to":"9fb8cfa7_2ea32fb4","updated":"2019-06-27 21:36:12.000000000","message":"Well, or it could mean the compute node resource provider that the instance is on with the QoS port doesn\u0027t have whatever trait is needed for the port\u0027s resource request - but that\u0027s really a neutron issue isn\u0027t it? So the user has a port attached to a server that we can\u0027t heal in placement and is really invalid, so what is the recourse? Fix neutron? Manually fix the provider in placement? Detach the port and/or recreate the server? Whatever we come up with it\u0027s probably good debug type information for the error message to the operator.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1812,"context_line":"                    instance_uuid\u003dinstance.uuid)"},{"line_number":1813,"context_line":""},{"line_number":1814,"context_line":"            elif len(matching_rp_uuids) \u003d\u003d 0:"},{"line_number":1815,"context_line":"                raise exception.NoResourceProviderToHealFrom("},{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    instance_uuid\u003dinstance.uuid,"},{"line_number":1818,"context_line":"                    
traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_7a145c4d","line":1815,"range":{"start_line":1815,"start_character":32,"end_line":1815,"end_character":60},"in_reply_to":"9fb8cfa7_e89f4a4d","updated":"2019-07-01 14:49:20.000000000","message":"This is a neutron configuration or neutron - placement communication issue. I extended the exception text to refer to the documentation https://docs.openstack.org/neutron/latest/admin/config-qos-min-bw.html\n\nBasically the neutron config needs to be fixed and then the heal allocation script can be run again.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    instance_uuid\u003dinstance.uuid,"},{"line_number":1818,"context_line":"                    traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1819,"context_line":"                    rp_uuid\u003dnode_uuid)"},{"line_number":1820,"context_line":""},{"line_number":1821,"context_line":"            else:  # len(matching_rps) \u003d\u003d 1"},{"line_number":1822,"context_line":"                # We found one RP that matches the traits. 
Assume that we can"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_6e9047ee","line":1819,"range":{"start_line":1819,"start_character":20,"end_line":1819,"end_character":27},"updated":"2019-06-27 18:50:06.000000000","message":"I would call this kwarg node_uuid","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1816,"context_line":"                    port_id\u003dport[\u0027id\u0027],"},{"line_number":1817,"context_line":"                    instance_uuid\u003dinstance.uuid,"},{"line_number":1818,"context_line":"                    traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1819,"context_line":"                    rp_uuid\u003dnode_uuid)"},{"line_number":1820,"context_line":""},{"line_number":1821,"context_line":"            else:  # len(matching_rps) \u003d\u003d 1"},{"line_number":1822,"context_line":"                # We found one RP that matches the traits. 
Assume that we can"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_5d1f125a","line":1819,"range":{"start_line":1819,"start_character":20,"end_line":1819,"end_character":27},"in_reply_to":"9fb8cfa7_6e9047ee","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1818,"context_line":"                    traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1819,"context_line":"                    rp_uuid\u003dnode_uuid)"},{"line_number":1820,"context_line":""},{"line_number":1821,"context_line":"            else:  # len(matching_rps) \u003d\u003d 1"},{"line_number":1822,"context_line":"                # We found one RP that matches the traits. Assume that we can"},{"line_number":1823,"context_line":"                # allocate the resources from it. 
If there are not enough"},{"line_number":1824,"context_line":"                # inventory left on the RP then the PUT /allocations placement"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_0eac2b9d","line":1821,"range":{"start_line":1821,"start_character":12,"end_line":1821,"end_character":16},"updated":"2019-06-27 18:50:06.000000000","message":"nit: redundant","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1818,"context_line":"                    traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1819,"context_line":"                    rp_uuid\u003dnode_uuid)"},{"line_number":1820,"context_line":""},{"line_number":1821,"context_line":"            else:  # len(matching_rps) \u003d\u003d 1"},{"line_number":1822,"context_line":"                # We found one RP that matches the traits. Assume that we can"},{"line_number":1823,"context_line":"                # allocate the resources from it. 
If there are not enough"},{"line_number":1824,"context_line":"                # inventory left on the RP then the PUT /allocations placement"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_c896064d","line":1821,"range":{"start_line":1821,"start_character":12,"end_line":1821,"end_character":16},"in_reply_to":"9fb8cfa7_0eac2b9d","updated":"2019-06-27 21:36:12.000000000","message":"Yeah please drop the else and the nesting here since you\u0027ve already raised from the conditions above.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1818,"context_line":"                    traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1819,"context_line":"                    rp_uuid\u003dnode_uuid)"},{"line_number":1820,"context_line":""},{"line_number":1821,"context_line":"            else:  # len(matching_rps) \u003d\u003d 1"},{"line_number":1822,"context_line":"                # We found one RP that matches the traits. Assume that we can"},{"line_number":1823,"context_line":"                # allocate the resources from it. 
If there are not enough"},{"line_number":1824,"context_line":"                # inventory left on the RP then the PUT /allocations placement"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_1d3d1ac3","line":1821,"range":{"start_line":1821,"start_character":12,"end_line":1821,"end_character":16},"in_reply_to":"9fb8cfa7_37c7f666","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4f74de86d0df58f4f064c227871dd5a13fbe9635","unresolved":false,"context_lines":[{"line_number":1818,"context_line":"                    traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1819,"context_line":"                    rp_uuid\u003dnode_uuid)"},{"line_number":1820,"context_line":""},{"line_number":1821,"context_line":"            else:  # len(matching_rps) \u003d\u003d 1"},{"line_number":1822,"context_line":"                # We found one RP that matches the traits. Assume that we can"},{"line_number":1823,"context_line":"                # allocate the resources from it. 
If there are not enough"},{"line_number":1824,"context_line":"                # inventory left on the RP then the PUT /allocations placement"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_37c7f666","line":1821,"range":{"start_line":1821,"start_character":12,"end_line":1821,"end_character":16},"in_reply_to":"9fb8cfa7_c896064d","updated":"2019-06-28 12:38:06.000000000","message":"for me elif is more explicit and the if + exception is more implicit.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1832,"context_line":"                # We also need to record the RP we are allocated from in the"},{"line_number":1833,"context_line":"                # port. This will be sent back to Neutron after the allocation"},{"line_number":1834,"context_line":"                # is updated."},{"line_number":1835,"context_line":"                binding_profile \u003d port.get(\u0027binding:profile\u0027, {})"},{"line_number":1836,"context_line":"                binding_profile[\u0027allocation\u0027] \u003d rp_uuid"},{"line_number":1837,"context_line":"                port[\u0027binding:profile\u0027] \u003d binding_profile"},{"line_number":1838,"context_line":""},{"line_number":1839,"context_line":"        return allocations, ports_to_heal"},{"line_number":1840,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_680b5a8d","line":1837,"range":{"start_line":1835,"start_character":16,"end_line":1837,"end_character":57},"updated":"2019-06-27 21:36:12.000000000","message":"nit: might be nice to re-use our constants from the neutronv2.api module.\n\nheh - why don\u0027t those actually live in the nova/network/neutron/v2/constants 
module....","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1832,"context_line":"                # We also need to record the RP we are allocated from in the"},{"line_number":1833,"context_line":"                # port. This will be sent back to Neutron after the allocation"},{"line_number":1834,"context_line":"                # is updated."},{"line_number":1835,"context_line":"                binding_profile \u003d port.get(\u0027binding:profile\u0027, {})"},{"line_number":1836,"context_line":"                binding_profile[\u0027allocation\u0027] \u003d rp_uuid"},{"line_number":1837,"context_line":"                port[\u0027binding:profile\u0027] \u003d binding_profile"},{"line_number":1838,"context_line":""},{"line_number":1839,"context_line":"        return allocations, ports_to_heal"},{"line_number":1840,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_bd996e1f","line":1837,"range":{"start_line":1835,"start_character":16,"end_line":1837,"end_character":57},"in_reply_to":"9fb8cfa7_680b5a8d","updated":"2019-07-01 14:49:20.000000000","message":"Let me do that refactor in a follow up.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1844,"context_line":""},{"line_number":1845,"context_line":"    def _port_update_to_openstack_cli(self, ports_to_update):"},{"line_number":1846,"context_line":"        template \u003d (\"openstack port set %(port_id)s \""},{"line_number":1847,"context_line":"                    \"--binding-profile 
%(binding_profile)s\")"},{"line_number":1848,"context_line":"        result \u003d []"},{"line_number":1849,"context_line":"        for port in ports_to_update:"},{"line_number":1850,"context_line":"            binding_cli \u003d self._port_binding_profile_to_cli_fragment("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_2e7b2ff5","line":1847,"range":{"start_line":1847,"start_character":39,"end_line":1847,"end_character":58},"updated":"2019-06-27 18:50:06.000000000","message":"Should this be quoted?\n\nI guess it\u0027s not necessary.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1844,"context_line":""},{"line_number":1845,"context_line":"    def _port_update_to_openstack_cli(self, ports_to_update):"},{"line_number":1846,"context_line":"        template \u003d (\"openstack port set %(port_id)s \""},{"line_number":1847,"context_line":"                    \"--binding-profile %(binding_profile)s\")"},{"line_number":1848,"context_line":"        result \u003d []"},{"line_number":1849,"context_line":"        for port in ports_to_update:"},{"line_number":1850,"context_line":"            binding_cli \u003d self._port_binding_profile_to_cli_fragment("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_7dea1674","line":1847,"range":{"start_line":1847,"start_character":39,"end_line":1847,"end_character":58},"in_reply_to":"9fb8cfa7_2e7b2ff5","updated":"2019-07-01 14:49:20.000000000","message":"checked, not necessary","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1864,"context_line":"                        }"},{"line_number":1865,"context_line":"                    }"},{"line_number":1866,"context_line":"                }"},{"line_number":1867,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1868,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1869,"context_line":"            raise exception.UnableToUpdatePorts("},{"line_number":1870,"context_line":"                error\u003de.message,"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_a81ab2c2","line":1867,"updated":"2019-06-27 21:36:12.000000000","message":"Per my comment about verbose output below, I could see this being a useful place to say we\u0027re updating the port binding profile with whatever value here. 
We do something similar when updating a port binding here:\n\nhttps://github.com/openstack/nova/blob/4363b10f5b9eaa7be2df36a94b6bbad5f4674c57/nova/network/neutronv2/api.py#L3244","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1864,"context_line":"                        }"},{"line_number":1865,"context_line":"                    }"},{"line_number":1866,"context_line":"                }"},{"line_number":1867,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1868,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1869,"context_line":"            raise exception.UnableToUpdatePorts("},{"line_number":1870,"context_line":"                error\u003de.message,"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_bd888e70","line":1867,"in_reply_to":"9fb8cfa7_a81ab2c2","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1865,"context_line":"                    }"},{"line_number":1866,"context_line":"                }"},{"line_number":1867,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1868,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1869,"context_line":"            raise exception.UnableToUpdatePorts("},{"line_number":1870,"context_line":"                error\u003de.message,"},{"line_number":1871,"context_line":"                
cli\u003dself._port_update_to_openstack_cli(ports_to_update))"},{"line_number":1872,"context_line":""},{"line_number":1873,"context_line":"    def _heal_missing_alloc(self, ctxt, instance, node_cache):"},{"line_number":1874,"context_line":"        node_uuid \u003d self._get_compute_node_uuid("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_ced7f3be","line":1871,"range":{"start_line":1868,"start_character":0,"end_line":1871,"end_character":72},"updated":"2019-06-27 18:50:06.000000000","message":"Mm, this is going to make us bail out on the first failure, and print the CLI only for that failure, even though all the remaining updates haven\u0027t been done (because not attempted). The admin will run the CLI and think they\u0027re done, but they\u0027re not.\n\nI think the exception trap should be inside the loop, and we should be collecting CLIs for each failure. Then at the end, if we collected any, we can raise the UnableToUpdatePorts exception containing *all* of the CLIs.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1865,"context_line":"                    }"},{"line_number":1866,"context_line":"                }"},{"line_number":1867,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1868,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1869,"context_line":"            raise exception.UnableToUpdatePorts("},{"line_number":1870,"context_line":"                error\u003de.message,"},{"line_number":1871,"context_line":"                cli\u003dself._port_update_to_openstack_cli(ports_to_update))"},{"line_number":1872,"context_line":""},{"line_number":1873,"context_line":"    def 
_heal_missing_alloc(self, ctxt, instance, node_cache):"},{"line_number":1874,"context_line":"        node_uuid \u003d self._get_compute_node_uuid("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_bdd64e4e","line":1871,"range":{"start_line":1868,"start_character":0,"end_line":1871,"end_character":72},"in_reply_to":"9fb8cfa7_481696aa","updated":"2019-07-01 14:49:20.000000000","message":"This will bail out at the first failure, BUT it will print all the necessary neutron port update commands as it works based on whole ports_to_update list. \nActually it asks the admin to update such ports that was successfully updated by the for loop before the failure. Which is not a problem as we ask the operation is idempotent.\n\nThis code is changed to implement automatic rollback attempt instead.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1865,"context_line":"                    }"},{"line_number":1866,"context_line":"                }"},{"line_number":1867,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1868,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1869,"context_line":"            raise exception.UnableToUpdatePorts("},{"line_number":1870,"context_line":"                error\u003de.message,"},{"line_number":1871,"context_line":"                cli\u003dself._port_update_to_openstack_cli(ports_to_update))"},{"line_number":1872,"context_line":""},{"line_number":1873,"context_line":"    def _heal_missing_alloc(self, ctxt, instance, node_cache):"},{"line_number":1874,"context_line":"        node_uuid \u003d 
self._get_compute_node_uuid("}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_481696aa","line":1871,"range":{"start_line":1868,"start_character":0,"end_line":1871,"end_character":72},"in_reply_to":"9fb8cfa7_ced7f3be","updated":"2019-06-27 21:36:12.000000000","message":"nice catch","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1903,"context_line":"        :param instance: the instance to check for allocation healing"},{"line_number":1904,"context_line":"        :param node_cache: dict of Instance.node keys to ComputeNode.uuid"},{"line_number":1905,"context_line":"            values; this cache is updated if a new node is processed."},{"line_number":1906,"context_line":"        :param outout: function that takes a single message for verbose output"},{"line_number":1907,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1908,"context_line":"            to communicate with the Placement service API."},{"line_number":1909,"context_line":"        :param dry_run: Process instances and print output but do not commit"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_881faeb5","line":1906,"range":{"start_line":1906,"start_character":8,"end_line":1906,"end_character":78},"updated":"2019-06-27 21:36:12.000000000","message":"I would suggest passing this through where it could be useful for verbose output if someone is trying to heal a single instance and things aren\u0027t working - the CLI is pretty complicated so having some output along the way would be invaluable for debug.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1903,"context_line":"        :param instance: the instance to check for allocation healing"},{"line_number":1904,"context_line":"        :param node_cache: dict of Instance.node keys to ComputeNode.uuid"},{"line_number":1905,"context_line":"            values; this cache is updated if a new node is processed."},{"line_number":1906,"context_line":"        :param outout: function that takes a single message for verbose output"},{"line_number":1907,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1908,"context_line":"            to communicate with the Placement service API."},{"line_number":1909,"context_line":"        :param dry_run: Process instances and print output but do not commit"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_7d00d666","line":1906,"range":{"start_line":1906,"start_character":8,"end_line":1906,"end_character":78},"in_reply_to":"9fb8cfa7_881faeb5","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1964,"context_line":"            allocations \u003d self._heal_missing_project_and_user_id("},{"line_number":1965,"context_line":"                allocations, instance)"},{"line_number":1966,"context_line":""},{"line_number":1967,"context_line":"        port_allocations, ports_to_update \u003d self._get_port_allocations_to_heal("},{"line_number":1968,"context_line":"            ctxt, instance, node_cache, placement, heal_port_allocations,"},{"line_number":1969,"context_line":"            
neutron)"},{"line_number":1970,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_88748eff","line":1967,"updated":"2019-06-27 21:36:12.000000000","message":"As mentioned earlier, I\u0027d not pass heal_port_allocations to the method here and instead just check if heal_port_allocations and if True then call the method else don\u0027t.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1964,"context_line":"            allocations \u003d self._heal_missing_project_and_user_id("},{"line_number":1965,"context_line":"                allocations, instance)"},{"line_number":1966,"context_line":""},{"line_number":1967,"context_line":"        port_allocations, ports_to_update \u003d self._get_port_allocations_to_heal("},{"line_number":1968,"context_line":"            ctxt, instance, node_cache, placement, heal_port_allocations,"},{"line_number":1969,"context_line":"            neutron)"},{"line_number":1970,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_ddbc6207","line":1967,"in_reply_to":"9fb8cfa7_88748eff","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1982,"context_line":"                        \u0027instance\u0027: instance.uuid,"},{"line_number":1983,"context_line":"                        \u0027allocations\u0027: allocations})"},{"line_number":1984,"context_line":"            else:"},{"line_number":1985,"context_line":"                resp \u003d 
placement.put_allocations(ctxt, instance.uuid,"},{"line_number":1986,"context_line":"                                                 allocations)"},{"line_number":1987,"context_line":"                if resp:"},{"line_number":1988,"context_line":"                    # Update any port that has been changed"},{"line_number":1989,"context_line":"                    self._update_ports(neutron, ports_to_update)"},{"line_number":1990,"context_line":"                    output(_(\u0027Successfully %(operation)sd allocations for \u0027"},{"line_number":1991,"context_line":"                             \u0027instance %(instance)s.\u0027) %"},{"line_number":1992,"context_line":"                           {\u0027operation\u0027: need_healing.lower(),"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_48f21672","line":1989,"range":{"start_line":1985,"start_character":16,"end_line":1989,"end_character":64},"updated":"2019-06-27 21:36:12.000000000","message":"As I mentioned before, can we avoid the crazy manual recovery and potential duplicate allocations if the manual recovery was goofed up by swapping the order of operations here such that we attempt the port updates first, then put the allocations and if the allocation update fails, rollback the port updates - that seems a lot easier and cleaner to me than failing and then giving a complicated set of manual steps to heal (which as noted automated tooling wouldn\u0027t be good with parsing).","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1987,"context_line":"                if resp:"},{"line_number":1988,"context_line":"                    # Update any port that has been changed"},{"line_number":1989,"context_line":"                    self._update_ports(neutron, 
ports_to_update)"},{"line_number":1990,"context_line":"                    output(_(\u0027Successfully %(operation)sd allocations for \u0027"},{"line_number":1991,"context_line":"                             \u0027instance %(instance)s.\u0027) %"},{"line_number":1992,"context_line":"                           {\u0027operation\u0027: need_healing.lower(),"},{"line_number":1993,"context_line":"                            \u0027instance\u0027: instance.uuid})"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_4e0b038d","line":1990,"range":{"start_line":1990,"start_character":43,"end_line":1990,"end_character":57},"updated":"2019-06-27 18:50:06.000000000","message":"whoah\n\nI know this isn\u0027t you, but this is a horrible untranslatable nightmare.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"36d66d6be65611fd13dd91215c5034a6ddeaa51b","unresolved":false,"context_lines":[{"line_number":1987,"context_line":"                if resp:"},{"line_number":1988,"context_line":"                    # Update any port that has been changed"},{"line_number":1989,"context_line":"                    self._update_ports(neutron, ports_to_update)"},{"line_number":1990,"context_line":"                    output(_(\u0027Successfully %(operation)sd allocations for \u0027"},{"line_number":1991,"context_line":"                             \u0027instance %(instance)s.\u0027) %"},{"line_number":1992,"context_line":"                           {\u0027operation\u0027: need_healing.lower(),"},{"line_number":1993,"context_line":"                            \u0027instance\u0027: instance.uuid})"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_0e52c235","line":1990,"range":{"start_line":1990,"start_character":43,"end_line":1990,"end_character":57},"in_reply_to":"9fb8cfa7_3d3b3e38","updated":"2019-07-01 
16:15:07.000000000","message":"\u003e Eric requested a specific message for each case in\n \u003e https://review.opendev.org/#/c/655459/5/nova/cmd/manage.py@1737\n\nUgh, and then I approved it with the %(...)sd. /me hangs head in shame.\n\nFollowup is fine. We should really have separate messages. We shouldn\u0027t be wishy-washy about \"but we\u0027re probably not translating anyway\" - either we behave properly when using _() or we remove _().","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1987,"context_line":"                if resp:"},{"line_number":1988,"context_line":"                    # Update any port that has been changed"},{"line_number":1989,"context_line":"                    self._update_ports(neutron, ports_to_update)"},{"line_number":1990,"context_line":"                    output(_(\u0027Successfully %(operation)sd allocations for \u0027"},{"line_number":1991,"context_line":"                             \u0027instance %(instance)s.\u0027) %"},{"line_number":1992,"context_line":"                           {\u0027operation\u0027: need_healing.lower(),"},{"line_number":1993,"context_line":"                            \u0027instance\u0027: instance.uuid})"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_88900eef","line":1990,"range":{"start_line":1990,"start_character":43,"end_line":1990,"end_character":57},"in_reply_to":"9fb8cfa7_4e0b038d","updated":"2019-06-27 21:36:12.000000000","message":"Oh but it was:\n\nhttps://review.opendev.org/#/c/655459/8/nova/cmd/manage.py@1761\n\nIt will end up being \"created\" or \"updated\" in English but yeah it would be hard to translate. 
However, I\u0027m pretty sure we don\u0027t get translated anymore anyway so...meh?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1987,"context_line":"                if resp:"},{"line_number":1988,"context_line":"                    # Update any port that has been changed"},{"line_number":1989,"context_line":"                    self._update_ports(neutron, ports_to_update)"},{"line_number":1990,"context_line":"                    output(_(\u0027Successfully %(operation)sd allocations for \u0027"},{"line_number":1991,"context_line":"                             \u0027instance %(instance)s.\u0027) %"},{"line_number":1992,"context_line":"                           {\u0027operation\u0027: need_healing.lower(),"},{"line_number":1993,"context_line":"                            \u0027instance\u0027: instance.uuid})"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_3d3b3e38","line":1990,"range":{"start_line":1990,"start_character":43,"end_line":1990,"end_character":57},"in_reply_to":"9fb8cfa7_88900eef","updated":"2019-07-01 14:49:20.000000000","message":"The original distinction between creating and updating the instance allocation for an instance is hard to carry over to the generic function. 
Originally I wanted to say something generic instead but Eric requested a specific message for each case in https://review.opendev.org/#/c/655459/5/nova/cmd/manage.py@1737\n\nIf we can agree what to do here then I can fix that up in a followup.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":2104,"context_line":"               \u0027--max-count has no effect.\u0027)"},{"line_number":2105,"context_line":"    @args(\u0027--skip-port-allocations\u0027, action\u003d\u0027store_true\u0027,"},{"line_number":2106,"context_line":"          dest\u003d\u0027skip_port_allocations\u0027, default\u003dFalse,"},{"line_number":2107,"context_line":"          help\u003d\u0027Skip the healing of the resource allocaton of bound ports. \u0027"},{"line_number":2108,"context_line":"               \u0027E.g. 
healing bandwidth resource allocation for ports having \u0027"},{"line_number":2109,"context_line":"               \u0027minimum QoS policy rules attached.\u0027)"},{"line_number":2110,"context_line":"    def heal_allocations(self, max_count\u003dNone, verbose\u003dFalse, dry_run\u003dFalse,"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_cec29385","line":2107,"range":{"start_line":2107,"start_character":49,"end_line":2107,"end_character":58},"updated":"2019-06-27 18:50:06.000000000","message":"allocation (or allocations might be better)","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2104,"context_line":"               \u0027--max-count has no effect.\u0027)"},{"line_number":2105,"context_line":"    @args(\u0027--skip-port-allocations\u0027, action\u003d\u0027store_true\u0027,"},{"line_number":2106,"context_line":"          dest\u003d\u0027skip_port_allocations\u0027, default\u003dFalse,"},{"line_number":2107,"context_line":"          help\u003d\u0027Skip the healing of the resource allocaton of bound ports. \u0027"},{"line_number":2108,"context_line":"               \u0027E.g. 
healing bandwidth resource allocation for ports having \u0027"},{"line_number":2109,"context_line":"               \u0027minimum QoS policy rules attached.\u0027)"},{"line_number":2110,"context_line":"    def heal_allocations(self, max_count\u003dNone, verbose\u003dFalse, dry_run\u003dFalse,"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_dd730215","line":2107,"range":{"start_line":2107,"start_character":49,"end_line":2107,"end_character":58},"in_reply_to":"9fb8cfa7_cec29385","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":2106,"context_line":"          dest\u003d\u0027skip_port_allocations\u0027, default\u003dFalse,"},{"line_number":2107,"context_line":"          help\u003d\u0027Skip the healing of the resource allocaton of bound ports. \u0027"},{"line_number":2108,"context_line":"               \u0027E.g. healing bandwidth resource allocation for ports having \u0027"},{"line_number":2109,"context_line":"               \u0027minimum QoS policy rules attached.\u0027)"},{"line_number":2110,"context_line":"    def heal_allocations(self, max_count\u003dNone, verbose\u003dFalse, dry_run\u003dFalse,"},{"line_number":2111,"context_line":"                         instance_uuid\u003dNone, skip_port_allocations\u003dFalse):"},{"line_number":2112,"context_line":"        \"\"\"Heals instance allocations in the Placement service"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_c8b36697","line":2109,"updated":"2019-06-27 21:36:12.000000000","message":"Like I mentioned in the CLI docs, I\u0027d probably mention something in here about specifying this for performance reasons if your deployment does not provide these types of ports to your users. 
But maybe that\u0027s good enough in the CLI docs? Worst case is someone doesn\u0027t specify this but it doesn\u0027t do any harm (besides extra traffic on the neutron API).","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2106,"context_line":"          dest\u003d\u0027skip_port_allocations\u0027, default\u003dFalse,"},{"line_number":2107,"context_line":"          help\u003d\u0027Skip the healing of the resource allocaton of bound ports. \u0027"},{"line_number":2108,"context_line":"               \u0027E.g. healing bandwidth resource allocation for ports having \u0027"},{"line_number":2109,"context_line":"               \u0027minimum QoS policy rules attached.\u0027)"},{"line_number":2110,"context_line":"    def heal_allocations(self, max_count\u003dNone, verbose\u003dFalse, dry_run\u003dFalse,"},{"line_number":2111,"context_line":"                         instance_uuid\u003dNone, skip_port_allocations\u003dFalse):"},{"line_number":2112,"context_line":"        \"\"\"Heals instance allocations in the Placement service"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_1d6e7a2e","line":2109,"in_reply_to":"9fb8cfa7_c8b36697","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":2210,"context_line":"                try:"},{"line_number":2211,"context_line":"                    num_processed +\u003d self._heal_instances_in_cell("},{"line_number":2212,"context_line":"                        cctxt, limit_per_cell, unlimited, output, 
placement,"},{"line_number":2213,"context_line":"                        dry_run, instance_uuid, heal_port_allocations, neutron)"},{"line_number":2214,"context_line":"                except exception.ComputeHostNotFound as e:"},{"line_number":2215,"context_line":"                    print(e.format_message())"},{"line_number":2216,"context_line":"                    return 2"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_6e8fe774","line":2213,"range":{"start_line":2213,"start_character":48,"end_line":2213,"end_character":78},"updated":"2019-06-27 18:50:06.000000000","message":"You could have just passed `neutron` and keyed off of its not-None-ness to indicate you\u0027re healing port allocations.\n\nBut this is more explicit.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2210,"context_line":"                try:"},{"line_number":2211,"context_line":"                    num_processed +\u003d self._heal_instances_in_cell("},{"line_number":2212,"context_line":"                        cctxt, limit_per_cell, unlimited, output, placement,"},{"line_number":2213,"context_line":"                        dry_run, instance_uuid, heal_port_allocations, neutron)"},{"line_number":2214,"context_line":"                except exception.ComputeHostNotFound as e:"},{"line_number":2215,"context_line":"                    print(e.format_message())"},{"line_number":2216,"context_line":"                    return 2"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_9d634a42","line":2213,"range":{"start_line":2213,"start_character":48,"end_line":2213,"end_character":78},"in_reply_to":"9fb8cfa7_6e8fe774","updated":"2019-07-01 14:49:20.000000000","message":"I like being 
explicit","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":2219,"context_line":"                        exception.NoResourceProviderToHealFrom,"},{"line_number":2220,"context_line":"                        exception.MoreThanOneResourceProvidersToHealFrom) as e:"},{"line_number":2221,"context_line":"                    print(e.format_message())"},{"line_number":2222,"context_line":"                    return 3"},{"line_number":2223,"context_line":"                except exception.UnableToQueryPorts as e:"},{"line_number":2224,"context_line":"                    print(e.format_message())"},{"line_number":2225,"context_line":"                    return 5"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_286ec2ff","line":2222,"updated":"2019-06-27 21:36:12.000000000","message":"Ack, the description of this case is sufficiently vague for the new exceptions handled here:\n\n* 3: Unable to create (or update) allocations for an instance against its compute node resource provider.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2219,"context_line":"                        exception.NoResourceProviderToHealFrom,"},{"line_number":2220,"context_line":"                        exception.MoreThanOneResourceProvidersToHealFrom) as e:"},{"line_number":2221,"context_line":"                    print(e.format_message())"},{"line_number":2222,"context_line":"                    return 3"},{"line_number":2223,"context_line":"                except exception.UnableToQueryPorts as 
e:"},{"line_number":2224,"context_line":"                    print(e.format_message())"},{"line_number":2225,"context_line":"                    return 5"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_3d525e68","line":2222,"in_reply_to":"9fb8cfa7_286ec2ff","updated":"2019-07-01 14:49:20.000000000","message":"More exception ended up here. Is it OK to report exit code 3 if placement cannot be reached?","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1673,"context_line":"        \"\"\""},{"line_number":1674,"context_line":"        try:"},{"line_number":1675,"context_line":"            return neutron.list_ports("},{"line_number":1676,"context_line":"                ctxt, device_id\u003dinstance.uuid)[\u0027ports\u0027]"},{"line_number":1677,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1678,"context_line":"            raise exception.UnableToQueryPorts("},{"line_number":1679,"context_line":"                instance_uuid\u003dinstance.uuid, error\u003dsix.text_type(e))"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_a6b4e636","line":1676,"updated":"2019-07-03 16:49:22.000000000","message":"nit: As mentioned before, if we want to restrict to just a small set of fields (id, resource_request, binding:profile) we can do that with the fields kwarg like this:\n\nhttps://github.com/openstack/nova/blob/3c5aec113c4f2e6e5811b3e9be333c80fb740ad8/nova/network/neutronv2/api.py#L2244","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1673,"context_line":"        \"\"\""},{"line_number":1674,"context_line":"        try:"},{"line_number":1675,"context_line":"            return neutron.list_ports("},{"line_number":1676,"context_line":"                ctxt, device_id\u003dinstance.uuid)[\u0027ports\u0027]"},{"line_number":1677,"context_line":"        except neutron_client_exc.NeutronClientException as e:"},{"line_number":1678,"context_line":"            raise exception.UnableToQueryPorts("},{"line_number":1679,"context_line":"                instance_uuid\u003dinstance.uuid, error\u003dsix.text_type(e))"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_d4abbe72","line":1676,"in_reply_to":"7faddb67_a6b4e636","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1683,"context_line":"        \"\"\"Find the RPs that have all the required traits in the given rp tree."},{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"        :param required_traits: the traits that need to be supported by"},{"line_number":1688,"context_line":"            the returned RP."},{"line_number":1689,"context_line":"        :param placement: 
nova.scheduler.client.report.SchedulerReportClient"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_26823651","line":1686,"range":{"start_line":1686,"start_character":54,"end_line":1686,"end_character":57},"updated":"2019-07-03 16:49:22.000000000","message":"to","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1683,"context_line":"        \"\"\"Find the RPs that have all the required traits in the given rp tree."},{"line_number":1684,"context_line":""},{"line_number":1685,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"        :param required_traits: the traits that need to be supported by"},{"line_number":1688,"context_line":"            the returned RP."},{"line_number":1689,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_54b10ee1","line":1686,"range":{"start_line":1686,"start_character":54,"end_line":1686,"end_character":57},"in_reply_to":"7faddb67_26823651","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1685,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"        :param 
required_traits: the traits that need to be supported by"},{"line_number":1688,"context_line":"            the returned RP."},{"line_number":1689,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1690,"context_line":"            to communicate with the Placement service API."},{"line_number":1691,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_c6db227b","line":1688,"range":{"start_line":1688,"start_character":25,"end_line":1688,"end_character":27},"updated":"2019-07-03 16:49:22.000000000","message":"resource providers (plural - and I prefer spelling it out if possible)","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1685,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1686,"context_line":"        :param rp_uuid: the RP uuid that will be used the query the tree."},{"line_number":1687,"context_line":"        :param required_traits: the traits that need to be supported by"},{"line_number":1688,"context_line":"            the returned RP."},{"line_number":1689,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1690,"context_line":"            to communicate with the Placement service API."},{"line_number":1691,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_34c81254","line":1688,"range":{"start_line":1688,"start_character":25,"end_line":1688,"end_character":27},"in_reply_to":"7faddb67_c6db227b","updated":"2019-07-04 
13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1695,"context_line":"            in the tree for the provider rp_uuid."},{"line_number":1696,"context_line":"        \"\"\""},{"line_number":1697,"context_line":"        try:"},{"line_number":1698,"context_line":"            rps \u003d placement.get_providers_in_tree(ctxt, rp_uuid)"},{"line_number":1699,"context_line":"            rps_with_traits \u003d {"},{"line_number":1700,"context_line":"                rp[\u0027uuid\u0027]:"},{"line_number":1701,"context_line":"                    placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_c6c48295","line":1698,"range":{"start_line":1698,"start_character":28,"end_line":1698,"end_character":49},"updated":"2019-07-03 16:49:22.000000000","message":"nit: this could also raise ResourceProviderRetrievalFailed which isn\u0027t in your docstring","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1695,"context_line":"            in the tree for the provider rp_uuid."},{"line_number":1696,"context_line":"        \"\"\""},{"line_number":1697,"context_line":"        try:"},{"line_number":1698,"context_line":"            rps \u003d placement.get_providers_in_tree(ctxt, rp_uuid)"},{"line_number":1699,"context_line":"            rps_with_traits \u003d {"},{"line_number":1700,"context_line":"                rp[\u0027uuid\u0027]:"},{"line_number":1701,"context_line":"                    
placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_941306b9","line":1698,"range":{"start_line":1698,"start_character":28,"end_line":1698,"end_character":49},"in_reply_to":"7faddb67_c6c48295","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1705,"context_line":""},{"line_number":1706,"context_line":"        matching_rps \u003d ["},{"line_number":1707,"context_line":"            uuid for uuid, provided_traits in rps_with_traits.items()"},{"line_number":1708,"context_line":"                if set(required_traits).issubset(set(provided_traits))]"},{"line_number":1709,"context_line":""},{"line_number":1710,"context_line":"        return matching_rps"},{"line_number":1711,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_26455618","line":1708,"range":{"start_line":1708,"start_character":49,"end_line":1708,"end_character":52},"updated":"2019-07-03 16:49:22.000000000","message":"This is unnecessary since provided_traits is already a set:\n\nhttps://github.com/openstack/nova/blob/3c5aec113c4f2e6e5811b3e9be333c80fb740ad8/nova/scheduler/client/report.py#L393","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1705,"context_line":""},{"line_number":1706,"context_line":"        matching_rps \u003d ["},{"line_number":1707,"context_line":"            uuid for uuid, provided_traits in 
rps_with_traits.items()"},{"line_number":1708,"context_line":"                if set(required_traits).issubset(set(provided_traits))]"},{"line_number":1709,"context_line":""},{"line_number":1710,"context_line":"        return matching_rps"},{"line_number":1711,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_74120ab4","line":1708,"range":{"start_line":1708,"start_character":49,"end_line":1708,"end_character":52},"in_reply_to":"7faddb67_26455618","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1739,"context_line":""},{"line_number":1740,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1741,"context_line":"        :param node_uuid: the ComputeNode uuid the instance is running on."},{"line_number":1742,"context_line":"        :param port: the port dict returned from neturon"},{"line_number":1743,"context_line":"        :param instance_uuid: The uuid of the instance the port is bound to"},{"line_number":1744,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1745,"context_line":"            to communicate with the Placement service API."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_8653eae2","line":1742,"range":{"start_line":1742,"start_character":49,"end_line":1742,"end_character":56},"updated":"2019-07-03 16:49:22.000000000","message":"neutron","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1739,"context_line":""},{"line_number":1740,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1741,"context_line":"        :param node_uuid: the ComputeNode uuid the instance is running on."},{"line_number":1742,"context_line":"        :param port: the port dict returned from neturon"},{"line_number":1743,"context_line":"        :param instance_uuid: The uuid of the instance the port is bound to"},{"line_number":1744,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1745,"context_line":"            to communicate with the Placement service API."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_3424920d","line":1742,"range":{"start_line":1742,"start_character":49,"end_line":1742,"end_character":56},"in_reply_to":"7faddb67_8653eae2","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1745,"context_line":"            to communicate with the Placement service API."},{"line_number":1746,"context_line":"        :param output: function that takes a single message for verbose output"},{"line_number":1747,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1748,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1749,"context_line":"            trait information cannot be read from placement."},{"line_number":1750,"context_line":"        :raise MoreThanOneResourceProviderToHealFrom: 
if it cannot be decided"},{"line_number":1751,"context_line":"            unambiguously which resource provider to heal from."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_a669468c","line":1748,"updated":"2019-07-03 16:49:22.000000000","message":"ResourceProviderRetrievalFailed as well if you\u0027re being detailed.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1745,"context_line":"            to communicate with the Placement service API."},{"line_number":1746,"context_line":"        :param output: function that takes a single message for verbose output"},{"line_number":1747,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1748,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1749,"context_line":"            trait information cannot be read from placement."},{"line_number":1750,"context_line":"        :raise MoreThanOneResourceProviderToHealFrom: if it cannot be decided"},{"line_number":1751,"context_line":"            unambiguously which resource provider to heal from."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_d4209efa","line":1748,"in_reply_to":"7faddb67_a669468c","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1753,"context_line":"            found to heal from."},{"line_number":1754,"context_line":"        :return: A two tuple where the first item is a dict of 
resources keyed"},{"line_number":1755,"context_line":"            by RP uuid to be included in the instance allocation dict. The"},{"line_number":1756,"context_line":"            second item rp_uuid to heal from."},{"line_number":1757,"context_line":"        \"\"\""},{"line_number":1758,"context_line":"        matching_rp_uuids \u003d self._get_rps_in_tree_with_required_traits("},{"line_number":1759,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027],"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_e6f41ed2","line":1756,"range":{"start_line":1756,"start_character":23,"end_line":1756,"end_character":24},"updated":"2019-07-03 16:49:22.000000000","message":"is the?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1753,"context_line":"            found to heal from."},{"line_number":1754,"context_line":"        :return: A two tuple where the first item is a dict of resources keyed"},{"line_number":1755,"context_line":"            by RP uuid to be included in the instance allocation dict. 
The"},{"line_number":1756,"context_line":"            second item rp_uuid to heal from."},{"line_number":1757,"context_line":"        \"\"\""},{"line_number":1758,"context_line":"        matching_rp_uuids \u003d self._get_rps_in_tree_with_required_traits("},{"line_number":1759,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027],"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_b47de2e8","line":1756,"range":{"start_line":1756,"start_character":23,"end_line":1756,"end_character":24},"in_reply_to":"7faddb67_e6f41ed2","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1756,"context_line":"            second item rp_uuid to heal from."},{"line_number":1757,"context_line":"        \"\"\""},{"line_number":1758,"context_line":"        matching_rp_uuids \u003d self._get_rps_in_tree_with_required_traits("},{"line_number":1759,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1760,"context_line":"            placement)"},{"line_number":1761,"context_line":""},{"line_number":1762,"context_line":"        if len(matching_rp_uuids) \u003e 1:"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_e61dfe0f","line":1759,"range":{"start_line":1759,"start_character":53,"end_line":1759,"end_character":65},"updated":"2019-07-03 16:49:22.000000000","message":"Random question but if the port has the resource_request field, is \"required\" guaranteed on the neutron side to be not empty? 
The API reference doesn\u0027t explain:\n\nhttps://developer.openstack.org/api-ref/network/v2/index.html#port-resource-request\n\nThe description of the resource_request parameter says:\n\n\"Expose Placement resources (i.e.: minimum-bandwidth) and traits (i.e.: vnic-type, physnet) requested by a port to Nova and Placement. A resource_request object contains a required key for the traits (generated from the vnic_type and the physnet) required by the port, and a resources key for ingress and egress minimum-bandwidth need for the port.\"\n\nIt doesn\u0027t say that \"required\" has to be populated though.\n\nI guess if something goes wrong there it\u0027s a misconfiguration in neutron and we\u0027ll raise NoResourceProviderToHealFrom which tells the operator to check their neutron config and links to the docs.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1756,"context_line":"            second item rp_uuid to heal from."},{"line_number":1757,"context_line":"        \"\"\""},{"line_number":1758,"context_line":"        matching_rp_uuids \u003d self._get_rps_in_tree_with_required_traits("},{"line_number":1759,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1760,"context_line":"            placement)"},{"line_number":1761,"context_line":""},{"line_number":1762,"context_line":"        if len(matching_rp_uuids) \u003e 1:"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_74d52ab9","line":1759,"range":{"start_line":1759,"start_character":53,"end_line":1759,"end_character":65},"in_reply_to":"7faddb67_e61dfe0f","updated":"2019-07-04 13:24:26.000000000","message":"as vnic_type is represented as a trait, and vnic_type is a mandatory field of 
the port, I assume that at least a CUSTOM_VNIC_TYPE_XXX trait is present. Anyhow I change the code to be prepared if no \u0027resources\u0027 or no \u0027required\u0027 keys exists in \u0027resource_request\u0027. \n\nIf the resources is empty then I think it is a neutron bug. But this code will fail as it will try to put empty allocation for a rp_uuid in placement and that is not accepted.\n\nif the required is empty then I also think it is a neturon but. But depending on how many RPs are in a given compute tree the code might found too much, not any, or exactly one RP. In the exactly one case it is the compute RP so the placement allocation update will fail as there won\u0027t be any bandwidth inventory.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1784,"context_line":""},{"line_number":1785,"context_line":"        # len(matching_rps) \u003d\u003d 1"},{"line_number":1786,"context_line":"        # We found one RP that matches the traits. Assume that we can allocate"},{"line_number":1787,"context_line":"        # the resources from it. 
If there are not enough inventory left on the"},{"line_number":1788,"context_line":"        # RP then the PUT /allocations placement call will detect that."},{"line_number":1789,"context_line":"        rp_uuid \u003d matching_rp_uuids[0]"},{"line_number":1790,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_e6cbbe6f","line":1787,"range":{"start_line":1787,"start_character":42,"end_line":1787,"end_character":45},"updated":"2019-07-03 16:49:22.000000000","message":"nit: is","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1784,"context_line":""},{"line_number":1785,"context_line":"        # len(matching_rps) \u003d\u003d 1"},{"line_number":1786,"context_line":"        # We found one RP that matches the traits. Assume that we can allocate"},{"line_number":1787,"context_line":"        # the resources from it. 
If there are not enough inventory left on the"},{"line_number":1788,"context_line":"        # RP then the PUT /allocations placement call will detect that."},{"line_number":1789,"context_line":"        rp_uuid \u003d matching_rp_uuids[0]"},{"line_number":1790,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_949466ea","line":1787,"range":{"start_line":1787,"start_character":42,"end_line":1787,"end_character":45},"in_reply_to":"7faddb67_e6cbbe6f","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1812,"context_line":"        :raise nova.exception.ComputeHostNotFound: if compute node of the"},{"line_number":1813,"context_line":"            instance not found in the db."},{"line_number":1814,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1815,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1816,"context_line":"            trait information cannot be read from placement."},{"line_number":1817,"context_line":"        :raise MoreThanOneResourceProviderToHealFrom: if it cannot be decided"},{"line_number":1818,"context_line":"            unambiguously which resource provider to heal from."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_c693225c","line":1815,"updated":"2019-07-03 16:49:22.000000000","message":"ResourceProviderRetrievalFailed as well if you\u0027re being detailed.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1812,"context_line":"        :raise nova.exception.ComputeHostNotFound: if compute node of the"},{"line_number":1813,"context_line":"            instance not found in the db."},{"line_number":1814,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1815,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1816,"context_line":"            trait information cannot be read from placement."},{"line_number":1817,"context_line":"        :raise MoreThanOneResourceProviderToHealFrom: if it cannot be decided"},{"line_number":1818,"context_line":"            unambiguously which resource provider to heal from."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_549eee07","line":1815,"in_reply_to":"7faddb67_c693225c","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1823,"context_line":"            second item is a list of port dicts to be updated in Neutron."},{"line_number":1824,"context_line":"        \"\"\""},{"line_number":1825,"context_line":"        # We need to heal port allocations for ports that have resource_request"},{"line_number":1826,"context_line":"        # but do not have an RP uuid in the binding:profile.allocation field"},{"line_number":1827,"context_line":"        # We cannot use the instance info_cache to check the binding profile"},{"line_number":1828,"context_line":"        # as this code needs to be able to handle ports that was 
attached"},{"line_number":1829,"context_line":"        # before nova in stein started updating the allocation key in the"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_a69aa66b","line":1826,"range":{"start_line":1826,"start_character":71,"end_line":1826,"end_character":76},"updated":"2019-07-03 16:49:22.000000000","message":"nit: field.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1823,"context_line":"            second item is a list of port dicts to be updated in Neutron."},{"line_number":1824,"context_line":"        \"\"\""},{"line_number":1825,"context_line":"        # We need to heal port allocations for ports that have resource_request"},{"line_number":1826,"context_line":"        # but do not have an RP uuid in the binding:profile.allocation field"},{"line_number":1827,"context_line":"        # We cannot use the instance info_cache to check the binding profile"},{"line_number":1828,"context_line":"        # as this code needs to be able to handle ports that was attached"},{"line_number":1829,"context_line":"        # before nova in stein started updating the allocation key in the"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_148876c4","line":1826,"range":{"start_line":1826,"start_character":71,"end_line":1826,"end_character":76},"in_reply_to":"7faddb67_a69aa66b","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1825,"context_line":"        # We need to heal port allocations for ports 
that have resource_request"},{"line_number":1826,"context_line":"        # but do not have an RP uuid in the binding:profile.allocation field"},{"line_number":1827,"context_line":"        # We cannot use the instance info_cache to check the binding profile"},{"line_number":1828,"context_line":"        # as this code needs to be able to handle ports that was attached"},{"line_number":1829,"context_line":"        # before nova in stein started updating the allocation key in the"},{"line_number":1830,"context_line":"        # binding:profile."},{"line_number":1831,"context_line":"        # In theory a port can be assigned to an instance without it is"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_46897224","line":1828,"range":{"start_line":1828,"start_character":61,"end_line":1828,"end_character":64},"updated":"2019-07-03 16:49:22.000000000","message":"were","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1825,"context_line":"        # We need to heal port allocations for ports that have resource_request"},{"line_number":1826,"context_line":"        # but do not have an RP uuid in the binding:profile.allocation field"},{"line_number":1827,"context_line":"        # We cannot use the instance info_cache to check the binding profile"},{"line_number":1828,"context_line":"        # as this code needs to be able to handle ports that was attached"},{"line_number":1829,"context_line":"        # before nova in stein started updating the allocation key in the"},{"line_number":1830,"context_line":"        # binding:profile."},{"line_number":1831,"context_line":"        # In theory a port can be assigned to an instance without it 
is"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_54b7ce87","line":1828,"range":{"start_line":1828,"start_character":61,"end_line":1828,"end_character":64},"in_reply_to":"7faddb67_46897224","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1828,"context_line":"        # as this code needs to be able to handle ports that was attached"},{"line_number":1829,"context_line":"        # before nova in stein started updating the allocation key in the"},{"line_number":1830,"context_line":"        # binding:profile."},{"line_number":1831,"context_line":"        # In theory a port can be assigned to an instance without it is"},{"line_number":1832,"context_line":"        # bound to any host (e.g. 
in case of shelve offload) but"},{"line_number":1833,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1834,"context_line":"        # are not on any host."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_c67c8222","line":1831,"range":{"start_line":1831,"start_character":69,"end_line":1831,"end_character":71},"updated":"2019-07-03 16:49:22.000000000","message":"being","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1828,"context_line":"        # as this code needs to be able to handle ports that was attached"},{"line_number":1829,"context_line":"        # before nova in stein started updating the allocation key in the"},{"line_number":1830,"context_line":"        # binding:profile."},{"line_number":1831,"context_line":"        # In theory a port can be assigned to an instance without it is"},{"line_number":1832,"context_line":"        # bound to any host (e.g. 
in case of shelve offload) but"},{"line_number":1833,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1834,"context_line":"        # are not on any host."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_14b15690","line":1831,"range":{"start_line":1831,"start_character":69,"end_line":1831,"end_character":71},"in_reply_to":"7faddb67_c67c8222","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1834,"context_line":"        # are not on any host."},{"line_number":1835,"context_line":"        ports_to_heal \u003d ["},{"line_number":1836,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1837,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1838,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1839,"context_line":""},{"line_number":1840,"context_line":"        if not ports_to_heal:"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_66934e51","line":1837,"updated":"2019-07-03 16:49:22.000000000","message":"Per above, do we also need to check that \u0027required\u0027 is in \u0027resource_request\u0027 and not empty?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1834,"context_line":"        # are not on any host."},{"line_number":1835,"context_line":"        ports_to_heal \u003d 
["},{"line_number":1836,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1837,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1838,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1839,"context_line":""},{"line_number":1840,"context_line":"        if not ports_to_heal:"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_345c924a","line":1837,"in_reply_to":"7faddb67_66934e51","updated":"2019-07-04 13:24:26.000000000","message":"if there is resource_request then it is there because there is some resources under it. As the neutron doc is not specifying it I agree that I need to go to full defensive mode.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1835,"context_line":"        ports_to_heal \u003d ["},{"line_number":1836,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1837,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1838,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1839,"context_line":""},{"line_number":1840,"context_line":"        if not ports_to_heal:"},{"line_number":1841,"context_line":"            # nothing to do, return early"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_06b13ae2","line":1838,"range":{"start_line":1838,"start_character":29,"end_line":1838,"end_character":46},"updated":"2019-07-03 16:49:22.000000000","message":"Assuming this can\u0027t be None otherwise we\u0027ll get a NoneType error here.\n\nActually it looks like we need 
to handle it being None (I knew I\u0027d seen this before):\n\nhttps://review.opendev.org/#/c/504260/","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1835,"context_line":"        ports_to_heal \u003d ["},{"line_number":1836,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"},{"line_number":1837,"context_line":"            if (port.get(\u0027resource_request\u0027) and"},{"line_number":1838,"context_line":"                not port.get(\u0027binding:profile\u0027, {}).get(\u0027allocation\u0027))]"},{"line_number":1839,"context_line":""},{"line_number":1840,"context_line":"        if not ports_to_heal:"},{"line_number":1841,"context_line":"            # nothing to do, return early"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_b7888471","line":1838,"range":{"start_line":1838,"start_character":29,"end_line":1838,"end_character":46},"in_reply_to":"7faddb67_06b13ae2","updated":"2019-07-04 13:24:26.000000000","message":"Good catch. Done.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1851,"context_line":"            allocations \u003d self._merge_allocations("},{"line_number":1852,"context_line":"                allocations, port_allocation)"},{"line_number":1853,"context_line":"            # We also need to record the RP we are allocated from in the"},{"line_number":1854,"context_line":"            # port. 
This will be sent back to Neutron after the allocation"},{"line_number":1855,"context_line":"            # is updated."},{"line_number":1856,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {})"},{"line_number":1857,"context_line":"            binding_profile[\u0027allocation\u0027] \u003d rp_uuid"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_a6ef06af","line":1854,"range":{"start_line":1854,"start_character":54,"end_line":1854,"end_character":59},"updated":"2019-07-03 16:49:22.000000000","message":"I thought we were going to update the port\u0027s binding:profile with the allocation *before* trying to PUT the allocations in case the latter fails and we need to rollback?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1851,"context_line":"            allocations \u003d self._merge_allocations("},{"line_number":1852,"context_line":"                allocations, port_allocation)"},{"line_number":1853,"context_line":"            # We also need to record the RP we are allocated from in the"},{"line_number":1854,"context_line":"            # port. This will be sent back to Neutron after the allocation"},{"line_number":1855,"context_line":"            # is updated."},{"line_number":1856,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {})"},{"line_number":1857,"context_line":"            binding_profile[\u0027allocation\u0027] \u003d rp_uuid"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_57f27008","line":1854,"range":{"start_line":1854,"start_character":54,"end_line":1854,"end_character":59},"in_reply_to":"7faddb67_a6ef06af","updated":"2019-07-04 13:24:26.000000000","message":"Old comment. 
Done.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1853,"context_line":"            # We also need to record the RP we are allocated from in the"},{"line_number":1854,"context_line":"            # port. This will be sent back to Neutron after the allocation"},{"line_number":1855,"context_line":"            # is updated."},{"line_number":1856,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {})"},{"line_number":1857,"context_line":"            binding_profile[\u0027allocation\u0027] \u003d rp_uuid"},{"line_number":1858,"context_line":"            port[\u0027binding:profile\u0027] \u003d binding_profile"},{"line_number":1859,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_0634da49","line":1856,"range":{"start_line":1856,"start_character":30,"end_line":1856,"end_character":61},"updated":"2019-07-03 16:49:22.000000000","message":"If we handle binding:profile being set to None in the port then we won\u0027t get here, but just a reminder.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1853,"context_line":"            # We also need to record the RP we are allocated from in the"},{"line_number":1854,"context_line":"            # port. 
This will be sent back to Neutron after the allocation"},{"line_number":1855,"context_line":"            # is updated."},{"line_number":1856,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {})"},{"line_number":1857,"context_line":"            binding_profile[\u0027allocation\u0027] \u003d rp_uuid"},{"line_number":1858,"context_line":"            port[\u0027binding:profile\u0027] \u003d binding_profile"},{"line_number":1859,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_37d9f482","line":1856,"range":{"start_line":1856,"start_character":30,"end_line":1856,"end_character":61},"in_reply_to":"7faddb67_0634da49","updated":"2019-07-04 13:24:26.000000000","message":"need to keep it here as I did not update the port itself above.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1857,"context_line":"            binding_profile[\u0027allocation\u0027] \u003d rp_uuid"},{"line_number":1858,"context_line":"            port[\u0027binding:profile\u0027] \u003d binding_profile"},{"line_number":1859,"context_line":""},{"line_number":1860,"context_line":"            output(_(\"Found resource provider %(rp_uuid)s having matching \""},{"line_number":1861,"context_line":"                     \"traits for port %(port_uuid)s with resource request \""},{"line_number":1862,"context_line":"                     \"%(request)s.\") %"},{"line_number":1863,"context_line":"                     {\"rp_uuid\": rp_uuid, \"port_uuid\": port[\"id\"],"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_c63d622d","line":1860,"updated":"2019-07-03 16:49:22.000000000","message":"nit: would probably be good to work the instance uuid into this somehow for context since we could be 
iterating over lots of instances.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1857,"context_line":"            binding_profile[\u0027allocation\u0027] \u003d rp_uuid"},{"line_number":1858,"context_line":"            port[\u0027binding:profile\u0027] \u003d binding_profile"},{"line_number":1859,"context_line":""},{"line_number":1860,"context_line":"            output(_(\"Found resource provider %(rp_uuid)s having matching \""},{"line_number":1861,"context_line":"                     \"traits for port %(port_uuid)s with resource request \""},{"line_number":1862,"context_line":"                     \"%(request)s.\") %"},{"line_number":1863,"context_line":"                     {\"rp_uuid\": rp_uuid, \"port_uuid\": port[\"id\"],"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_17739882","line":1860,"in_reply_to":"7faddb67_c63d622d","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1890,"context_line":"            # back the partial updates so the admin can retry the healing."},{"line_number":1891,"context_line":"            raise exception.UnableToUpdatePorts(error\u003dsix.text_type(e))"},{"line_number":1892,"context_line":""},{"line_number":1893,"context_line":"    def _rollback_port_updates(self, neutron, ports_to_update, output):"},{"line_number":1894,"context_line":"        # _update_ports() added the allocation key to these ports, so we need"},{"line_number":1895,"context_line":"        # to remove them during the 
rollback."},{"line_number":1896,"context_line":"        manual_rollback_needed \u003d []"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_467892f7","line":1893,"range":{"start_line":1893,"start_character":55,"end_line":1893,"end_character":61},"updated":"2019-07-03 16:49:22.000000000","message":"nit: ports_to_rollback?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1890,"context_line":"            # back the partial updates so the admin can retry the healing."},{"line_number":1891,"context_line":"            raise exception.UnableToUpdatePorts(error\u003dsix.text_type(e))"},{"line_number":1892,"context_line":""},{"line_number":1893,"context_line":"    def _rollback_port_updates(self, neutron, ports_to_update, output):"},{"line_number":1894,"context_line":"        # _update_ports() added the allocation key to these ports, so we need"},{"line_number":1895,"context_line":"        # to remove them during the rollback."},{"line_number":1896,"context_line":"        manual_rollback_needed \u003d []"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_970468c8","line":1893,"range":{"start_line":1893,"start_character":55,"end_line":1893,"end_character":61},"in_reply_to":"7faddb67_467892f7","updated":"2019-07-04 13:24:26.000000000","message":"copy pasta. 
Done.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1896,"context_line":"        manual_rollback_needed \u003d []"},{"line_number":1897,"context_line":"        last_exc \u003d None"},{"line_number":1898,"context_line":"        for port in ports_to_update:"},{"line_number":1899,"context_line":"            profile \u003d {key: value"},{"line_number":1900,"context_line":"                       for key, value in port[\u0027binding:profile\u0027].items()"},{"line_number":1901,"context_line":"                       if key !\u003d \u0027allocation\u0027}"},{"line_number":1902,"context_line":"            body \u003d {"},{"line_number":1903,"context_line":"                \u0027port\u0027: {"},{"line_number":1904,"context_line":"                    \u0027binding:profile\u0027: profile"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_268796da","line":1901,"range":{"start_line":1899,"start_character":12,"end_line":1901,"end_character":46},"updated":"2019-07-03 16:49:22.000000000","message":"Couldn\u0027t we just do:\n\nprofile \u003d port[\u0027binding:profile\u0027]\nprofile.pop(\u0027allocation\u0027)\n\n?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1896,"context_line":"        manual_rollback_needed \u003d []"},{"line_number":1897,"context_line":"        last_exc \u003d None"},{"line_number":1898,"context_line":"        for port in ports_to_update:"},{"line_number":1899,"context_line":"            profile \u003d {key: value"},{"line_number":1900,"context_line":"                  
     for key, value in port[\u0027binding:profile\u0027].items()"},{"line_number":1901,"context_line":"                       if key !\u003d \u0027allocation\u0027}"},{"line_number":1902,"context_line":"            body \u003d {"},{"line_number":1903,"context_line":"                \u0027port\u0027: {"},{"line_number":1904,"context_line":"                    \u0027binding:profile\u0027: profile"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_d70180b1","line":1901,"range":{"start_line":1899,"start_character":12,"end_line":1901,"end_character":46},"in_reply_to":"7faddb67_268796da","updated":"2019-07-04 13:24:26.000000000","message":"As this either succeeds and we are done or we fail and we exit. So yes. I can do that.\nDone.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"813267edab6a284754db4aa3e2e45f3533e7592b","unresolved":false,"context_lines":[{"line_number":1896,"context_line":"        manual_rollback_needed \u003d []"},{"line_number":1897,"context_line":"        last_exc \u003d None"},{"line_number":1898,"context_line":"        for port in ports_to_update:"},{"line_number":1899,"context_line":"            profile \u003d {key: value"},{"line_number":1900,"context_line":"                       for key, value in port[\u0027binding:profile\u0027].items()"},{"line_number":1901,"context_line":"                       if key !\u003d \u0027allocation\u0027}"},{"line_number":1902,"context_line":"            body \u003d {"},{"line_number":1903,"context_line":"                \u0027port\u0027: {"},{"line_number":1904,"context_line":"                    \u0027binding:profile\u0027: 
profile"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_b1a4e2ba","line":1901,"range":{"start_line":1899,"start_character":12,"end_line":1901,"end_character":46},"in_reply_to":"7faddb67_d70180b1","updated":"2019-07-08 21:26:36.000000000","message":"Oh right I guess I wasn\u0027t thinking about modify-by-reference screwing up the port binding back on the port itself.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":1980,"context_line":"            unambiguously which resource provider to heal from."},{"line_number":1981,"context_line":"        :raise NoResourceProviderToHealFrom: if there is no resource provider"},{"line_number":1982,"context_line":"            found to heal from."},{"line_number":1983,"context_line":"        \"\"\""},{"line_number":1984,"context_line":"        if instance.task_state is not None:"},{"line_number":1985,"context_line":"            output(_(\u0027Instance %(instance)s is undergoing a task \u0027"},{"line_number":1986,"context_line":"                     \u0027state transition: %(task_state)s\u0027) %"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_26d556ba","line":1983,"updated":"2019-07-03 16:49:22.000000000","message":"UnableToRollbackPortUpdates and UnableToUpdatePorts and ResourceProviderRetrievalFailed could also be in this docstring right?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":1980,"context_line":"            unambiguously which resource provider to heal from."},{"line_number":1981,"context_line":"        
:raise NoResourceProviderToHealFrom: if there is no resource provider"},{"line_number":1982,"context_line":"            found to heal from."},{"line_number":1983,"context_line":"        \"\"\""},{"line_number":1984,"context_line":"        if instance.task_state is not None:"},{"line_number":1985,"context_line":"            output(_(\u0027Instance %(instance)s is undergoing a task \u0027"},{"line_number":1986,"context_line":"                     \u0027state transition: %(task_state)s\u0027) %"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_97bb08e4","line":1983,"in_reply_to":"7faddb67_26d556ba","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2001,"context_line":"                error\u003d_(\"Allocation retrieval failed: %s\") % e)"},{"line_number":2002,"context_line":"        except exception.ConsumerAllocationRetrievalFailed as e:"},{"line_number":2003,"context_line":"            output(_(\"Allocation retrieval failed: %s\") % e)"},{"line_number":2004,"context_line":"            allocations \u003d None"},{"line_number":2005,"context_line":""},{"line_number":2006,"context_line":"        need_healing \u003d False"},{"line_number":2007,"context_line":"        # get_allocations_for_consumer uses safe_connect which will"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_a6a70650","line":2004,"updated":"2019-07-03 16:49:22.000000000","message":"Hmm, this is also unrelated to this change but not this series. 
This looks like a regression caused by Iff5b73d8e818fb1145690d0eeff880d98424fa1d because if we failed to get allocations for some reason, we\u0027ll set allocations\u003dNone and then below we\u0027ll overwrite any allocations that actually did exist for the consumer, right? I left comments on the change for that other patch.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"8ab28446adf87a2a44fa1e1b71e9adedecad88fd","unresolved":false,"context_lines":[{"line_number":2001,"context_line":"                error\u003d_(\"Allocation retrieval failed: %s\") % e)"},{"line_number":2002,"context_line":"        except exception.ConsumerAllocationRetrievalFailed as e:"},{"line_number":2003,"context_line":"            output(_(\"Allocation retrieval failed: %s\") % e)"},{"line_number":2004,"context_line":"            allocations \u003d None"},{"line_number":2005,"context_line":""},{"line_number":2006,"context_line":"        need_healing \u003d False"},{"line_number":2007,"context_line":"        # get_allocations_for_consumer uses safe_connect which will"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_635ecae7","line":2004,"in_reply_to":"7faddb67_17a2b89b","updated":"2019-07-04 15:23:42.000000000","message":"Fix is in https://review.opendev.org/#/c/669188/","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2001,"context_line":"                error\u003d_(\"Allocation retrieval failed: %s\") % e)"},{"line_number":2002,"context_line":"        except exception.ConsumerAllocationRetrievalFailed as e:"},{"line_number":2003,"context_line":"            output(_(\"Allocation 
retrieval failed: %s\") % e)"},{"line_number":2004,"context_line":"            allocations \u003d None"},{"line_number":2005,"context_line":""},{"line_number":2006,"context_line":"        need_healing \u003d False"},{"line_number":2007,"context_line":"        # get_allocations_for_consumer uses safe_connect which will"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_17a2b89b","line":2004,"in_reply_to":"7faddb67_a6a70650","updated":"2019-07-04 13:24:26.000000000","message":"I answered in  https://review.opendev.org/#/c/655458/6/nova/cmd/manage.py .\n\nI think your are right that it could be potentially bug. I think it was present before https://review.opendev.org/#/c/655458 . Let me fix it in a separate patch.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2004,"context_line":"            allocations \u003d None"},{"line_number":2005,"context_line":""},{"line_number":2006,"context_line":"        need_healing \u003d False"},{"line_number":2007,"context_line":"        # get_allocations_for_consumer uses safe_connect which will"},{"line_number":2008,"context_line":"        # return None if we can\u0027t communicate with Placement, and the"},{"line_number":2009,"context_line":"        # response can have an empty {\u0027allocations\u0027: {}} response if"},{"line_number":2010,"context_line":"        # there are no allocations for the instance so handle both"},{"line_number":2011,"context_line":"        if not allocations or not allocations.get(\u0027allocations\u0027):"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_8691aaf4","line":2008,"range":{"start_line":2007,"start_character":10,"end_line":2008,"end_character":21},"updated":"2019-07-03 16:49:22.000000000","message":"Unrelated but I see this is 
old - we use get_allocs_for_consumer now which doesn\u0027t use safe_connect.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"8ab28446adf87a2a44fa1e1b71e9adedecad88fd","unresolved":false,"context_lines":[{"line_number":2004,"context_line":"            allocations \u003d None"},{"line_number":2005,"context_line":""},{"line_number":2006,"context_line":"        need_healing \u003d False"},{"line_number":2007,"context_line":"        # get_allocations_for_consumer uses safe_connect which will"},{"line_number":2008,"context_line":"        # return None if we can\u0027t communicate with Placement, and the"},{"line_number":2009,"context_line":"        # response can have an empty {\u0027allocations\u0027: {}} response if"},{"line_number":2010,"context_line":"        # there are no allocations for the instance so handle both"},{"line_number":2011,"context_line":"        if not allocations or not allocations.get(\u0027allocations\u0027):"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_2370d272","line":2008,"range":{"start_line":2007,"start_character":10,"end_line":2008,"end_character":21},"in_reply_to":"7faddb67_5734b04a","updated":"2019-07-04 15:23:42.000000000","message":"fix is in https://review.opendev.org/#/c/669188/","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2004,"context_line":"            allocations \u003d None"},{"line_number":2005,"context_line":""},{"line_number":2006,"context_line":"        need_healing \u003d False"},{"line_number":2007,"context_line":"        # get_allocations_for_consumer uses safe_connect which 
will"},{"line_number":2008,"context_line":"        # return None if we can\u0027t communicate with Placement, and the"},{"line_number":2009,"context_line":"        # response can have an empty {\u0027allocations\u0027: {}} response if"},{"line_number":2010,"context_line":"        # there are no allocations for the instance so handle both"},{"line_number":2011,"context_line":"        if not allocations or not allocations.get(\u0027allocations\u0027):"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_5734b04a","line":2008,"range":{"start_line":2007,"start_character":10,"end_line":2008,"end_character":21},"in_reply_to":"7faddb67_8691aaf4","updated":"2019-07-04 13:24:26.000000000","message":"Let me fix this up in a separate patch.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2047,"context_line":"            else:"},{"line_number":2048,"context_line":"                # First update ports in neutron. 
If any of those operations"},{"line_number":2049,"context_line":"                # fail, then roll back the successful part of it and fail the"},{"line_number":2050,"context_line":"                # healing."},{"line_number":2051,"context_line":"                self._update_ports(neutron, ports_to_update, output)"},{"line_number":2052,"context_line":""},{"line_number":2053,"context_line":"                # Now that neutron update succeeded we can try to update"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_a6f9464e","line":2050,"range":{"start_line":2050,"start_character":25,"end_line":2050,"end_character":26},"updated":"2019-07-03 16:49:22.000000000","message":"nit: you might add, \"We do this first because rolling back the port updates is more straight-forward than rolling back allocation changes.\"","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2047,"context_line":"            else:"},{"line_number":2048,"context_line":"                # First update ports in neutron. 
If any of those operations"},{"line_number":2049,"context_line":"                # fail, then roll back the successful part of it and fail the"},{"line_number":2050,"context_line":"                # healing."},{"line_number":2051,"context_line":"                self._update_ports(neutron, ports_to_update, output)"},{"line_number":2052,"context_line":""},{"line_number":2053,"context_line":"                # Now that neutron update succeeded we can try to update"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_d74740bc","line":2050,"range":{"start_line":2050,"start_character":25,"end_line":2050,"end_character":26},"in_reply_to":"7faddb67_a6f9464e","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2111,"context_line":"            unambiguously which resource provider to heal from."},{"line_number":2112,"context_line":"        :raise NoResourceProviderToHealFrom: if there is no resource provider"},{"line_number":2113,"context_line":"            found to heal from."},{"line_number":2114,"context_line":"        \"\"\""},{"line_number":2115,"context_line":"        # Keep a cache of instance.node to compute node resource provider UUID."},{"line_number":2116,"context_line":"        # This will save some queries for non-ironic instances to the"},{"line_number":2117,"context_line":"        # compute_nodes table."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_e6209eb3","line":2114,"updated":"2019-07-03 16:49:22.000000000","message":"UnableToRollbackPortUpdates and UnableToUpdatePorts and ResourceProviderRetrievalFailed could also be in this docstring 
right?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2111,"context_line":"            unambiguously which resource provider to heal from."},{"line_number":2112,"context_line":"        :raise NoResourceProviderToHealFrom: if there is no resource provider"},{"line_number":2113,"context_line":"            found to heal from."},{"line_number":2114,"context_line":"        \"\"\""},{"line_number":2115,"context_line":"        # Keep a cache of instance.node to compute node resource provider UUID."},{"line_number":2116,"context_line":"        # This will save some queries for non-ironic instances to the"},{"line_number":2117,"context_line":"        # compute_nodes table."}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_f744bcb9","line":2114,"in_reply_to":"7faddb67_e6209eb3","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2191,"context_line":"          dest\u003d\u0027skip_port_allocations\u0027, default\u003dFalse,"},{"line_number":2192,"context_line":"          help\u003d\u0027Skip the healing of the resource allocations of bound ports. \u0027"},{"line_number":2193,"context_line":"               \u0027E.g. healing bandwidth resource allocation for ports having \u0027"},{"line_number":2194,"context_line":"               \u0027minimum QoS policy rules attached. 
If your deployment does\u0027"},{"line_number":2195,"context_line":"               \u0027not use such feature then the performance impact of querying \u0027"},{"line_number":2196,"context_line":"               \u0027neutron ports for each instance can be avoided with this \u0027"},{"line_number":2197,"context_line":"               \u0027flag.\u0027)"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_86068a20","line":2194,"range":{"start_line":2194,"start_character":74,"end_line":2194,"end_character":75},"updated":"2019-07-03 16:49:22.000000000","message":"space","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2191,"context_line":"          dest\u003d\u0027skip_port_allocations\u0027, default\u003dFalse,"},{"line_number":2192,"context_line":"          help\u003d\u0027Skip the healing of the resource allocations of bound ports. \u0027"},{"line_number":2193,"context_line":"               \u0027E.g. healing bandwidth resource allocation for ports having \u0027"},{"line_number":2194,"context_line":"               \u0027minimum QoS policy rules attached. 
If your deployment does\u0027"},{"line_number":2195,"context_line":"               \u0027not use such feature then the performance impact of querying \u0027"},{"line_number":2196,"context_line":"               \u0027neutron ports for each instance can be avoided with this \u0027"},{"line_number":2197,"context_line":"               \u0027flag.\u0027)"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_971308b9","line":2194,"range":{"start_line":2194,"start_character":74,"end_line":2194,"end_character":75},"in_reply_to":"7faddb67_86068a20","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2307,"context_line":"                        exception.NoResourceProviderToHealFrom,"},{"line_number":2308,"context_line":"                        exception.MoreThanOneResourceProviderToHealFrom,"},{"line_number":2309,"context_line":"                        exception.PlacementAPIConnectFailure,"},{"line_number":2310,"context_line":"                        exception.ResourceProviderTraitRetrievalFailed) as e:"},{"line_number":2311,"context_line":"                    print(e.format_message())"},{"line_number":2312,"context_line":"                    return 3"},{"line_number":2313,"context_line":"                except exception.UnableToQueryPorts as e:"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_290d2937","line":2310,"updated":"2019-07-03 16:49:22.000000000","message":"ResourceProviderRetrievalFailed I think as well right?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2307,"context_line":"                        exception.NoResourceProviderToHealFrom,"},{"line_number":2308,"context_line":"                        exception.MoreThanOneResourceProviderToHealFrom,"},{"line_number":2309,"context_line":"                        exception.PlacementAPIConnectFailure,"},{"line_number":2310,"context_line":"                        exception.ResourceProviderTraitRetrievalFailed) as e:"},{"line_number":2311,"context_line":"                    print(e.format_message())"},{"line_number":2312,"context_line":"                    return 3"},{"line_number":2313,"context_line":"                except exception.UnableToQueryPorts as e:"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_77120cb4","line":2310,"in_reply_to":"7faddb67_290d2937","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1686,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1687,"context_line":"        :param rp_uuid: the RP uuid that will be used to query the tree."},{"line_number":1688,"context_line":"        :param required_traits: the traits that need to be supported by"},{"line_number":1689,"context_line":"            the returned resource provider."},{"line_number":1690,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1691,"context_line":"            to communicate with the Placement service API."},{"line_number":1692,"context_line":"        :raise PlacementAPIConnectFailure: if placement API 
cannot be reached"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_91dd2616","line":1689,"range":{"start_line":1689,"start_character":34,"end_line":1689,"end_character":42},"updated":"2019-07-08 21:47:42.000000000","message":"nit: providers (plural)","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":1686,"context_line":"        :param ctxt: nova.context.RequestContext"},{"line_number":1687,"context_line":"        :param rp_uuid: the RP uuid that will be used to query the tree."},{"line_number":1688,"context_line":"        :param required_traits: the traits that need to be supported by"},{"line_number":1689,"context_line":"            the returned resource provider."},{"line_number":1690,"context_line":"        :param placement: nova.scheduler.client.report.SchedulerReportClient"},{"line_number":1691,"context_line":"            to communicate with the Placement service API."},{"line_number":1692,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_88ae5396","line":1689,"range":{"start_line":1689,"start_character":34,"end_line":1689,"end_character":42},"in_reply_to":"7faddb67_91dd2616","updated":"2019-07-09 07:50:12.000000000","message":"Done","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1761,"context_line":"            second item is the rp_uuid to heal from."},{"line_number":1762,"context_line":"        \"\"\""},{"line_number":1763,"context_line":"        
matching_rp_uuids \u003d self._get_rps_in_tree_with_required_traits("},{"line_number":1764,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027].get(\u0027required\u0027, []),"},{"line_number":1765,"context_line":"            placement)"},{"line_number":1766,"context_line":""},{"line_number":1767,"context_line":"        if len(matching_rp_uuids) \u003e 1:"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_d1487ecd","line":1764,"range":{"start_line":1764,"start_character":70,"end_line":1764,"end_character":72},"updated":"2019-07-08 21:47:42.000000000","message":"OK so if we hit this, _get_rps_in_tree_with_required_traits will return the whole tree since the empty set is a subset of the traits on all providers in the tree. If that returns more than one provider we\u0027ll fail with MoreThanOneResourceProviderToHealFrom. If it returns just the single compute node resource provider (no nested providers) then we\u0027ll choose that for the allocation on the port and PUT /allocations/{consumer_id} may fail if we\u0027d be posting no resources for that provider, right? 
Unless we\u0027re healing other resources on that compute node provider for VCPU/MEMORY_MB/DISK_GB and we just don\u0027t bother with the port allocation - but then the port isn\u0027t really allocated to the compute node provider right?\n\nI\u0027m just wondering if we could have weird side effects if the port has a resource_request with no resources (or required traits) - should we just explicitly fail in that case with something like NoResourceProviderToHealFrom?","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":1761,"context_line":"            second item is the rp_uuid to heal from."},{"line_number":1762,"context_line":"        \"\"\""},{"line_number":1763,"context_line":"        matching_rp_uuids \u003d self._get_rps_in_tree_with_required_traits("},{"line_number":1764,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027].get(\u0027required\u0027, []),"},{"line_number":1765,"context_line":"            placement)"},{"line_number":1766,"context_line":""},{"line_number":1767,"context_line":"        if len(matching_rp_uuids) \u003e 1:"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_e8f027cc","line":1764,"range":{"start_line":1764,"start_character":70,"end_line":1764,"end_character":72},"in_reply_to":"7faddb67_d1487ecd","updated":"2019-07-09 07:50:12.000000000","message":"I will prevent this as you suggested below by filtering for both resources and required in the port","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1795,"context_line":""},{"line_number":1796,"context_line":"        port_allocation \u003d {"},{"line_number":1797,"context_line":"            rp_uuid: {"},{"line_number":1798,"context_line":"                \u0027resources\u0027: port[\u0027resource_request\u0027].get(\u0027resources\u0027, {})"},{"line_number":1799,"context_line":"            }"},{"line_number":1800,"context_line":"        }"},{"line_number":1801,"context_line":"        return port_allocation, rp_uuid"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_9162063e","line":1798,"range":{"start_line":1798,"start_character":71,"end_line":1798,"end_character":73},"updated":"2019-07-08 21:47:42.000000000","message":"Per above, should we just fail explicitly if we hit this? I worry about merging the port allocations empty resources dict with the potentially other resources from the flavor on the root compute node resource provider (if \"required\" is empty).","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":1795,"context_line":""},{"line_number":1796,"context_line":"        port_allocation \u003d {"},{"line_number":1797,"context_line":"            rp_uuid: {"},{"line_number":1798,"context_line":"                \u0027resources\u0027: port[\u0027resource_request\u0027].get(\u0027resources\u0027, {})"},{"line_number":1799,"context_line":"            }"},{"line_number":1800,"context_line":"        }"},{"line_number":1801,"context_line":"        return port_allocation, 
rp_uuid"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_e877c739","line":1798,"range":{"start_line":1798,"start_character":71,"end_line":1798,"end_character":73},"in_reply_to":"7faddb67_9162063e","updated":"2019-07-09 07:50:12.000000000","message":"due to has_reques_but_no_allocation() filter this could not happen.","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1840,"context_line":"        # bound to any host (e.g. in case of shelve offload) but"},{"line_number":1841,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1842,"context_line":"        # are not on any host."},{"line_number":1843,"context_line":"        def has_reques_but_no_allocation(port):"},{"line_number":1844,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1845,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1846,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_71d42abe","line":1843,"range":{"start_line":1843,"start_character":16,"end_line":1843,"end_character":22},"updated":"2019-07-08 21:47:42.000000000","message":"request","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":1840,"context_line":"        # bound to any host (e.g. 
in case of shelve offload) but"},{"line_number":1841,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1842,"context_line":"        # are not on any host."},{"line_number":1843,"context_line":"        def has_reques_but_no_allocation(port):"},{"line_number":1844,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1845,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1846,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_0873434f","line":1843,"range":{"start_line":1843,"start_character":16,"end_line":1843,"end_character":22},"in_reply_to":"7faddb67_71d42abe","updated":"2019-07-09 07:50:12.000000000","message":"Done","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1842,"context_line":"        # are not on any host."},{"line_number":1843,"context_line":"        def has_reques_but_no_allocation(port):"},{"line_number":1844,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1845,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1846,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"},{"line_number":1847,"context_line":"            return request and not allocation"},{"line_number":1848,"context_line":""}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_f1dfbad3","line":1845,"range":{"start_line":1845,"start_character":30,"end_line":1845,"end_character":67},"updated":"2019-07-08 
21:47:42.000000000","message":"nit: just thinking out loud we could make this module-level helper function public and re-use it here:\n\nhttps://github.com/openstack/nova/blob/86524773b8cd3a52c98409c7ca183b4e1873e2b8/nova/network/neutronv2/api.py#L92\n\nBut that could also be done in a follow up change since this patch is already big.","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a48702d1a67100be6d3dde6e9aee22a55199dfda","unresolved":false,"context_lines":[{"line_number":1842,"context_line":"        # are not on any host."},{"line_number":1843,"context_line":"        def has_reques_but_no_allocation(port):"},{"line_number":1844,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1845,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1846,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"},{"line_number":1847,"context_line":"            return request and not allocation"},{"line_number":1848,"context_line":""}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_03a7a451","line":1845,"range":{"start_line":1845,"start_character":30,"end_line":1845,"end_character":67},"in_reply_to":"7faddb67_a88f8f21","updated":"2019-07-09 08:43:27.000000000","message":"refactor proposed in: https://review.opendev.org/#/c/669817","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":1842,"context_line":"        # are not on any host."},{"line_number":1843,"context_line":"        def 
has_reques_but_no_allocation(port):"},{"line_number":1844,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1845,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1846,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"},{"line_number":1847,"context_line":"            return request and not allocation"},{"line_number":1848,"context_line":""}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_a88f8f21","line":1845,"range":{"start_line":1845,"start_character":30,"end_line":1845,"end_character":67},"in_reply_to":"7faddb67_f1dfbad3","updated":"2019-07-09 07:50:12.000000000","message":"I will propose a separate patch","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1844,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1845,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1846,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"},{"line_number":1847,"context_line":"            return request and not allocation"},{"line_number":1848,"context_line":""},{"line_number":1849,"context_line":"        ports_to_heal \u003d ["},{"line_number":1850,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_1104d649","line":1847,"range":{"start_line":1847,"start_character":19,"end_line":1847,"end_character":26},"updated":"2019-07-08 21:47:42.000000000","message":"Per the comments in _get_port_allocation should this filter function also check 
to make sure that the resource_request has resources and required traits?","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":1844,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1845,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1846,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"},{"line_number":1847,"context_line":"            return request and not allocation"},{"line_number":1848,"context_line":""},{"line_number":1849,"context_line":"        ports_to_heal \u003d ["},{"line_number":1850,"context_line":"            port for port in self._get_ports(ctxt, instance, neutron)"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_28d07f1f","line":1847,"range":{"start_line":1847,"start_character":19,"end_line":1847,"end_character":26},"in_reply_to":"7faddb67_1104d649","updated":"2019-07-09 07:50:12.000000000","message":"Let\u0027s do this. This way I can remove all the defaulted \u0027required\u0027 and \u0027resources\u0027 gets in the later codepath.","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1866,"context_line":"            # We also need to record the RP we are allocated from in the"},{"line_number":1867,"context_line":"            # port. 
This will be sent back to Neutron before the allocation"},{"line_number":1868,"context_line":"            # is updated in placement"},{"line_number":1869,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1870,"context_line":"            binding_profile[\u0027allocation\u0027] \u003d rp_uuid"},{"line_number":1871,"context_line":"            port[\u0027binding:profile\u0027] \u003d binding_profile"},{"line_number":1872,"context_line":""}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_d1fdde3a","line":1869,"range":{"start_line":1869,"start_character":30,"end_line":1869,"end_character":67},"updated":"2019-07-08 21:47:42.000000000","message":"same nit above","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":1999,"context_line":"        :raise UnableToUpdatePorts: if a port update failed in neutron but any"},{"line_number":2000,"context_line":"            partial update was rolled back successfully."},{"line_number":2001,"context_line":"        :raise UnableToRollbackPortUpdates: if a port update failed in neutron"},{"line_number":2002,"context_line":"        and the rollback of the partial updates are also failed."},{"line_number":2003,"context_line":"        \"\"\""},{"line_number":2004,"context_line":"        if instance.task_state is not None:"},{"line_number":2005,"context_line":"            output(_(\u0027Instance %(instance)s is undergoing a task \u0027"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_519fae02","line":2002,"updated":"2019-07-08 21:47:42.000000000","message":"indent","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":1999,"context_line":"        :raise UnableToUpdatePorts: if a port update failed in neutron but any"},{"line_number":2000,"context_line":"            partial update was rolled back successfully."},{"line_number":2001,"context_line":"        :raise UnableToRollbackPortUpdates: if a port update failed in neutron"},{"line_number":2002,"context_line":"        and the rollback of the partial updates are also failed."},{"line_number":2003,"context_line":"        \"\"\""},{"line_number":2004,"context_line":"        if instance.task_state is not None:"},{"line_number":2005,"context_line":"            output(_(\u0027Instance %(instance)s is undergoing a task \u0027"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_c8920b4c","line":2002,"in_reply_to":"7faddb67_519fae02","updated":"2019-07-09 07:50:12.000000000","message":"Done","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"e52349e5a82b76d257f9604fccbffe680cafae68","unresolved":false,"context_lines":[{"line_number":2136,"context_line":"        :raise UnableToUpdatePorts: if a port update failed in neutron but any"},{"line_number":2137,"context_line":"            partial update was rolled back successfully."},{"line_number":2138,"context_line":"        :raise UnableToRollbackPortUpdates: if a port update failed in neutron"},{"line_number":2139,"context_line":"        and the rollback of the partial updates are also failed."},{"line_number":2140,"context_line":"        \"\"\""},{"line_number":2141,"context_line":"        # Keep a cache of instance.node to compute node resource provider UUID."},{"line_number":2142,"context_line":"        # This will save some queries for non-ironic 
instances to the"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_d192bef6","line":2139,"updated":"2019-07-08 21:47:42.000000000","message":"indent","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"7a23c7cee8921fdf9a7e3969325ed05561b68edf","unresolved":false,"context_lines":[{"line_number":2136,"context_line":"        :raise UnableToUpdatePorts: if a port update failed in neutron but any"},{"line_number":2137,"context_line":"            partial update was rolled back successfully."},{"line_number":2138,"context_line":"        :raise UnableToRollbackPortUpdates: if a port update failed in neutron"},{"line_number":2139,"context_line":"        and the rollback of the partial updates are also failed."},{"line_number":2140,"context_line":"        \"\"\""},{"line_number":2141,"context_line":"        # Keep a cache of instance.node to compute node resource provider UUID."},{"line_number":2142,"context_line":"        # This will save some queries for non-ironic instances to the"}],"source_content_type":"text/x-python","patch_set":32,"id":"7faddb67_889c135e","line":2139,"in_reply_to":"7faddb67_d192bef6","updated":"2019-07-09 07:50:12.000000000","message":"Done","commit_id":"5c33937a1ac9508e936eb9c755e6c0992c48d3e6"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":1839,"context_line":"        # bound to any host (e.g. 
in case of shelve offload) but"},{"line_number":1840,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1841,"context_line":"        # are not on any host."},{"line_number":1842,"context_line":"        def has_request_but_no_allocation(port):"},{"line_number":1843,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1844,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1845,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_7c7cf3ea","line":1842,"updated":"2019-07-09 16:23:43.000000000","message":"Do we have a test case where a port is filtered out by this function? I think all of the functional tests use ports with a resource_request:\n\nhttps://review.opendev.org/#/c/637955/34/nova/tests/functional/test_nova_manage.py@775\n\nThis doesn\u0027t necessarily need to be a functional test either - it would probably be faster as a simple set of unit tests that call _get_port_allocations_to_heal directly. I can think of a few cases:\n\n1. port with no resource_request\n2. port with no binding:profile\n3. port with binding:profile[\u0027allocation\u0027] already set (maybe this one should be a functional test - or an extra heal_allocations call on one of the existing tests, e.g. start with healing the port allocation, then run it again and assert the port is already healed)\n4. port with resource_request but no resources\n5. 
port with resource_request but no required traits","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":1839,"context_line":"        # bound to any host (e.g. in case of shelve offload) but"},{"line_number":1840,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1841,"context_line":"        # are not on any host."},{"line_number":1842,"context_line":"        def has_request_but_no_allocation(port):"},{"line_number":1843,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1844,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1845,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_147a5287","line":1842,"in_reply_to":"7faddb67_7c7cf3ea","updated":"2019-07-11 09:38:49.000000000","message":"I made has_request_but_no_allocation object level and added a unit test for it with the input you listed.\nAlso I added a functional test to assert if allocation already set then no healing is (re)done. 
See: test_no_healing_is_needed functional test.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"fc28b6dabde6565f482f1cddfd00e2797b17205f","unresolved":false,"context_lines":[{"line_number":1843,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1844,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1845,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"},{"line_number":1846,"context_line":"            return (request and request.get(\u0027resources\u0027, {}) and"},{"line_number":1847,"context_line":"                    request.get(\u0027required\u0027, []) and"},{"line_number":1848,"context_line":"                    not allocation)"},{"line_number":1849,"context_line":""}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_5fd6ccc5","line":1846,"updated":"2019-07-09 14:58:00.000000000","message":"nit: could add a comment that we\u0027re being defensive about \u0027resources\u0027 and \u0027required\u0027 in the resource_request since the neutron API is not clear about those fields being optional.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":1843,"context_line":"            request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1844,"context_line":"            binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1845,"context_line":"            allocation \u003d binding_profile.get(\u0027allocation\u0027)"},{"line_number":1846,"context_line":"            return 
(request and request.get(\u0027resources\u0027, {}) and"},{"line_number":1847,"context_line":"                    request.get(\u0027required\u0027, []) and"},{"line_number":1848,"context_line":"                    not allocation)"},{"line_number":1849,"context_line":""}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_d5c7ea44","line":1846,"in_reply_to":"7faddb67_5fd6ccc5","updated":"2019-07-11 09:38:49.000000000","message":"Done","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":1922,"context_line":"                output(_(\u0027Rolling back port update for %(port_uuid)s\u0027) %"},{"line_number":1923,"context_line":"                       {\u0027port_uuid\u0027: port[\u0027id\u0027]})"},{"line_number":1924,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1925,"context_line":"            except neutron_client_exc.NeutronClientException as e:"},{"line_number":1926,"context_line":"                # TODO(gibi): We could implement a retry mechanism with"},{"line_number":1927,"context_line":"                # back off."},{"line_number":1928,"context_line":"                manual_rollback_needed.append(port)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_9c4f6fbc","line":1925,"updated":"2019-07-09 16:23:43.000000000","message":"I\u0027m not sure but I don\u0027t think test_heal_multiple_port_allocations_rollback_fails covers the slight wrinkle (which came up before in review with Eric) that even if one rollback fails another might pass. I think at some point earlier you had the try/except around the for loop which meant that if one port update would fail we\u0027d stop, but now we continue. 
I\u0027m not sure you have that logic covered in a test, but it could be a simple unit test for this function alone rather than a functional test.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":1922,"context_line":"                output(_(\u0027Rolling back port update for %(port_uuid)s\u0027) %"},{"line_number":1923,"context_line":"                       {\u0027port_uuid\u0027: port[\u0027id\u0027]})"},{"line_number":1924,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1925,"context_line":"            except neutron_client_exc.NeutronClientException as e:"},{"line_number":1926,"context_line":"                # TODO(gibi): We could implement a retry mechanism with"},{"line_number":1927,"context_line":"                # back off."},{"line_number":1928,"context_line":"                manual_rollback_needed.append(port)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_e6110cae","line":1925,"in_reply_to":"7faddb67_9c4f6fbc","updated":"2019-07-11 09:38:49.000000000","message":"Extended test_heal_multiple_port_allocations_rollback_fails with a 3rd port and logic to assert that rollback continues after failure.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":2333,"context_line":"                        exception.AllocationUpdateFailed,"},{"line_number":2334,"context_line":"                        exception.NoResourceProviderToHealFrom,"},{"line_number":2335,"context_line":"                        
exception.MoreThanOneResourceProviderToHealFrom,"},{"line_number":2336,"context_line":"                        exception.PlacementAPIConnectFailure,"},{"line_number":2337,"context_line":"                        exception.ResourceProviderRetrievalFailed,"},{"line_number":2338,"context_line":"                        exception.ResourceProviderTraitRetrievalFailed) as e:"},{"line_number":2339,"context_line":"                    print(e.format_message())"},{"line_number":2340,"context_line":"                    return 3"},{"line_number":2341,"context_line":"                except exception.UnableToQueryPorts as e:"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_dcf96780","line":2338,"range":{"start_line":2336,"start_character":24,"end_line":2338,"end_character":70},"updated":"2019-07-09 16:23:43.000000000","message":"I don\u0027t think we have a test case for these. I think you\u0027d just mock a failure in _get_rps_in_tree_with_required_traits. I\u0027m not necessarily saying we need all 3 explicitly covered unless you can make that simple within a single test case like using a loop over the mock or ddt or something.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":2333,"context_line":"                        exception.AllocationUpdateFailed,"},{"line_number":2334,"context_line":"                        exception.NoResourceProviderToHealFrom,"},{"line_number":2335,"context_line":"                        exception.MoreThanOneResourceProviderToHealFrom,"},{"line_number":2336,"context_line":"                        exception.PlacementAPIConnectFailure,"},{"line_number":2337,"context_line":"                        exception.ResourceProviderRetrievalFailed,"},{"line_number":2338,"context_line":"                 
       exception.ResourceProviderTraitRetrievalFailed) as e:"},{"line_number":2339,"context_line":"                    print(e.format_message())"},{"line_number":2340,"context_line":"                    return 3"},{"line_number":2341,"context_line":"                except exception.UnableToQueryPorts as e:"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_d487da0e","line":2338,"range":{"start_line":2336,"start_character":24,"end_line":2338,"end_character":70},"in_reply_to":"7faddb67_dcf96780","updated":"2019-07-11 09:38:49.000000000","message":"Added a functional test to cover it: test_heal_port_allocation_placement_unavailable()","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1691,"context_line":"            to communicate with the Placement service API."},{"line_number":1692,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1693,"context_line":"        :raise ResourceProviderRetrievalFailed: if the resource provider does"},{"line_number":1694,"context_line":"            not exists."},{"line_number":1695,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1696,"context_line":"            trait information cannot be read from placement."},{"line_number":1697,"context_line":"        :return: A list of RP UUIDs that supports every required traits and"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_5198d1be","line":1694,"range":{"start_line":1694,"start_character":16,"end_line":1694,"end_character":22},"updated":"2019-07-11 20:04:17.000000000","message":"exist","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1691,"context_line":"            to communicate with the Placement service API."},{"line_number":1692,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1693,"context_line":"        :raise ResourceProviderRetrievalFailed: if the resource provider does"},{"line_number":1694,"context_line":"            not exists."},{"line_number":1695,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1696,"context_line":"            trait information cannot be read from placement."},{"line_number":1697,"context_line":"        :return: A list of RP UUIDs that supports every required traits and"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_894ef96c","line":1694,"range":{"start_line":1694,"start_character":16,"end_line":1694,"end_character":22},"in_reply_to":"7faddb67_5198d1be","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1702,"context_line":"            rps_with_traits \u003d {"},{"line_number":1703,"context_line":"                rp[\u0027uuid\u0027]:"},{"line_number":1704,"context_line":"                    placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1705,"context_line":"                for rp in rps}"},{"line_number":1706,"context_line":"        except ks_exc.ClientException:"},{"line_number":1707,"context_line":"            raise 
exception.PlacementAPIConnectFailure()"},{"line_number":1708,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_b117c551","line":1705,"updated":"2019-07-11 20:04:17.000000000","message":"nit: you could do the trait filtering here and just build this up with the matching RPs as you go","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1702,"context_line":"            rps_with_traits \u003d {"},{"line_number":1703,"context_line":"                rp[\u0027uuid\u0027]:"},{"line_number":1704,"context_line":"                    placement.get_provider_traits(ctxt, rp[\u0027uuid\u0027]).traits"},{"line_number":1705,"context_line":"                for rp in rps}"},{"line_number":1706,"context_line":"        except ks_exc.ClientException:"},{"line_number":1707,"context_line":"            raise exception.PlacementAPIConnectFailure()"},{"line_number":1708,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_949cd0c0","line":1705,"in_reply_to":"7faddb67_b117c551","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1736,"context_line":"        return allocations"},{"line_number":1737,"context_line":""},{"line_number":1738,"context_line":"    def _get_port_allocation("},{"line_number":1739,"context_line":"            self, ctxt, node_uuid, port, instance_uuid, placement, output):"},{"line_number":1740,"context_line":"        \"\"\"Return the extra allocation the instance needs due to the 
given"},{"line_number":1741,"context_line":"        port."},{"line_number":1742,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_0c4b1e59","line":1739,"range":{"start_line":1739,"start_character":67,"end_line":1739,"end_character":73},"updated":"2019-07-11 20:04:17.000000000","message":"this is no longer used","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1736,"context_line":"        return allocations"},{"line_number":1737,"context_line":""},{"line_number":1738,"context_line":"    def _get_port_allocation("},{"line_number":1739,"context_line":"            self, ctxt, node_uuid, port, instance_uuid, placement, output):"},{"line_number":1740,"context_line":"        \"\"\"Return the extra allocation the instance needs due to the given"},{"line_number":1741,"context_line":"        port."},{"line_number":1742,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_d4b56833","line":1739,"range":{"start_line":1739,"start_character":67,"end_line":1739,"end_character":73},"in_reply_to":"7faddb67_0c4b1e59","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1749,"context_line":"        :param output: function that takes a single message for verbose output"},{"line_number":1750,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1751,"context_line":"        :raise ResourceProviderRetrievalFailed: compute node resource 
provider"},{"line_number":1752,"context_line":"            does not exists."},{"line_number":1753,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1754,"context_line":"            trait information cannot be read from placement."},{"line_number":1755,"context_line":"        :raise MoreThanOneResourceProviderToHealFrom: if it cannot be decided"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ccff46ff","line":1752,"range":{"start_line":1752,"start_character":21,"end_line":1752,"end_character":27},"updated":"2019-07-11 20:04:17.000000000","message":"exist","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1749,"context_line":"        :param output: function that takes a single message for verbose output"},{"line_number":1750,"context_line":"        :raise PlacementAPIConnectFailure: if placement API cannot be reached"},{"line_number":1751,"context_line":"        :raise ResourceProviderRetrievalFailed: compute node resource provider"},{"line_number":1752,"context_line":"            does not exists."},{"line_number":1753,"context_line":"        :raise ResourceProviderTraitRetrievalFailed: if resource provider"},{"line_number":1754,"context_line":"            trait information cannot be read from placement."},{"line_number":1755,"context_line":"        :raise MoreThanOneResourceProviderToHealFrom: if it cannot be decided"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_a953b516","line":1752,"range":{"start_line":1752,"start_character":21,"end_line":1752,"end_character":27},"in_reply_to":"7faddb67_ccff46ff","updated":"2019-07-15 
15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027], placement)"},{"line_number":1765,"context_line":""},{"line_number":1766,"context_line":"        if len(matching_rp_uuids) \u003e 1:"},{"line_number":1767,"context_line":"            # If there are more than one such RPs then it is an ambiguous"},{"line_number":1768,"context_line":"            # situation that we cannot handle here efficiently because that"},{"line_number":1769,"context_line":"            # would require the reimplementation of most of the allocation"},{"line_number":1770,"context_line":"            # candidate query functionality of placement. Also if more"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_8c094e2d","line":1767,"range":{"start_line":1767,"start_character":46,"end_line":1767,"end_character":49},"updated":"2019-07-11 20:04:17.000000000","message":"RP","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027], placement)"},{"line_number":1765,"context_line":""},{"line_number":1766,"context_line":"        if len(matching_rp_uuids) \u003e 1:"},{"line_number":1767,"context_line":"            # If there are more than one such RPs then it is an ambiguous"},{"line_number":1768,"context_line":"            # situation that we cannot handle here efficiently because 
that"},{"line_number":1769,"context_line":"            # would require the reimplementation of most of the allocation"},{"line_number":1770,"context_line":"            # candidate query functionality of placement. Also if more"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ec024209","line":1767,"range":{"start_line":1767,"start_character":23,"end_line":1767,"end_character":26},"updated":"2019-07-11 20:04:17.000000000","message":"is","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027], placement)"},{"line_number":1765,"context_line":""},{"line_number":1766,"context_line":"        if len(matching_rp_uuids) \u003e 1:"},{"line_number":1767,"context_line":"            # If there are more than one such RPs then it is an ambiguous"},{"line_number":1768,"context_line":"            # situation that we cannot handle here efficiently because that"},{"line_number":1769,"context_line":"            # would require the reimplementation of most of the allocation"},{"line_number":1770,"context_line":"            # candidate query functionality of placement. 
Also if more"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_547bb879","line":1767,"range":{"start_line":1767,"start_character":46,"end_line":1767,"end_character":49},"in_reply_to":"7faddb67_8c094e2d","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1764,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027], placement)"},{"line_number":1765,"context_line":""},{"line_number":1766,"context_line":"        if len(matching_rp_uuids) \u003e 1:"},{"line_number":1767,"context_line":"            # If there are more than one such RPs then it is an ambiguous"},{"line_number":1768,"context_line":"            # situation that we cannot handle here efficiently because that"},{"line_number":1769,"context_line":"            # would require the reimplementation of most of the allocation"},{"line_number":1770,"context_line":"            # candidate query functionality of placement. Also if more"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_74803447","line":1767,"range":{"start_line":1767,"start_character":23,"end_line":1767,"end_character":26},"in_reply_to":"7faddb67_ec024209","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1771,"context_line":"            # than one such RP exists then selecting the right one might"},{"line_number":1772,"context_line":"            # need extra information from the compute node. 
For example"},{"line_number":1773,"context_line":"            # which PCI PF the VF is allocated from and which RP represents"},{"line_number":1774,"context_line":"            # that PCI PF in placement. When migration will be supported"},{"line_number":1775,"context_line":"            # with such servers then we can ask the admin to migrate these"},{"line_number":1776,"context_line":"            # servers instead to heal their allocation."},{"line_number":1777,"context_line":"            raise exception.MoreThanOneResourceProviderToHealFrom("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ac0c4a3b","line":1774,"range":{"start_line":1774,"start_character":55,"end_line":1774,"end_character":62},"updated":"2019-07-11 20:04:17.000000000","message":"is","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1771,"context_line":"            # than one such RP exists then selecting the right one might"},{"line_number":1772,"context_line":"            # need extra information from the compute node. For example"},{"line_number":1773,"context_line":"            # which PCI PF the VF is allocated from and which RP represents"},{"line_number":1774,"context_line":"            # that PCI PF in placement. 
When migration will be supported"},{"line_number":1775,"context_line":"            # with such servers then we can ask the admin to migrate these"},{"line_number":1776,"context_line":"            # servers instead to heal their allocation."},{"line_number":1777,"context_line":"            raise exception.MoreThanOneResourceProviderToHealFrom("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_54f098fd","line":1774,"range":{"start_line":1774,"start_character":55,"end_line":1774,"end_character":62},"in_reply_to":"7faddb67_ac0c4a3b","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1786,"context_line":"                traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1787,"context_line":"                node_uuid\u003dnode_uuid)"},{"line_number":1788,"context_line":""},{"line_number":1789,"context_line":"        # len(matching_rps) \u003d\u003d 1"},{"line_number":1790,"context_line":"        # We found one RP that matches the traits. Assume that we can allocate"},{"line_number":1791,"context_line":"        # the resources from it. 
If there is not enough inventory left on the"},{"line_number":1792,"context_line":"        # RP then the PUT /allocations placement call will detect that."}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ac252aad","line":1789,"range":{"start_line":1789,"start_character":14,"end_line":1789,"end_character":26},"updated":"2019-07-11 20:04:17.000000000","message":"matching_rp_uuids","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1786,"context_line":"                traits\u003dport[\u0027resource_request\u0027][\u0027required\u0027],"},{"line_number":1787,"context_line":"                node_uuid\u003dnode_uuid)"},{"line_number":1788,"context_line":""},{"line_number":1789,"context_line":"        # len(matching_rps) \u003d\u003d 1"},{"line_number":1790,"context_line":"        # We found one RP that matches the traits. Assume that we can allocate"},{"line_number":1791,"context_line":"        # the resources from it. 
If there is not enough inventory left on the"},{"line_number":1792,"context_line":"        # RP then the PUT /allocations placement call will detect that."}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_14dea06c","line":1789,"range":{"start_line":1789,"start_character":14,"end_line":1789,"end_character":26},"in_reply_to":"7faddb67_ac252aad","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1797,"context_line":"                \u0027resources\u0027: port[\u0027resource_request\u0027][\u0027resources\u0027]"},{"line_number":1798,"context_line":"            }"},{"line_number":1799,"context_line":"        }"},{"line_number":1800,"context_line":"        return port_allocation, rp_uuid"},{"line_number":1801,"context_line":""},{"line_number":1802,"context_line":"    def _has_request_but_no_allocation(self, port):"},{"line_number":1803,"context_line":"        request \u003d port.get(\u0027resource_request\u0027)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_2c313aed","line":1800,"range":{"start_line":1800,"start_character":32,"end_line":1800,"end_character":39},"updated":"2019-07-11 20:04:17.000000000","message":"Why do we need this if the first return is a dict with a single key equal to rp_uuid?\n\nIt saves the caller doing\n\n rp_uuid \u003d list(port_allocation)[0]","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1797,"context_line":"                \u0027resources\u0027: 
port[\u0027resource_request\u0027][\u0027resources\u0027]"},{"line_number":1798,"context_line":"            }"},{"line_number":1799,"context_line":"        }"},{"line_number":1800,"context_line":"        return port_allocation, rp_uuid"},{"line_number":1801,"context_line":""},{"line_number":1802,"context_line":"    def _has_request_but_no_allocation(self, port):"},{"line_number":1803,"context_line":"        request \u003d port.get(\u0027resource_request\u0027)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_d46a487f","line":1800,"range":{"start_line":1800,"start_character":32,"end_line":1800,"end_character":39},"in_reply_to":"7faddb67_2c313aed","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":1799,"context_line":"        }"},{"line_number":1800,"context_line":"        return port_allocation, rp_uuid"},{"line_number":1801,"context_line":""},{"line_number":1802,"context_line":"    def _has_request_but_no_allocation(self, port):"},{"line_number":1803,"context_line":"        request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1804,"context_line":"        binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1805,"context_line":"        allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_3b8df67a","line":1802,"updated":"2019-07-11 17:04:24.000000000","message":"nit: this could be a staticmethod","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric 
Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1799,"context_line":"        }"},{"line_number":1800,"context_line":"        return port_allocation, rp_uuid"},{"line_number":1801,"context_line":""},{"line_number":1802,"context_line":"    def _has_request_but_no_allocation(self, port):"},{"line_number":1803,"context_line":"        request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1804,"context_line":"        binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1805,"context_line":"        allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_8c0f0ee0","line":1802,"range":{"start_line":1802,"start_character":8,"end_line":1802,"end_character":38},"updated":"2019-07-11 20:04:17.000000000","message":"readability-wise, I would prefer if this were defined right next to _get_ports so I can more easily grok L1853-5.\n\n(Or it could all just be collapsed in to a single _get_ports_to_heal - but I get that the splitup makes for easier unit testing.)","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1799,"context_line":"        }"},{"line_number":1800,"context_line":"        return port_allocation, rp_uuid"},{"line_number":1801,"context_line":""},{"line_number":1802,"context_line":"    def _has_request_but_no_allocation(self, port):"},{"line_number":1803,"context_line":"        request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1804,"context_line":"        binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1805,"context_line":"   
     allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_94bdb0d9","line":1802,"in_reply_to":"7faddb67_2c505ad4","updated":"2019-07-15 15:26:38.000000000","message":"Added @staticmethod decorators.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1799,"context_line":"        }"},{"line_number":1800,"context_line":"        return port_allocation, rp_uuid"},{"line_number":1801,"context_line":""},{"line_number":1802,"context_line":"    def _has_request_but_no_allocation(self, port):"},{"line_number":1803,"context_line":"        request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1804,"context_line":"        binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1805,"context_line":"        allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_2c505ad4","line":1802,"in_reply_to":"7faddb67_3b8df67a","updated":"2019-07-11 20:04:17.000000000","message":"As could all of the helpers above.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1799,"context_line":"        }"},{"line_number":1800,"context_line":"        return port_allocation, rp_uuid"},{"line_number":1801,"context_line":""},{"line_number":1802,"context_line":"    def _has_request_but_no_allocation(self, port):"},{"line_number":1803,"context_line":"        request \u003d port.get(\u0027resource_request\u0027)"},{"line_number":1804,"context_line":" 
       binding_profile \u003d port.get(\u0027binding:profile\u0027, {}) or {}"},{"line_number":1805,"context_line":"        allocation \u003d binding_profile.get(\u0027allocation\u0027)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_74b834e4","line":1802,"range":{"start_line":1802,"start_character":8,"end_line":1802,"end_character":38},"in_reply_to":"7faddb67_8c0f0ee0","updated":"2019-07-15 15:26:38.000000000","message":"Moved.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1806,"context_line":"        # We are defensive here about \u0027resources\u0027 and \u0027required\u0027 in the"},{"line_number":1807,"context_line":"        # \u0027resource_request\u0027 as neutron API is not clear about those fields"},{"line_number":1808,"context_line":"        # being optional."},{"line_number":1809,"context_line":"        return (request and request.get(\u0027resources\u0027, {}) and"},{"line_number":1810,"context_line":"                request.get(\u0027required\u0027, []) and"},{"line_number":1811,"context_line":"                not allocation)"},{"line_number":1812,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_b144a51c","line":1809,"range":{"start_line":1809,"start_character":51,"end_line":1809,"end_character":55},"updated":"2019-07-11 20:04:17.000000000","message":"redundant?","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1806,"context_line":"        # We are defensive here about \u0027resources\u0027 and \u0027required\u0027 in 
the"},{"line_number":1807,"context_line":"        # \u0027resource_request\u0027 as neutron API is not clear about those fields"},{"line_number":1808,"context_line":"        # being optional."},{"line_number":1809,"context_line":"        return (request and request.get(\u0027resources\u0027, {}) and"},{"line_number":1810,"context_line":"                request.get(\u0027required\u0027, []) and"},{"line_number":1811,"context_line":"                not allocation)"},{"line_number":1812,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_548178fa","line":1809,"range":{"start_line":1809,"start_character":51,"end_line":1809,"end_character":55},"in_reply_to":"7faddb67_b144a51c","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1807,"context_line":"        # \u0027resource_request\u0027 as neutron API is not clear about those fields"},{"line_number":1808,"context_line":"        # being optional."},{"line_number":1809,"context_line":"        return (request and request.get(\u0027resources\u0027, {}) and"},{"line_number":1810,"context_line":"                request.get(\u0027required\u0027, []) and"},{"line_number":1811,"context_line":"                not allocation)"},{"line_number":1812,"context_line":""},{"line_number":1813,"context_line":"    def _get_port_allocations_to_heal("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_71462d24","line":1810,"range":{"start_line":1810,"start_character":38,"end_line":1810,"end_character":42},"updated":"2019-07-11 20:04:17.000000000","message":"redundant?\n\nOr is request some kind of mystical pseudo-dict with a nonstandard get()?\n\n[Later] Looks like it\u0027s just a dict (gleaned from the json of an 
HTTP response). So yeah, these are redundant.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1807,"context_line":"        # \u0027resource_request\u0027 as neutron API is not clear about those fields"},{"line_number":1808,"context_line":"        # being optional."},{"line_number":1809,"context_line":"        return (request and request.get(\u0027resources\u0027, {}) and"},{"line_number":1810,"context_line":"                request.get(\u0027required\u0027, []) and"},{"line_number":1811,"context_line":"                not allocation)"},{"line_number":1812,"context_line":""},{"line_number":1813,"context_line":"    def _get_port_allocations_to_heal("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_b490ec4b","line":1810,"range":{"start_line":1810,"start_character":38,"end_line":1810,"end_character":42},"in_reply_to":"7faddb67_71462d24","updated":"2019-07-15 15:26:38.000000000","message":"you are correct. Done.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1846,"context_line":"        # as this code needs to be able to handle ports that were attached"},{"line_number":1847,"context_line":"        # before nova in stein started updating the allocation key in the"},{"line_number":1848,"context_line":"        # binding:profile."},{"line_number":1849,"context_line":"        # In theory a port can be assigned to an instance without it is being"},{"line_number":1850,"context_line":"        # bound to any host (e.g. 
in case of shelve offload) but"},{"line_number":1851,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1852,"context_line":"        # are not on any host."}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_acf68af8","line":1849,"range":{"start_line":1849,"start_character":69,"end_line":1849,"end_character":72},"updated":"2019-07-11 20:04:17.000000000","message":"strike","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1846,"context_line":"        # as this code needs to be able to handle ports that were attached"},{"line_number":1847,"context_line":"        # before nova in stein started updating the allocation key in the"},{"line_number":1848,"context_line":"        # binding:profile."},{"line_number":1849,"context_line":"        # In theory a port can be assigned to an instance without it is being"},{"line_number":1850,"context_line":"        # bound to any host (e.g. 
in case of shelve offload) but"},{"line_number":1851,"context_line":"        # _heal_allocations_for_instance() already filters out instances that"},{"line_number":1852,"context_line":"        # are not on any host."}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_7486f415","line":1849,"range":{"start_line":1849,"start_character":69,"end_line":1849,"end_character":72},"in_reply_to":"7faddb67_acf68af8","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1861,"context_line":"        node_uuid \u003d self._get_compute_node_uuid("},{"line_number":1862,"context_line":"            ctxt, instance, node_cache)"},{"line_number":1863,"context_line":""},{"line_number":1864,"context_line":"        allocations \u003d collections.defaultdict(dict)"},{"line_number":1865,"context_line":"        for port in ports_to_heal:"},{"line_number":1866,"context_line":"            port_allocation, rp_uuid \u003d self._get_port_allocation("},{"line_number":1867,"context_line":"                ctxt, node_uuid, port, instance.uuid, placement, output)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_0cf1decd","line":1864,"range":{"start_line":1864,"start_character":8,"end_line":1864,"end_character":51},"updated":"2019-07-11 20:04:17.000000000","message":"pretty sure this can just be {} now","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1861,"context_line":"        node_uuid \u003d 
self._get_compute_node_uuid("},{"line_number":1862,"context_line":"            ctxt, instance, node_cache)"},{"line_number":1863,"context_line":""},{"line_number":1864,"context_line":"        allocations \u003d collections.defaultdict(dict)"},{"line_number":1865,"context_line":"        for port in ports_to_heal:"},{"line_number":1866,"context_line":"            port_allocation, rp_uuid \u003d self._get_port_allocation("},{"line_number":1867,"context_line":"                ctxt, node_uuid, port, instance.uuid, placement, output)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_f4a664ab","line":1864,"range":{"start_line":1864,"start_character":8,"end_line":1864,"end_character":51},"in_reply_to":"7faddb67_0cf1decd","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"3642f2f9928fb35ac371fb634078ff96cba4982a","unresolved":false,"context_lines":[{"line_number":1888,"context_line":"        try:"},{"line_number":1889,"context_line":"            for port in ports_to_update:"},{"line_number":1890,"context_line":"                body \u003d {"},{"line_number":1891,"context_line":"                    \u0027port\u0027: {"},{"line_number":1892,"context_line":"                        \u0027binding:profile\u0027: {"},{"line_number":1893,"context_line":"                            \u0027allocation\u0027: port[\u0027binding:profile\u0027][\u0027allocation\u0027]"},{"line_number":1894,"context_line":"                        }"},{"line_number":1895,"context_line":"                    }"},{"line_number":1896,"context_line":"                }"},{"line_number":1897,"context_line":"                
output("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ec4fa28f","line":1894,"range":{"start_line":1891,"start_character":28,"end_line":1894,"end_character":25},"updated":"2019-07-11 20:19:07.000000000","message":"So this works if we just do\n\n \u0027port\u0027: port\n\ndoesn\u0027t it?","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1888,"context_line":"        try:"},{"line_number":1889,"context_line":"            for port in ports_to_update:"},{"line_number":1890,"context_line":"                body \u003d {"},{"line_number":1891,"context_line":"                    \u0027port\u0027: {"},{"line_number":1892,"context_line":"                        \u0027binding:profile\u0027: {"},{"line_number":1893,"context_line":"                            \u0027allocation\u0027: port[\u0027binding:profile\u0027][\u0027allocation\u0027]"},{"line_number":1894,"context_line":"                        }"},{"line_number":1895,"context_line":"                    }"},{"line_number":1896,"context_line":"                }"},{"line_number":1897,"context_line":"                output("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_a92e558f","line":1894,"range":{"start_line":1891,"start_character":28,"end_line":1894,"end_character":25},"in_reply_to":"7faddb67_ec4fa28f","updated":"2019-07-15 15:26:38.000000000","message":"I\u0027m afraid of putting the whole port back as it has port has resource_request key and that is read-only. 
But sure we need to put back the whole binding:profile.\n\nDone.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1893,"context_line":"                            \u0027allocation\u0027: port[\u0027binding:profile\u0027][\u0027allocation\u0027]"},{"line_number":1894,"context_line":"                        }"},{"line_number":1895,"context_line":"                    }"},{"line_number":1896,"context_line":"                }"},{"line_number":1897,"context_line":"                output("},{"line_number":1898,"context_line":"                    _(\u0027Updating port %(port_uuid)s with attributes \u0027"},{"line_number":1899,"context_line":"                      \u0027%(attributes)s\u0027) %"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_4c7ef619","line":1896,"updated":"2019-07-11 20:04:17.000000000","message":"I don\u0027t understand how this only updates the binding:profile[\u0027allocation\u0027] and leaves any other information in the binding:profile untouched...","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1893,"context_line":"                            \u0027allocation\u0027: port[\u0027binding:profile\u0027][\u0027allocation\u0027]"},{"line_number":1894,"context_line":"                        }"},{"line_number":1895,"context_line":"                    }"},{"line_number":1896,"context_line":"                }"},{"line_number":1897,"context_line":"                output("},{"line_number":1898,"context_line":"                    _(\u0027Updating port %(port_uuid)s with attributes 
\u0027"},{"line_number":1899,"context_line":"                      \u0027%(attributes)s\u0027) %"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ae1adbc8","line":1896,"in_reply_to":"7faddb67_4c6d9630","updated":"2019-07-15 15:26:38.000000000","message":"You are right. Here we need to post the whole binding:profile  which includes the new allocation key to preserve all the existing keys.\n\nDone.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7866f2856618164f6837865c41f11c346f94ccc4","unresolved":false,"context_lines":[{"line_number":1893,"context_line":"                            \u0027allocation\u0027: port[\u0027binding:profile\u0027][\u0027allocation\u0027]"},{"line_number":1894,"context_line":"                        }"},{"line_number":1895,"context_line":"                    }"},{"line_number":1896,"context_line":"                }"},{"line_number":1897,"context_line":"                output("},{"line_number":1898,"context_line":"                    _(\u0027Updating port %(port_uuid)s with attributes \u0027"},{"line_number":1899,"context_line":"                      \u0027%(attributes)s\u0027) %"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_4c6d9630","line":1896,"in_reply_to":"7faddb67_4c7ef619","updated":"2019-07-11 20:08:34.000000000","message":"Yeah this one looks suspicious.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1904,"context_line":"            output("},{"line_number":1905,"context_line":"                _(\u0027Updating port %(port_uuid)s failed: %(error)s\u0027) %"},{"line_number":1906,"context_line":"                {\u0027port_uuid\u0027: 
port[\u0027id\u0027], \u0027error\u0027: six.text_type(e)})"},{"line_number":1907,"context_line":"            # one of the port update failed. We need to roll back the updates"},{"line_number":1908,"context_line":"            # that succeeded before"},{"line_number":1909,"context_line":"            self._rollback_port_updates(neutron, succeeded, output)"},{"line_number":1910,"context_line":"            # we failed to heal so we need to stop but we successfully rolled"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_eca482bb","line":1907,"range":{"start_line":1907,"start_character":30,"end_line":1907,"end_character":36},"updated":"2019-07-11 20:04:17.000000000","message":"updates","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1904,"context_line":"            output("},{"line_number":1905,"context_line":"                _(\u0027Updating port %(port_uuid)s failed: %(error)s\u0027) %"},{"line_number":1906,"context_line":"                {\u0027port_uuid\u0027: port[\u0027id\u0027], \u0027error\u0027: six.text_type(e)})"},{"line_number":1907,"context_line":"            # one of the port update failed. 
We need to roll back the updates"},{"line_number":1908,"context_line":"            # that succeeded before"},{"line_number":1909,"context_line":"            self._rollback_port_updates(neutron, succeeded, output)"},{"line_number":1910,"context_line":"            # we failed to heal so we need to stop but we successfully rolled"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_d4c24834","line":1907,"range":{"start_line":1907,"start_character":30,"end_line":1907,"end_character":36},"in_reply_to":"7faddb67_eca482bb","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1923,"context_line":"                \u0027port\u0027: {"},{"line_number":1924,"context_line":"                    \u0027binding:profile\u0027: profile"},{"line_number":1925,"context_line":"                }"},{"line_number":1926,"context_line":"            }"},{"line_number":1927,"context_line":"            try:"},{"line_number":1928,"context_line":"                output(_(\u0027Rolling back port update for %(port_uuid)s\u0027) %"},{"line_number":1929,"context_line":"                       {\u0027port_uuid\u0027: port[\u0027id\u0027]})"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ec92424b","line":1926,"updated":"2019-07-11 20:04:17.000000000","message":"...but this one removes the binding:profile[\u0027allocation\u0027]\n\nLooking at the PUT /ports/$port_uuid API [1] (which is where neutron.update_port lands afaict) it looks like a full replace. 
In which case this one is okay (or would be) but the one on L1901 is going to blow away the port\u0027s whole content, leaving *only* the binding:profile[\u0027allocation\u0027].\n\n[1] https://developer.openstack.org/api-ref/network/v2/index.html?expanded\u003dupdate-port-detail#update-port","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1923,"context_line":"                \u0027port\u0027: {"},{"line_number":1924,"context_line":"                    \u0027binding:profile\u0027: profile"},{"line_number":1925,"context_line":"                }"},{"line_number":1926,"context_line":"            }"},{"line_number":1927,"context_line":"            try:"},{"line_number":1928,"context_line":"                output(_(\u0027Rolling back port update for %(port_uuid)s\u0027) %"},{"line_number":1929,"context_line":"                       {\u0027port_uuid\u0027: port[\u0027id\u0027]})"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_6e162358","line":1926,"in_reply_to":"7faddb67_0c50fe5c","updated":"2019-07-15 15:26:38.000000000","message":"In this script we working on ports that are bound to an instance. I can imaging that we are trying to heal an instance while a port is being detached from such an instance in parallel. Then this code might push some keys back to the binding:profile of an already unbound port. As it does not make the unbound port bound again, I\u0027m not really worrying here.\n\nIn general the whole heal_instance_allocation can create an allocation for an instance in placement while that instance is deleted in parallel and therefore leak resource allocation. This is where your extra tasks state come into picture to prevent parallel action on an instance being healed. 
Still that would only prevent instance actions on the nova API in parallel with healing but not prevent a parallel call to neutron altering a port being healed.\n\nThe heal port allocation code is already pretty complex for a tool that supposed to be a small helper for admins to heal allocations in place [1] and we already see that it cannot heal everything. So for me this script is a best effort tool. And as such I feel that we are over-engeneering it.\n\n[1] https://specs.openstack.org/openstack/nova-specs/specs/stein/implemented/bandwidth-resource-provider.html#upgrade-impact","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1923,"context_line":"                \u0027port\u0027: {"},{"line_number":1924,"context_line":"                    \u0027binding:profile\u0027: profile"},{"line_number":1925,"context_line":"                }"},{"line_number":1926,"context_line":"            }"},{"line_number":1927,"context_line":"            try:"},{"line_number":1928,"context_line":"                output(_(\u0027Rolling back port update for %(port_uuid)s\u0027) %"},{"line_number":1929,"context_line":"                       {\u0027port_uuid\u0027: port[\u0027id\u0027]})"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_6e1063a9","line":1926,"in_reply_to":"7faddb67_ec92424b","updated":"2019-07-15 15:26:38.000000000","message":"The _update_port is broken. 
I think _rollback_port_updates only removes the allocation key as intended.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7866f2856618164f6837865c41f11c346f94ccc4","unresolved":false,"context_lines":[{"line_number":1923,"context_line":"                \u0027port\u0027: {"},{"line_number":1924,"context_line":"                    \u0027binding:profile\u0027: profile"},{"line_number":1925,"context_line":"                }"},{"line_number":1926,"context_line":"            }"},{"line_number":1927,"context_line":"            try:"},{"line_number":1928,"context_line":"                output(_(\u0027Rolling back port update for %(port_uuid)s\u0027) %"},{"line_number":1929,"context_line":"                       {\u0027port_uuid\u0027: port[\u0027id\u0027]})"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_0c50fe5c","line":1926,"in_reply_to":"7faddb67_ec92424b","updated":"2019-07-11 20:08:34.000000000","message":"When Eric initially brought this up with me in IRC I was thinking he was worried about us dropping some changes on the binding profile between the time we initially got the port and when we\u0027re doing the rollback, and I was going to point at the port revision stuff in the neutron API [1] which is like generations in placement or etags, so if we were paranoid about that we could leverage revisions here, but it\u0027s not what Eric is pointing out. 
Something to keep in mind though - but I\u0027m not saying we need to worry about conflicts/revisions in this patch (I\u0027m sure we have the same race type issue in our existing neutronv2/api flows).\n\n[1] https://developer.openstack.org/api-ref/network/v2/index.html?expanded\u003dupdate-port-detail#revisions","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":1930,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1931,"context_line":"            except neutron_client_exc.NeutronClientException as e:"},{"line_number":1932,"context_line":"                output("},{"line_number":1933,"context_line":"                    _(\u0027Rolling back port %(port_uuid)s failed: %(error)s\u0027) %"},{"line_number":1934,"context_line":"                    {\u0027port_uuid\u0027: port[\u0027id\u0027], \u0027error\u0027: six.text_type(e)})"},{"line_number":1935,"context_line":"                # TODO(gibi): We could implement a retry mechanism with"},{"line_number":1936,"context_line":"                # back off."}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_1be7da42","line":1933,"range":{"start_line":1933,"start_character":31,"end_line":1933,"end_character":40},"updated":"2019-07-11 17:04:24.000000000","message":"nit: \"Rolling back update for port\"","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0a133ffe47442c3da6f04052f9b81fddbecdacee","unresolved":false,"context_lines":[{"line_number":1930,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1931,"context_line":"            except 
neutron_client_exc.NeutronClientException as e:"},{"line_number":1932,"context_line":"                output("},{"line_number":1933,"context_line":"                    _(\u0027Rolling back port %(port_uuid)s failed: %(error)s\u0027) %"},{"line_number":1934,"context_line":"                    {\u0027port_uuid\u0027: port[\u0027id\u0027], \u0027error\u0027: six.text_type(e)})"},{"line_number":1935,"context_line":"                # TODO(gibi): We could implement a retry mechanism with"},{"line_number":1936,"context_line":"                # back off."}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_b63edbd7","line":1933,"range":{"start_line":1933,"start_character":31,"end_line":1933,"end_character":40},"in_reply_to":"7faddb67_1be7da42","updated":"2019-07-11 18:21:56.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1930,"context_line":"                neutron.update_port(port[\u0027id\u0027], body\u003dbody)"},{"line_number":1931,"context_line":"            except neutron_client_exc.NeutronClientException as e:"},{"line_number":1932,"context_line":"                output("},{"line_number":1933,"context_line":"                    _(\u0027Rolling back port %(port_uuid)s failed: %(error)s\u0027) %"},{"line_number":1934,"context_line":"                    {\u0027port_uuid\u0027: port[\u0027id\u0027], \u0027error\u0027: six.text_type(e)})"},{"line_number":1935,"context_line":"                # TODO(gibi): We could implement a retry mechanism with"},{"line_number":1936,"context_line":"                # back 
off."}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_f4d424eb","line":1933,"range":{"start_line":1933,"start_character":31,"end_line":1933,"end_character":40},"in_reply_to":"7faddb67_1be7da42","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":1934,"context_line":"                    {\u0027port_uuid\u0027: port[\u0027id\u0027], \u0027error\u0027: six.text_type(e)})"},{"line_number":1935,"context_line":"                # TODO(gibi): We could implement a retry mechanism with"},{"line_number":1936,"context_line":"                # back off."},{"line_number":1937,"context_line":"                manual_rollback_needed.append(port)"},{"line_number":1938,"context_line":"                last_exc \u003d e"},{"line_number":1939,"context_line":""},{"line_number":1940,"context_line":"        if manual_rollback_needed:"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_2cd61af3","line":1937,"range":{"start_line":1937,"start_character":16,"end_line":1937,"end_character":51},"updated":"2019-07-11 20:04:17.000000000","message":"nit, this could just be saving the port[\u0027id\u0027] and you wouldn\u0027t have to do the additional list comp on L1946.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":1934,"context_line":"                    {\u0027port_uuid\u0027: port[\u0027id\u0027], \u0027error\u0027: six.text_type(e)})"},{"line_number":1935,"context_line":"                # TODO(gibi): We could implement a retry mechanism 
with"},{"line_number":1936,"context_line":"                # back off."},{"line_number":1937,"context_line":"                manual_rollback_needed.append(port)"},{"line_number":1938,"context_line":"                last_exc \u003d e"},{"line_number":1939,"context_line":""},{"line_number":1940,"context_line":"        if manual_rollback_needed:"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_d4f00894","line":1937,"range":{"start_line":1937,"start_character":16,"end_line":1937,"end_character":51},"in_reply_to":"7faddb67_2cd61af3","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":2009,"context_line":"        :raise UnableToUpdatePorts: if a port update failed in neutron but any"},{"line_number":2010,"context_line":"            partial update was rolled back successfully."},{"line_number":2011,"context_line":"        :raise UnableToRollbackPortUpdates: if a port update failed in neutron"},{"line_number":2012,"context_line":"            and the rollback of the partial updates are also failed."},{"line_number":2013,"context_line":"        \"\"\""},{"line_number":2014,"context_line":"        if instance.task_state is not None:"},{"line_number":2015,"context_line":"            output(_(\u0027Instance %(instance)s is undergoing a task \u0027"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ac03ea72","line":2012,"range":{"start_line":2012,"start_character":52,"end_line":2012,"end_character":55},"updated":"2019-07-11 20:04:17.000000000","message":"strike","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":2009,"context_line":"        :raise UnableToUpdatePorts: if a port update failed in neutron but any"},{"line_number":2010,"context_line":"            partial update was rolled back successfully."},{"line_number":2011,"context_line":"        :raise UnableToRollbackPortUpdates: if a port update failed in neutron"},{"line_number":2012,"context_line":"            and the rollback of the partial updates are also failed."},{"line_number":2013,"context_line":"        \"\"\""},{"line_number":2014,"context_line":"        if instance.task_state is not None:"},{"line_number":2015,"context_line":"            output(_(\u0027Instance %(instance)s is undergoing a task \u0027"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_b431cc5c","line":2012,"range":{"start_line":2012,"start_character":52,"end_line":2012,"end_character":55},"in_reply_to":"7faddb67_ac03ea72","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":2059,"context_line":"            port_allocations, ports_to_update \u003d {}, []"},{"line_number":2060,"context_line":""},{"line_number":2061,"context_line":"        if port_allocations:"},{"line_number":2062,"context_line":"            need_healing \u003d \u0027Update\u0027 if not need_healing else need_healing"},{"line_number":2063,"context_line":"            # Merge in any missing port allocations"},{"line_number":2064,"context_line":"            allocations[\u0027allocations\u0027] \u003d self._merge_allocations("},{"line_number":2065,"context_line":"                
allocations[\u0027allocations\u0027], port_allocations)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_d105a1c3","line":2062,"range":{"start_line":2062,"start_character":12,"end_line":2062,"end_character":73},"updated":"2019-07-11 20:04:17.000000000","message":"could this be\n\n need_healing \u003d need_healing or \u0027Update\u0027\n\n?","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":2059,"context_line":"            port_allocations, ports_to_update \u003d {}, []"},{"line_number":2060,"context_line":""},{"line_number":2061,"context_line":"        if port_allocations:"},{"line_number":2062,"context_line":"            need_healing \u003d \u0027Update\u0027 if not need_healing else need_healing"},{"line_number":2063,"context_line":"            # Merge in any missing port allocations"},{"line_number":2064,"context_line":"            allocations[\u0027allocations\u0027] \u003d self._merge_allocations("},{"line_number":2065,"context_line":"                allocations[\u0027allocations\u0027], port_allocations)"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_741034a9","line":2062,"range":{"start_line":2062,"start_character":12,"end_line":2062,"end_character":73},"in_reply_to":"7faddb67_d105a1c3","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":2090,"context_line":"                           {\u0027operation\u0027: need_healing.lower(),"},{"line_number":2091,"context_line":"                            
\u0027instance\u0027: instance.uuid})"},{"line_number":2092,"context_line":"                    return True"},{"line_number":2093,"context_line":"                else:"},{"line_number":2094,"context_line":"                    # Rollback every neutron update. If we succeed to"},{"line_number":2095,"context_line":"                    # roll back then it is safe to stop here and let the admin"},{"line_number":2096,"context_line":"                    # retry. If the rollback fails then"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_311d15bd","line":2093,"updated":"2019-07-11 20:04:17.000000000","message":"This flow is kind of weird.\n_update_ports does its own rollback if it fails.\nBut then we do an explicit check and rollback in here if the allocation update fails.\nIt would feel more natural if it looked like:\n\n try:\n     _update_ports\n     put_allocations\n except:\n     _rollback_port_updates\n\nCouple problems that would make that unclean:\n- put_allocations doesn\u0027t raise an exception. It could, if we passed raise_exc\u003dTrue all the way through, but that\u0027s ick. Or (which would also help with the next thing) we could internally\n\n if not put_allocations():\n     raise ...\n\n- We raise a different exception up the stack depending on which thing failed. 
But we could handle that by making _update_ports and the internal thing above raise the right exception and then doing our _rollback_port_updates under a save_and_reraise_exception.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":2090,"context_line":"                           {\u0027operation\u0027: need_healing.lower(),"},{"line_number":2091,"context_line":"                            \u0027instance\u0027: instance.uuid})"},{"line_number":2092,"context_line":"                    return True"},{"line_number":2093,"context_line":"                else:"},{"line_number":2094,"context_line":"                    # Rollback every neutron update. If we succeed to"},{"line_number":2095,"context_line":"                    # roll back then it is safe to stop here and let the admin"},{"line_number":2096,"context_line":"                    # retry. If the rollback fails then"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_14dc60e2","line":2093,"in_reply_to":"7faddb67_311d15bd","updated":"2019-07-15 15:26:38.000000000","message":"If I have only one rollback call up here, then the exception raised from _update_ports() needs to carry the list of ports that needs to be rolled back. 
That feels wrong to me.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":2202,"context_line":"        _(\"Iterates over non-cell0 cells looking for instances which do \""},{"line_number":2203,"context_line":"          \"not have allocations in the Placement service, or have incomplete \""},{"line_number":2204,"context_line":"          \"consumer project_id/user_id values in existing allocations or \""},{"line_number":2205,"context_line":"          \"missing allocation for ports having resource request, and \""},{"line_number":2206,"context_line":"          \"which are not undergoing a task state transition. For each \""},{"line_number":2207,"context_line":"          \"instance found, allocations are created (or updated) against the \""},{"line_number":2208,"context_line":"          \"compute node resource provider for that instance based on the \""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_cc1426a3","line":2205,"range":{"start_line":2205,"start_character":19,"end_line":2205,"end_character":29},"updated":"2019-07-11 20:04:17.000000000","message":"allocations","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":2202,"context_line":"        _(\"Iterates over non-cell0 cells looking for instances which do \""},{"line_number":2203,"context_line":"          \"not have allocations in the Placement service, or have incomplete \""},{"line_number":2204,"context_line":"          \"consumer project_id/user_id values in existing allocations or \""},{"line_number":2205,"context_line":"          
\"missing allocation for ports having resource request, and \""},{"line_number":2206,"context_line":"          \"which are not undergoing a task state transition. For each \""},{"line_number":2207,"context_line":"          \"instance found, allocations are created (or updated) against the \""},{"line_number":2208,"context_line":"          \"compute node resource provider for that instance based on the \""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_74ecb48e","line":2205,"range":{"start_line":2205,"start_character":19,"end_line":2205,"end_character":29},"in_reply_to":"7faddb67_cc1426a3","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e1136016025f4b9f67dc1602eb4bdb6f46aa8009","unresolved":false,"context_lines":[{"line_number":2228,"context_line":"          help\u003d\u0027Skip the healing of the resource allocations of bound ports. \u0027"},{"line_number":2229,"context_line":"               \u0027E.g. healing bandwidth resource allocation for ports having \u0027"},{"line_number":2230,"context_line":"               \u0027minimum QoS policy rules attached. 
If your deployment does \u0027"},{"line_number":2231,"context_line":"               \u0027not use such feature then the performance impact of querying \u0027"},{"line_number":2232,"context_line":"               \u0027neutron ports for each instance can be avoided with this \u0027"},{"line_number":2233,"context_line":"               \u0027flag.\u0027)"},{"line_number":2234,"context_line":"    def heal_allocations(self, max_count\u003dNone, verbose\u003dFalse, dry_run\u003dFalse,"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_ec17a2a2","line":2231,"range":{"start_line":2231,"start_character":24,"end_line":2231,"end_character":28},"updated":"2019-07-11 20:04:17.000000000","message":"such a","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d80e1318bbf295dfea7de05024bc3ba62b5cbdd7","unresolved":false,"context_lines":[{"line_number":2228,"context_line":"          help\u003d\u0027Skip the healing of the resource allocations of bound ports. \u0027"},{"line_number":2229,"context_line":"               \u0027E.g. healing bandwidth resource allocation for ports having \u0027"},{"line_number":2230,"context_line":"               \u0027minimum QoS policy rules attached. 
If your deployment does \u0027"},{"line_number":2231,"context_line":"               \u0027not use such feature then the performance impact of querying \u0027"},{"line_number":2232,"context_line":"               \u0027neutron ports for each instance can be avoided with this \u0027"},{"line_number":2233,"context_line":"               \u0027flag.\u0027)"},{"line_number":2234,"context_line":"    def heal_allocations(self, max_count\u003dNone, verbose\u003dFalse, dry_run\u003dFalse,"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_949610f0","line":2231,"range":{"start_line":2231,"start_character":24,"end_line":2231,"end_character":28},"in_reply_to":"7faddb67_ec17a2a2","updated":"2019-07-15 15:26:38.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"32b43dfe020ca30d655813af15a208d6ca70d66e","unresolved":false,"context_lines":[{"line_number":1768,"context_line":"            unambiguously which resource provider to heal from."},{"line_number":1769,"context_line":"        :raise NoResourceProviderToHealFrom: if there is no resource provider"},{"line_number":1770,"context_line":"            found to heal from."},{"line_number":1771,"context_line":"        :return: A dict of resources keyed by RP uuid to be included in the"},{"line_number":1772,"context_line":"            instance allocation dict."},{"line_number":1773,"context_line":"        \"\"\""},{"line_number":1774,"context_line":"        matching_rp_uuids \u003d self._get_rps_in_tree_with_required_traits("},{"line_number":1775,"context_line":"            ctxt, node_uuid, port[\u0027resource_request\u0027][\u0027required\u0027], placement)"}],"source_content_type":"text/x-python","patch_set":36,"id":"7faddb67_540d8ee6","line":1772,"range":{"start_line":1771,"start_character":8,"end_line":1772,"end_character":37},"updated":"2019-07-22 
18:45:51.000000000","message":"nit: it could be worth mentioning that this will be exactly length 1.","commit_id":"54dea2531c887f77e4b7a8e7edb978d8f1ccfe50"}],"nova/exception.py":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":2454,"context_line":"    msg_fmt \u003d _(\"Healing port allocation failed.\")"},{"line_number":2455,"context_line":""},{"line_number":2456,"context_line":""},{"line_number":2457,"context_line":"class MoreThanOneResourceProvidersToHealFrom(HealPortAllocationException):"},{"line_number":2458,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider %(rp_uuids)s are \""},{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. This script \""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_8edd7bde","line":2457,"range":{"start_line":2457,"start_character":25,"end_line":2457,"end_character":34},"updated":"2019-06-27 18:50:06.000000000","message":"Provider","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2454,"context_line":"    msg_fmt \u003d _(\"Healing port allocation failed.\")"},{"line_number":2455,"context_line":""},{"line_number":2456,"context_line":""},{"line_number":2457,"context_line":"class MoreThanOneResourceProvidersToHealFrom(HealPortAllocationException):"},{"line_number":2458,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider %(rp_uuids)s are \""},{"line_number":2459,"context_line":"                
\"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. This script \""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_38758c64","line":2457,"range":{"start_line":2457,"start_character":25,"end_line":2457,"end_character":34},"in_reply_to":"9fb8cfa7_8edd7bde","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":2455,"context_line":""},{"line_number":2456,"context_line":""},{"line_number":2457,"context_line":"class MoreThanOneResourceProvidersToHealFrom(HealPortAllocationException):"},{"line_number":2458,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider %(rp_uuids)s are \""},{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
This script \""},{"line_number":2461,"context_line":"                \"doesn\u0027t have enough information to select the proper \""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_ae05df47","line":2458,"range":{"start_line":2458,"start_character":71,"end_line":2458,"end_character":74},"updated":"2019-06-27 18:50:06.000000000","message":"is","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2455,"context_line":""},{"line_number":2456,"context_line":""},{"line_number":2457,"context_line":"class MoreThanOneResourceProvidersToHealFrom(HealPortAllocationException):"},{"line_number":2458,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider %(rp_uuids)s are \""},{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
This script \""},{"line_number":2461,"context_line":"                \"doesn\u0027t have enough information to select the proper \""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_f86a947e","line":2458,"range":{"start_line":2458,"start_character":71,"end_line":2458,"end_character":74},"in_reply_to":"9fb8cfa7_ae05df47","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":2458,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider %(rp_uuids)s are \""},{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. This script \""},{"line_number":2461,"context_line":"                \"doesn\u0027t have enough information to select the proper \""},{"line_number":2462,"context_line":"                \"resource provider to heal from.\")"},{"line_number":2463,"context_line":""},{"line_number":2464,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_8819ee22","line":2461,"range":{"start_line":2461,"start_character":17,"end_line":2461,"end_character":24},"updated":"2019-06-27 21:36:12.000000000","message":"\"does not\" - avoid contractions in translatable messages or things a user could see.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2458,"context_line":"    msg_fmt \u003d _(\"More than one matching resource provider 
%(rp_uuids)s are \""},{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. This script \""},{"line_number":2461,"context_line":"                \"doesn\u0027t have enough information to select the proper \""},{"line_number":2462,"context_line":"                \"resource provider to heal from.\")"},{"line_number":2463,"context_line":""},{"line_number":2464,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_d809d0dc","line":2461,"range":{"start_line":2461,"start_character":17,"end_line":2461,"end_character":24},"in_reply_to":"9fb8cfa7_8819ee22","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
This script \""},{"line_number":2461,"context_line":"                \"doesn\u0027t have enough information to select the proper \""},{"line_number":2462,"context_line":"                \"resource provider to heal from.\")"},{"line_number":2463,"context_line":""},{"line_number":2464,"context_line":""},{"line_number":2465,"context_line":"class NoResourceProviderToHealFrom(HealPortAllocationException):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_48d5361b","line":2462,"range":{"start_line":2462,"start_character":35,"end_line":2462,"end_character":47},"updated":"2019-06-27 21:36:12.000000000","message":"grammar nit: from which to heal","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
This script \""},{"line_number":2461,"context_line":"                \"doesn\u0027t have enough information to select the proper \""},{"line_number":2462,"context_line":"                \"resource provider to heal from.\")"},{"line_number":2463,"context_line":""},{"line_number":2464,"context_line":""},{"line_number":2465,"context_line":"class NoResourceProviderToHealFrom(HealPortAllocationException):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_38fa6cbc","line":2462,"range":{"start_line":2462,"start_character":35,"end_line":2462,"end_character":47},"in_reply_to":"9fb8cfa7_48d5361b","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":2467,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2468,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
There are no \""},{"line_number":2469,"context_line":"                \"resource providers with matching traits %(traits)s in the \""},{"line_number":2470,"context_line":"                \"provider tree of the resource provider %(rp_uuid)s .\")"},{"line_number":2471,"context_line":""},{"line_number":2472,"context_line":""},{"line_number":2473,"context_line":"class UnableToQueryPorts(HealPortAllocationException):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_2ec00fe1","line":2470,"range":{"start_line":2470,"start_character":58,"end_line":2470,"end_character":65},"updated":"2019-06-27 18:50:06.000000000","message":"I would call this node_uuid","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2467,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2468,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
There are no \""},{"line_number":2469,"context_line":"                \"resource providers with matching traits %(traits)s in the \""},{"line_number":2470,"context_line":"                \"provider tree of the resource provider %(rp_uuid)s .\")"},{"line_number":2471,"context_line":""},{"line_number":2472,"context_line":""},{"line_number":2473,"context_line":"class UnableToQueryPorts(HealPortAllocationException):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_18fbe8bd","line":2470,"range":{"start_line":2470,"start_character":58,"end_line":2470,"end_character":65},"in_reply_to":"9fb8cfa7_2ec00fe1","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":2471,"context_line":""},{"line_number":2472,"context_line":""},{"line_number":2473,"context_line":"class UnableToQueryPorts(HealPortAllocationException):"},{"line_number":2474,"context_line":"    msg_fmt \u003d _(\"Unable to query ports for instance %(instance_uuid)s due to \""},{"line_number":2475,"context_line":"                \"%(error)s\")"},{"line_number":2476,"context_line":""},{"line_number":2477,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_4e68e3f3","line":2474,"range":{"start_line":2474,"start_character":69,"end_line":2474,"end_character":76},"updated":"2019-06-27 18:50:06.000000000","message":"s/ due to/:/\n\notherwise you\u0027re relying on sentence compounding working properly in other languages","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2471,"context_line":""},{"line_number":2472,"context_line":""},{"line_number":2473,"context_line":"class UnableToQueryPorts(HealPortAllocationException):"},{"line_number":2474,"context_line":"    msg_fmt \u003d _(\"Unable to query ports for instance %(instance_uuid)s due to \""},{"line_number":2475,"context_line":"                \"%(error)s\")"},{"line_number":2476,"context_line":""},{"line_number":2477,"context_line":""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_38234c58","line":2474,"range":{"start_line":2474,"start_character":69,"end_line":2474,"end_character":76},"in_reply_to":"9fb8cfa7_4e68e3f3","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":2477,"context_line":""},{"line_number":2478,"context_line":"class UnableToUpdatePorts(HealPortAllocationException):"},{"line_number":2479,"context_line":"    msg_fmt \u003d _(\"Unable to update ports with allocations that are created in \""},{"line_number":2480,"context_line":"                \"placement due to %(error)s Make sure that the \""},{"line_number":2481,"context_line":"                \"binding:profile.allocation key of the ports are manually \""},{"line_number":2482,"context_line":"                \"updated in neutron with the following CLI commands before \""},{"line_number":2483,"context_line":"                \"you run the healing script again. 
If you re-run the script \""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_0e6e6bff","line":2480,"range":{"start_line":2480,"start_character":26,"end_line":2480,"end_character":43},"updated":"2019-06-27 18:50:06.000000000","message":"likewise","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":2477,"context_line":""},{"line_number":2478,"context_line":"class UnableToUpdatePorts(HealPortAllocationException):"},{"line_number":2479,"context_line":"    msg_fmt \u003d _(\"Unable to update ports with allocations that are created in \""},{"line_number":2480,"context_line":"                \"placement due to %(error)s Make sure that the \""},{"line_number":2481,"context_line":"                \"binding:profile.allocation key of the ports are manually \""},{"line_number":2482,"context_line":"                \"updated in neutron with the following CLI commands before \""},{"line_number":2483,"context_line":"                \"you run the healing script again. If you re-run the script \""}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_78da244b","line":2480,"range":{"start_line":2480,"start_character":26,"end_line":2480,"end_character":43},"in_reply_to":"9fb8cfa7_0e6e6bff","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2471,"context_line":"                \"This probably means that the neutron QoS configuration is \""},{"line_number":2472,"context_line":"                \"wrong. 
Consult with \""},{"line_number":2473,"context_line":"                \"https://docs.openstack.org/neutron/latest/admin/\""},{"line_number":2474,"context_line":"                \"config-qos-min-bw.html for information how to configure \""},{"line_number":2475,"context_line":"                \"neutron. If the configuration is fixed the script can be run \""},{"line_number":2476,"context_line":"                \"again.\")"},{"line_number":2477,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_e6e2de01","line":2474,"range":{"start_line":2474,"start_character":56,"end_line":2474,"end_character":59},"updated":"2019-07-03 16:49:22.000000000","message":"nit: \"on how\"","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2471,"context_line":"                \"This probably means that the neutron QoS configuration is \""},{"line_number":2472,"context_line":"                \"wrong. Consult with \""},{"line_number":2473,"context_line":"                \"https://docs.openstack.org/neutron/latest/admin/\""},{"line_number":2474,"context_line":"                \"config-qos-min-bw.html for information how to configure \""},{"line_number":2475,"context_line":"                \"neutron. 
If the configuration is fixed the script can be run \""},{"line_number":2476,"context_line":"                \"again.\")"},{"line_number":2477,"context_line":""}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_94a0e6a7","line":2474,"range":{"start_line":2474,"start_character":56,"end_line":2474,"end_character":59},"in_reply_to":"7faddb67_e6e2de01","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2494,"context_line":"                \"also failed: %(error)s. Make sure that the \""},{"line_number":2495,"context_line":"                \"binding:profile.allocation key of the affected ports \""},{"line_number":2496,"context_line":"                \"%(port_uuids)s are manually cleaned in neutron according to \""},{"line_number":2497,"context_line":"                \"document https://docs.openstack.org/nova/rocky/cli/\""},{"line_number":2498,"context_line":"                \"nova-manage.html#placement. If you re-run the script without \""},{"line_number":2499,"context_line":"                \"the manual fix then the missing allocation for these ports \""},{"line_number":2500,"context_line":"                \"will not be healed in placement.\")"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_e6517e64","line":2497,"range":{"start_line":2497,"start_character":58,"end_line":2497,"end_character":63},"updated":"2019-07-03 16:49:22.000000000","message":"Do you mean \"latest\" here? 
Because if you\u0027re referring to:\n\nhttps://review.opendev.org/#/c/637955/30/doc/source/cli/nova-manage.rst@440\n\nThen that won\u0027t be in the rocky docs.","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2494,"context_line":"                \"also failed: %(error)s. Make sure that the \""},{"line_number":2495,"context_line":"                \"binding:profile.allocation key of the affected ports \""},{"line_number":2496,"context_line":"                \"%(port_uuids)s are manually cleaned in neutron according to \""},{"line_number":2497,"context_line":"                \"document https://docs.openstack.org/nova/rocky/cli/\""},{"line_number":2498,"context_line":"                \"nova-manage.html#placement. If you re-run the script without \""},{"line_number":2499,"context_line":"                \"the manual fix then the missing allocation for these ports \""},{"line_number":2500,"context_line":"                \"will not be healed in placement.\")"}],"source_content_type":"text/x-python","patch_set":30,"id":"7faddb67_74af6a94","line":2497,"range":{"start_line":2497,"start_character":58,"end_line":2497,"end_character":63},"in_reply_to":"7faddb67_e6517e64","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"85fe1d2a110ee70076ed3acf0eae2b902e84f374","unresolved":false,"context_lines":[{"line_number":2459,"context_line":"                \"available for healing the port allocation for port \""},{"line_number":2460,"context_line":"                \"%(port_id)s for instance %(instance_uuid)s. 
There are no \""},{"line_number":2461,"context_line":"                \"resource providers with matching traits %(traits)s in the \""},{"line_number":2462,"context_line":"                \"provider tree of the resource provider %(node_uuid)s .\""},{"line_number":2463,"context_line":"                \"This probably means that the neutron QoS configuration is \""},{"line_number":2464,"context_line":"                \"wrong. Consult with \""},{"line_number":2465,"context_line":"                \"https://docs.openstack.org/neutron/latest/admin/\""}],"source_content_type":"text/x-python","patch_set":36,"id":"7faddb67_4b2a2e2c","line":2462,"range":{"start_line":2462,"start_character":69,"end_line":2462,"end_character":71},"updated":"2019-07-15 22:37:45.000000000","message":"`. `","commit_id":"54dea2531c887f77e4b7a8e7edb978d8f1ccfe50"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"85fe1d2a110ee70076ed3acf0eae2b902e84f374","unresolved":false,"context_lines":[{"line_number":2485,"context_line":"                \"automatic rollback of the previously successful port updates \""},{"line_number":2486,"context_line":"                \"also failed: %(error)s. Make sure that the \""},{"line_number":2487,"context_line":"                \"binding:profile.allocation key of the affected ports \""},{"line_number":2488,"context_line":"                \"%(port_uuids)s are manually cleaned in neutron according to \""},{"line_number":2489,"context_line":"                \"document https://docs.openstack.org/nova/latest/cli/\""},{"line_number":2490,"context_line":"                \"nova-manage.html#placement. 
If you re-run the script without \""},{"line_number":2491,"context_line":"                \"the manual fix then the missing allocation for these ports \""}],"source_content_type":"text/x-python","patch_set":36,"id":"7faddb67_cb15bee6","line":2488,"range":{"start_line":2488,"start_character":32,"end_line":2488,"end_character":35},"updated":"2019-07-15 22:37:45.000000000","message":"is (though neither reads especially well)","commit_id":"54dea2531c887f77e4b7a8e7edb978d8f1ccfe50"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"85fe1d2a110ee70076ed3acf0eae2b902e84f374","unresolved":false,"context_lines":[{"line_number":2488,"context_line":"                \"%(port_uuids)s are manually cleaned in neutron according to \""},{"line_number":2489,"context_line":"                \"document https://docs.openstack.org/nova/latest/cli/\""},{"line_number":2490,"context_line":"                \"nova-manage.html#placement. If you re-run the script without \""},{"line_number":2491,"context_line":"                \"the manual fix then the missing allocation for these ports \""},{"line_number":2492,"context_line":"                \"will not be healed in placement.\")"}],"source_content_type":"text/x-python","patch_set":36,"id":"7faddb67_6bd4ca20","line":2491,"range":{"start_line":2491,"start_character":49,"end_line":2491,"end_character":59},"updated":"2019-07-15 22:37:45.000000000","message":"allocations","commit_id":"54dea2531c887f77e4b7a8e7edb978d8f1ccfe50"}],"nova/scheduler/client/report.py":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":491,"context_line":"        LOG.error(msg, args)"},{"line_number":492,"context_line":"        raise exception.ResourceProviderRetrievalFailed(message\u003dmsg % 
args)"},{"line_number":493,"context_line":""},{"line_number":494,"context_line":"    @safe_connect"},{"line_number":495,"context_line":"    def get_providers_in_tree(self, context, uuid):"},{"line_number":496,"context_line":"        \"\"\"Queries the placement API for a list of the resource providers in"},{"line_number":497,"context_line":"        the tree associated with the specified UUID."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_0e474b79","line":494,"range":{"start_line":494,"start_character":4,"end_line":494,"end_character":17},"updated":"2019-06-27 18:50:06.000000000","message":"Sure would prefer to get rid of this before we add more things that use it.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":491,"context_line":"        LOG.error(msg, args)"},{"line_number":492,"context_line":"        raise exception.ResourceProviderRetrievalFailed(message\u003dmsg % args)"},{"line_number":493,"context_line":""},{"line_number":494,"context_line":"    @safe_connect"},{"line_number":495,"context_line":"    def get_providers_in_tree(self, context, uuid):"},{"line_number":496,"context_line":"        \"\"\"Queries the placement API for a list of the resource providers in"},{"line_number":497,"context_line":"        the tree associated with the specified UUID."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_280a421f","line":494,"range":{"start_line":494,"start_character":4,"end_line":494,"end_character":17},"in_reply_to":"9fb8cfa7_0e474b79","updated":"2019-06-27 21:36:12.000000000","message":"If we do that I would suggest splitting that work out into a separate patch because this one is already really big. 
I think it would be as easy as adding a public method that wraps this one and if this private method returns None then raise ResourceProviderRetrievalFailed - or do like what Eric talked me into doing recently here:\n\nI504c374d3863a2a956d5c0156a43be2d2a2bc712\n\nI\u0027d probably split this change out of the large patch anyway just to make it smaller for the cognitive load on reviewers.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"5ceae3c6aec41d9925337a9b6facb6eb38af55e5","unresolved":false,"context_lines":[{"line_number":491,"context_line":"        LOG.error(msg, args)"},{"line_number":492,"context_line":"        raise exception.ResourceProviderRetrievalFailed(message\u003dmsg % args)"},{"line_number":493,"context_line":""},{"line_number":494,"context_line":"    @safe_connect"},{"line_number":495,"context_line":"    def get_providers_in_tree(self, context, uuid):"},{"line_number":496,"context_line":"        \"\"\"Queries the placement API for a list of the resource providers in"},{"line_number":497,"context_line":"        the tree associated with the specified UUID."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_eb70a474","line":494,"range":{"start_line":494,"start_character":4,"end_line":494,"end_character":17},"in_reply_to":"9fb8cfa7_280a421f","updated":"2019-06-27 22:15:11.000000000","message":"Okay, I made this for you [1]. Feel free to fold it into the series below this patch. 
(It\u0027ll make the delta smaller, because it covers the rename as well.)\n\n[1] https://review.opendev.org/668062","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"46628177a1d784678de551128c4df04dbf0e91fa","unresolved":false,"context_lines":[{"line_number":491,"context_line":"        LOG.error(msg, args)"},{"line_number":492,"context_line":"        raise exception.ResourceProviderRetrievalFailed(message\u003dmsg % args)"},{"line_number":493,"context_line":""},{"line_number":494,"context_line":"    @safe_connect"},{"line_number":495,"context_line":"    def get_providers_in_tree(self, context, uuid):"},{"line_number":496,"context_line":"        \"\"\"Queries the placement API for a list of the resource providers in"},{"line_number":497,"context_line":"        the tree associated with the specified UUID."}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_414f516a","line":494,"range":{"start_line":494,"start_character":4,"end_line":494,"end_character":17},"in_reply_to":"9fb8cfa7_eb70a474","updated":"2019-06-28 01:26:29.000000000","message":"Thanks, +2.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"7cb3eb55addb3305235079fe7a9a3a44d2534ace","unresolved":false,"context_lines":[{"line_number":1802,"context_line":"            return False"},{"line_number":1803,"context_line":""},{"line_number":1804,"context_line":"        rps \u003d self.get_providers_in_tree(context, root_rp_uuid)"},{"line_number":1805,"context_line":"        rp_uuids \u003d [rp[\u0027uuid\u0027] for rp in rps]"},{"line_number":1806,"context_line":""},{"line_number":1807,"context_line":"        # go through the current allocations and remove every RP from it that"},{"line_number":1808,"context_line":"        # 
belongs to the RP tree identified by the root_rp_uuid parameter"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_a82e3289","line":1805,"range":{"start_line":1805,"start_character":41,"end_line":1805,"end_character":44},"updated":"2019-06-27 21:36:12.000000000","message":"Unrelated to this change, but now that I\u0027m thinking about it - this could be None if @safe_connect on get_providers_in_tree hits a client error.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"5ceae3c6aec41d9925337a9b6facb6eb38af55e5","unresolved":false,"context_lines":[{"line_number":1802,"context_line":"            return False"},{"line_number":1803,"context_line":""},{"line_number":1804,"context_line":"        rps \u003d self.get_providers_in_tree(context, root_rp_uuid)"},{"line_number":1805,"context_line":"        rp_uuids \u003d [rp[\u0027uuid\u0027] for rp in rps]"},{"line_number":1806,"context_line":""},{"line_number":1807,"context_line":"        # go through the current allocations and remove every RP from it that"},{"line_number":1808,"context_line":"        # belongs to the RP tree identified by the root_rp_uuid parameter"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_ebf504cc","line":1805,"range":{"start_line":1805,"start_character":41,"end_line":1805,"end_character":44},"in_reply_to":"9fb8cfa7_a82e3289","updated":"2019-06-27 22:15:11.000000000","message":"Yup, which will cause L1805 to raise a TypeError instead of L1804 raising ClientException. 
The latter makes more sense, and is how I \"handled\" it in the aforementioned patch.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"}],"nova/tests/functional/test_nova_manage.py":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":784,"context_line":"            networks\u003d[{\u0027port\u0027: port[\u0027id\u0027]}])"},{"line_number":785,"context_line":"        server \u003d self._wait_for_state_change(self.admin_api, server, \u0027ACTIVE\u0027)"},{"line_number":786,"context_line":""},{"line_number":787,"context_line":"        # Its a hack to simulate that we have a server that missing allocation"},{"line_number":788,"context_line":"        # for its port"},{"line_number":789,"context_line":"        self._add_resource_request_to_a_bound_port("},{"line_number":790,"context_line":"            self.neutron.port_1[\u0027id\u0027], resource_request)"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_ae615f07","line":787,"range":{"start_line":787,"start_character":10,"end_line":787,"end_character":13},"updated":"2019-06-27 18:50:06.000000000","message":"This is","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":784,"context_line":"            networks\u003d[{\u0027port\u0027: port[\u0027id\u0027]}])"},{"line_number":785,"context_line":"        server \u003d self._wait_for_state_change(self.admin_api, server, \u0027ACTIVE\u0027)"},{"line_number":786,"context_line":""},{"line_number":787,"context_line":"        # Its a hack to simulate that we have a server that missing allocation"},{"line_number":788,"context_line":"        # for its 
port"},{"line_number":789,"context_line":"        self._add_resource_request_to_a_bound_port("},{"line_number":790,"context_line":"            self.neutron.port_1[\u0027id\u0027], resource_request)"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_8e525b34","line":787,"range":{"start_line":787,"start_character":55,"end_line":787,"end_character":78},"updated":"2019-06-27 18:50:06.000000000","message":"that\u0027s missing allocations","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":784,"context_line":"            networks\u003d[{\u0027port\u0027: port[\u0027id\u0027]}])"},{"line_number":785,"context_line":"        server \u003d self._wait_for_state_change(self.admin_api, server, \u0027ACTIVE\u0027)"},{"line_number":786,"context_line":""},{"line_number":787,"context_line":"        # Its a hack to simulate that we have a server that missing allocation"},{"line_number":788,"context_line":"        # for its port"},{"line_number":789,"context_line":"        self._add_resource_request_to_a_bound_port("},{"line_number":790,"context_line":"            self.neutron.port_1[\u0027id\u0027], resource_request)"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_d88eb02c","line":787,"range":{"start_line":787,"start_character":55,"end_line":787,"end_character":78},"in_reply_to":"9fb8cfa7_8e525b34","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":784,"context_line":"            
networks\u003d[{\u0027port\u0027: port[\u0027id\u0027]}])"},{"line_number":785,"context_line":"        server \u003d self._wait_for_state_change(self.admin_api, server, \u0027ACTIVE\u0027)"},{"line_number":786,"context_line":""},{"line_number":787,"context_line":"        # Its a hack to simulate that we have a server that missing allocation"},{"line_number":788,"context_line":"        # for its port"},{"line_number":789,"context_line":"        self._add_resource_request_to_a_bound_port("},{"line_number":790,"context_line":"            self.neutron.port_1[\u0027id\u0027], resource_request)"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_f89b746f","line":787,"range":{"start_line":787,"start_character":10,"end_line":787,"end_character":13},"in_reply_to":"9fb8cfa7_ae615f07","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":921,"context_line":"        server, updated_port \u003d self._create_server_with_missing_port_alloc("},{"line_number":922,"context_line":"            self.neutron.port_1)"},{"line_number":923,"context_line":""},{"line_number":924,"context_line":"        # delete the server allocation in placement to simulate that is needs"},{"line_number":925,"context_line":"        # to be healed"},{"line_number":926,"context_line":""},{"line_number":927,"context_line":"        # NOTE(gibi): putting empty allocation will delete the consumer in"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_6e05a713","line":924,"range":{"start_line":924,"start_character":69,"end_line":924,"end_character":71},"updated":"2019-06-27 18:50:06.000000000","message":"it","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs 
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":921,"context_line":"        server, updated_port \u003d self._create_server_with_missing_port_alloc("},{"line_number":922,"context_line":"            self.neutron.port_1)"},{"line_number":923,"context_line":""},{"line_number":924,"context_line":"        # delete the server allocation in placement to simulate that is needs"},{"line_number":925,"context_line":"        # to be healed"},{"line_number":926,"context_line":""},{"line_number":927,"context_line":"        # NOTE(gibi): putting empty allocation will delete the consumer in"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_b8909c44","line":924,"range":{"start_line":924,"start_character":69,"end_line":924,"end_character":71},"in_reply_to":"9fb8cfa7_6e05a713","updated":"2019-07-01 14:49:20.000000000","message":"Done","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"7c0dd4352eeb5edbbbfcff7feb3689b91a72cc8e","unresolved":false,"context_lines":[{"line_number":1149,"context_line":"             \u0027rp_uuid\u0027: self.ovs_bridge_rp_per_host[self.compute1_rp_uuid]},"},{"line_number":1150,"context_line":"            output)"},{"line_number":1151,"context_line":"        self.assertEqual(6, result)"},{"line_number":1152,"context_line":""},{"line_number":1153,"context_line":""},{"line_number":1154,"context_line":"class TestNovaManagePlacementSyncAggregates("},{"line_number":1155,"context_line":"        integrated_helpers.ProviderUsageBaseTestCase):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_9943833f","line":1152,"updated":"2019-06-27 18:50:06.000000000","message":"How about a test case for:\n\n- Stuff to heal both port and non-port related\n- Invoke with 
skip_port_allocations\u003dTrue\n- assert placement was updated, but only for the non-port related stuff\n- assert_neutron_not_updated\n- assertEqual(0, result)","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"4f74de86d0df58f4f064c227871dd5a13fbe9635","unresolved":false,"context_lines":[{"line_number":1149,"context_line":"             \u0027rp_uuid\u0027: self.ovs_bridge_rp_per_host[self.compute1_rp_uuid]},"},{"line_number":1150,"context_line":"            output)"},{"line_number":1151,"context_line":"        self.assertEqual(6, result)"},{"line_number":1152,"context_line":""},{"line_number":1153,"context_line":""},{"line_number":1154,"context_line":"class TestNovaManagePlacementSyncAggregates("},{"line_number":1155,"context_line":"        integrated_helpers.ProviderUsageBaseTestCase):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_d7abfa84","line":1152,"in_reply_to":"9fb8cfa7_9943833f","updated":"2019-06-28 12:38:06.000000000","message":"Good point. 
This case is missing.","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"1d5ccc06fcd4035cdb1ca9223ceb16a48fc3a07f","unresolved":false,"context_lines":[{"line_number":1149,"context_line":"             \u0027rp_uuid\u0027: self.ovs_bridge_rp_per_host[self.compute1_rp_uuid]},"},{"line_number":1150,"context_line":"            output)"},{"line_number":1151,"context_line":"        self.assertEqual(6, result)"},{"line_number":1152,"context_line":""},{"line_number":1153,"context_line":""},{"line_number":1154,"context_line":"class TestNovaManagePlacementSyncAggregates("},{"line_number":1155,"context_line":"        integrated_helpers.ProviderUsageBaseTestCase):"}],"source_content_type":"text/x-python","patch_set":28,"id":"9fb8cfa7_38402ca5","line":1152,"in_reply_to":"9fb8cfa7_d7abfa84","updated":"2019-07-01 14:49:20.000000000","message":"Done: test_skip_heal_port_allocation_but_heal_the_rest","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":814,"context_line":"        self.assertEqual(server[\u0027user_id\u0027], rsp[\u0027user_id\u0027])"},{"line_number":815,"context_line":""},{"line_number":816,"context_line":"        network_allocations \u003d allocations["},{"line_number":817,"context_line":"            self.ovs_bridge_rp_per_host[self.compute1_rp_uuid]][\u0027resources\u0027]"},{"line_number":818,"context_line":""},{"line_number":819,"context_line":"        # this code assumes that every port is allocated from the same OVS"},{"line_number":820,"context_line":"        # bridge 
RP"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_7c2b9381","line":817,"range":{"start_line":817,"start_character":17,"end_line":817,"end_character":39},"updated":"2019-07-09 16:23:43.000000000","message":"OK and this works because of the setup done in PortResourceRequestBasedSchedulingTestBase.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":814,"context_line":"        self.assertEqual(server[\u0027user_id\u0027], rsp[\u0027user_id\u0027])"},{"line_number":815,"context_line":""},{"line_number":816,"context_line":"        network_allocations \u003d allocations["},{"line_number":817,"context_line":"            self.ovs_bridge_rp_per_host[self.compute1_rp_uuid]][\u0027resources\u0027]"},{"line_number":818,"context_line":""},{"line_number":819,"context_line":"        # this code assumes that every port is allocated from the same OVS"},{"line_number":820,"context_line":"        # bridge RP"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_35bf5e62","line":817,"range":{"start_line":817,"start_character":17,"end_line":817,"end_character":39},"in_reply_to":"7faddb67_7c2b9381","updated":"2019-07-11 09:38:49.000000000","message":"yes","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":865,"context_line":"            [self.neutron.port_1])"},{"line_number":866,"context_line":""},{"line_number":867,"context_line":"        # let\u0027s trigger a heal"},{"line_number":868,"context_line":"        result \u003d self.cli.heal_allocations(verbose\u003dTrue, 
max_count\u003d2)"},{"line_number":869,"context_line":""},{"line_number":870,"context_line":"        self._assert_placement_updated(server, ports)"},{"line_number":871,"context_line":"        self._assert_ports_updated(ports)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_ba310e0e","line":868,"updated":"2019-07-09 16:23:43.000000000","message":"nit: this is where you could use something like this:\n\nhttps://review.opendev.org/#/c/655908/4/nova/tests/functional/test_nova_manage.py@365","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":865,"context_line":"            [self.neutron.port_1])"},{"line_number":866,"context_line":""},{"line_number":867,"context_line":"        # let\u0027s trigger a heal"},{"line_number":868,"context_line":"        result \u003d self.cli.heal_allocations(verbose\u003dTrue, max_count\u003d2)"},{"line_number":869,"context_line":""},{"line_number":870,"context_line":"        self._assert_placement_updated(server, ports)"},{"line_number":871,"context_line":"        self._assert_ports_updated(ports)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_759396fb","line":868,"in_reply_to":"7faddb67_ba310e0e","updated":"2019-07-11 09:38:49.000000000","message":"Sorry I don\u0027t see what would be the real benefit of the extra helper.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":890,"context_line":"        self._assert_ports_not_updated(ports)"},{"line_number":891,"context_line":""},{"line_number":892,"context_line":"        
self.assertNotIn("},{"line_number":893,"context_line":"            \u0027Successfully updated port allocations\u0027,"},{"line_number":894,"context_line":"            self.output.getvalue())"},{"line_number":895,"context_line":"        self.assertEqual(4, result)"},{"line_number":896,"context_line":""}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_bc5eeb0f","line":893,"range":{"start_line":893,"start_character":34,"end_line":893,"end_character":38},"updated":"2019-07-09 16:23:43.000000000","message":"Is this always true? I don\u0027t see this string in the code. Looks like instead you should be checking:\n\nself.assertIn(\u0027Nothing to be healed\u0027, self.output.getvalue())","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":890,"context_line":"        self._assert_ports_not_updated(ports)"},{"line_number":891,"context_line":""},{"line_number":892,"context_line":"        self.assertNotIn("},{"line_number":893,"context_line":"            \u0027Successfully updated port allocations\u0027,"},{"line_number":894,"context_line":"            self.output.getvalue())"},{"line_number":895,"context_line":"        self.assertEqual(4, result)"},{"line_number":896,"context_line":""}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_d83721e3","line":893,"range":{"start_line":893,"start_character":34,"end_line":893,"end_character":38},"in_reply_to":"7faddb67_bc5eeb0f","updated":"2019-07-11 09:38:49.000000000","message":"Done","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":895,"context_line":"        self.assertEqual(4, result)"},{"line_number":896,"context_line":""},{"line_number":897,"context_line":"    def test_skip_heal_port_allocation_but_heal_the_rest(self):"},{"line_number":898,"context_line":"        \"\"\"Test that the instance doesn\u0027t have allocation at, needs"},{"line_number":899,"context_line":"        allocation for ports as well, but only heal the non port related"},{"line_number":900,"context_line":"        allocation."},{"line_number":901,"context_line":"        \"\"\""}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_3c819b4b","line":898,"range":{"start_line":898,"start_character":58,"end_line":898,"end_character":60},"updated":"2019-07-09 16:23:43.000000000","message":"at all?","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":895,"context_line":"        self.assertEqual(4, result)"},{"line_number":896,"context_line":""},{"line_number":897,"context_line":"    def test_skip_heal_port_allocation_but_heal_the_rest(self):"},{"line_number":898,"context_line":"        \"\"\"Test that the instance doesn\u0027t have allocation at, needs"},{"line_number":899,"context_line":"        allocation for ports as well, but only heal the non port related"},{"line_number":900,"context_line":"        allocation."},{"line_number":901,"context_line":"        \"\"\""}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_b841053c","line":898,"range":{"start_line":898,"start_character":58,"end_line":898,"end_character":60},"in_reply_to":"7faddb67_3c819b4b","updated":"2019-07-11 
09:38:49.000000000","message":"Done","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":936,"context_line":""},{"line_number":937,"context_line":"        # override allocation with  placement microversion \u003c1.8 to simulate"},{"line_number":938,"context_line":"        # missing project_id and user_id"},{"line_number":939,"context_line":"        alloc_body \u003d {"},{"line_number":940,"context_line":"            \"allocations\": ["},{"line_number":941,"context_line":"                {"},{"line_number":942,"context_line":"                    \"resource_provider\": {"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_3c5d1be9","line":939,"updated":"2019-07-09 16:23:43.000000000","message":"Alternatively you could have gotten the allocations dict from 1.0 and just PUT it back right?","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":936,"context_line":""},{"line_number":937,"context_line":"        # override allocation with  placement microversion \u003c1.8 to simulate"},{"line_number":938,"context_line":"        # missing project_id and user_id"},{"line_number":939,"context_line":"        alloc_body \u003d {"},{"line_number":940,"context_line":"            \"allocations\": ["},{"line_number":941,"context_line":"                {"},{"line_number":942,"context_line":"                    \"resource_provider\": {"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_587a5188","line":939,"in_reply_to":"7faddb67_3c5d1be9","updated":"2019-07-11 
09:38:49.000000000","message":"Yeah, I could get the current allocation in the old format and put it back.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":992,"context_line":""},{"line_number":993,"context_line":"        output \u003d self.output.getvalue()"},{"line_number":994,"context_line":"        self.assertIn("},{"line_number":995,"context_line":"            \u0027Successfully updated allocations for instance\u0027, output)"},{"line_number":996,"context_line":"        self.assertEqual(0, result)"},{"line_number":997,"context_line":""},{"line_number":998,"context_line":"    def test_heal_port_allocation_not_enough_resources_for_port(self):"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_fc1c6314","line":995,"range":{"start_line":995,"start_character":26,"end_line":995,"end_character":33},"updated":"2019-07-09 16:23:43.000000000","message":"Why would this be updated if the instance didn\u0027t have any allocations (would expect created here).\n\nI guess because of this:\n\nhttps://review.opendev.org/#/c/637955/34/nova/cmd/manage.py@2053\n\nMaybe that logic should be:\n\nneed_healing \u003d \u0027Update\u0027 if not need_healing else need_healing","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":992,"context_line":""},{"line_number":993,"context_line":"        output \u003d self.output.getvalue()"},{"line_number":994,"context_line":"        self.assertIn("},{"line_number":995,"context_line":"            \u0027Successfully updated allocations for instance\u0027, 
output)"},{"line_number":996,"context_line":"        self.assertEqual(0, result)"},{"line_number":997,"context_line":""},{"line_number":998,"context_line":"    def test_heal_port_allocation_not_enough_resources_for_port(self):"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_78c62d4b","line":995,"range":{"start_line":995,"start_character":26,"end_line":995,"end_character":33},"in_reply_to":"7faddb67_fc1c6314","updated":"2019-07-11 09:38:49.000000000","message":"you are right. done.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":1137,"context_line":"        orig_update_port \u003d self.neutron.update_port"},{"line_number":1138,"context_line":"        update \u003d []"},{"line_number":1139,"context_line":""},{"line_number":1140,"context_line":"        def fake_update_port(*args, **kwargs):"},{"line_number":1141,"context_line":"            if len(update) \u003d\u003d 0 or len(update) \u003e 1:"},{"line_number":1142,"context_line":"                update.append(True)"},{"line_number":1143,"context_line":"                return orig_update_port(*args, **kwargs)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_fc78e32b","line":1140,"updated":"2019-07-09 16:23:43.000000000","message":"nit: I\u0027m not sure you need this for the side_effect function, can\u0027t you just do:\n\nside_effect\u003d(None, neutron_client_exc.Forbidden, None)\n\n? That means the mock is called thrice, passes on the first call, fails on the second, and passes on the third (the rollback).\n\nOr do you need the function to pass through the call to self.neutron.update_port? I feel like this is where the wraps kwarg on a Mock comes in, but I\u0027m not experienced using wraps with mock. 
Anyway, you don\u0027t need to change this, I\u0027m just thinking out loud.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":1137,"context_line":"        orig_update_port \u003d self.neutron.update_port"},{"line_number":1138,"context_line":"        update \u003d []"},{"line_number":1139,"context_line":""},{"line_number":1140,"context_line":"        def fake_update_port(*args, **kwargs):"},{"line_number":1141,"context_line":"            if len(update) \u003d\u003d 0 or len(update) \u003e 1:"},{"line_number":1142,"context_line":"                update.append(True)"},{"line_number":1143,"context_line":"                return orig_update_port(*args, **kwargs)"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_1804d97a","line":1140,"in_reply_to":"7faddb67_fc78e32b","updated":"2019-07-11 09:38:49.000000000","message":"With the above change the test would not detect if the code  update the port but missed to roll it back because those update_port calls would be not recorded in the neutron fixture.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d8041109f74fc7dd21aa2db622358decdef0253c","unresolved":false,"context_lines":[{"line_number":1191,"context_line":""},{"line_number":1192,"context_line":"        self._assert_placement_not_updated(server)"},{"line_number":1193,"context_line":""},{"line_number":1194,"context_line":"        # the order of the ports is random due to usage of dicts so we"},{"line_number":1195,"context_line":"        # need the info from the fake_update_port that which port update"},{"line_number":1196,"context_line":"        # 
failed"},{"line_number":1197,"context_line":"        # the first port update was successful and the rollback of it"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_1ce7ffe6","line":1194,"range":{"start_line":1194,"start_character":59,"end_line":1194,"end_character":64},"updated":"2019-07-09 16:23:43.000000000","message":"If you wanted to make the test predictable you could use an OrderedDict in the code, but maybe it\u0027s unnecessary.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"60097a007ce9a32ba156ee2152d89d7ed040b39a","unresolved":false,"context_lines":[{"line_number":1191,"context_line":""},{"line_number":1192,"context_line":"        self._assert_placement_not_updated(server)"},{"line_number":1193,"context_line":""},{"line_number":1194,"context_line":"        # the order of the ports is random due to usage of dicts so we"},{"line_number":1195,"context_line":"        # need the info from the fake_update_port that which port update"},{"line_number":1196,"context_line":"        # failed"},{"line_number":1197,"context_line":"        # the first port update was successful and the rollback of it"}],"source_content_type":"text/x-python","patch_set":34,"id":"7faddb67_d8f5e184","line":1194,"range":{"start_line":1194,"start_character":59,"end_line":1194,"end_character":64},"in_reply_to":"7faddb67_1ce7ffe6","updated":"2019-07-11 09:38:49.000000000","message":"I guess the order of the list of ports returned from neutron also undefined so it is better not to rely on any ordering.","commit_id":"3e14b1b658b3b7eb1543d0a0f90bb4b8075d0df0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":757,"context_line":"        self.output \u003d 
StringIO()"},{"line_number":758,"context_line":"        self.useFixture(fixtures.MonkeyPatch(\u0027sys.stdout\u0027, self.output))"},{"line_number":759,"context_line":""},{"line_number":760,"context_line":"        # Make it easier to debug failed test cases"},{"line_number":761,"context_line":"        def print_stdout_at_cleanup():"},{"line_number":762,"context_line":"            import sys"},{"line_number":763,"context_line":"            sys.stderr.write(self.output.getvalue())"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_fb175ea2","line":760,"updated":"2019-07-11 17:04:24.000000000","message":"Hmm, this does make all tests dump the output even if they passed:\n\nhttp://logs.openstack.org/55/637955/35/check/nova-tox-functional/2e417c4/job-output.txt.gz#_2019-07-11_10_02_49_050308\n\nThat could be a problem for our subunit parser bug that likes to pop up from time to time. Or just the size of the console logs in general to index in logstash.\n\nI guess the problem is when a test case fails it\u0027ll only dump stderr which won\u0027t have the useful information? For the other heal_allocations tests I\u0027ve always dumped the output in the assertion methods, e.g. self.assertEqual(x, y, self.output.getvalue()) but it\u0027s kind of annoying.\n\nAnyway, maybe this isn\u0027t a big deal, I just noticed it. 
Unfortunately I\u0027m not sure if there is a great way to tell during tearDown if the test failed.\n\nMaybe we could hook into this?\n\nhttps://github.com/testing-cabal/testtools/blob/master/testtools/testcase.py#L385\n\nAnd then add an exception handler to dump stdout to stderr on a test case failing rather than always.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0a133ffe47442c3da6f04052f9b81fddbecdacee","unresolved":false,"context_lines":[{"line_number":757,"context_line":"        self.output \u003d StringIO()"},{"line_number":758,"context_line":"        self.useFixture(fixtures.MonkeyPatch(\u0027sys.stdout\u0027, self.output))"},{"line_number":759,"context_line":""},{"line_number":760,"context_line":"        # Make it easier to debug failed test cases"},{"line_number":761,"context_line":"        def print_stdout_at_cleanup():"},{"line_number":762,"context_line":"            import sys"},{"line_number":763,"context_line":"            sys.stderr.write(self.output.getvalue())"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_162fafa1","line":760,"in_reply_to":"7faddb67_3672eb12","updated":"2019-07-11 18:21:56.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"d05c0bad3c1f9598afe26d98b1f46bba6971efb5","unresolved":false,"context_lines":[{"line_number":757,"context_line":"        self.output \u003d StringIO()"},{"line_number":758,"context_line":"        self.useFixture(fixtures.MonkeyPatch(\u0027sys.stdout\u0027, self.output))"},{"line_number":759,"context_line":""},{"line_number":760,"context_line":"        # Make it easier to debug failed test cases"},{"line_number":761,"context_line":"        def 
print_stdout_at_cleanup():"},{"line_number":762,"context_line":"            import sys"},{"line_number":763,"context_line":"            sys.stderr.write(self.output.getvalue())"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_3672eb12","line":760,"in_reply_to":"7faddb67_fb175ea2","updated":"2019-07-11 18:04:43.000000000","message":"Yeah addOnException is what we want here, I did it locally and made a test fail to show it working:\n\nhttp://paste.openstack.org/show/754309/\n\nIn the success case where nothing fails there is nothing in the stderr output like in the console logs above.\n\nHaving said that, we can address it in a follow up.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":1223,"context_line":"                raise neutron_client_exc.Forbidden()"},{"line_number":1224,"context_line":""},{"line_number":1225,"context_line":"        with mock.patch.object("},{"line_number":1226,"context_line":"                self.neutron, \"update_port\", side_effect\u003dfake_update_port):"},{"line_number":1227,"context_line":"            # let\u0027s trigger a heal"},{"line_number":1228,"context_line":"            result \u003d self.cli.heal_allocations(verbose\u003dTrue, max_count\u003d2)"},{"line_number":1229,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_7bc7ee46","line":1226,"range":{"start_line":1226,"start_character":57,"end_line":1226,"end_character":73},"updated":"2019-07-11 17:04:24.000000000","message":"nit: might be good to assert this was called exactly 5 times per the counting within the fake method.","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt 
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0a133ffe47442c3da6f04052f9b81fddbecdacee","unresolved":false,"context_lines":[{"line_number":1223,"context_line":"                raise neutron_client_exc.Forbidden()"},{"line_number":1224,"context_line":""},{"line_number":1225,"context_line":"        with mock.patch.object("},{"line_number":1226,"context_line":"                self.neutron, \"update_port\", side_effect\u003dfake_update_port):"},{"line_number":1227,"context_line":"            # let\u0027s trigger a heal"},{"line_number":1228,"context_line":"            result \u003d self.cli.heal_allocations(verbose\u003dTrue, max_count\u003d2)"},{"line_number":1229,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_362aeb8f","line":1226,"range":{"start_line":1226,"start_character":57,"end_line":1226,"end_character":73},"in_reply_to":"7faddb67_7bc7ee46","updated":"2019-07-11 18:21:56.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"}],"nova/tests/functional/test_report_client.py":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"5ceae3c6aec41d9925337a9b6facb6eb38af55e5","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":28,"id":"9fb8cfa7_6b3354ac","updated":"2019-06-27 22:15:11.000000000","message":"this...","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"}],"nova/tests/unit/scheduler/client/test_report.py":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"5ceae3c6aec41d9925337a9b6facb6eb38af55e5","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":28,"id":"9fb8cfa7_2b29dc35","updated":"2019-06-27 22:15:11.000000000","message":"...and this will go away if you fold [1] in below this patch.\n\n[1] 
https://review.opendev.org/668062","commit_id":"34e9e6782385f86cbffccf17c36414896fb4daae"}],"nova/tests/unit/test_nova_manage.py":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2791,"context_line":"                      self.output.getvalue())"},{"line_number":2792,"context_line":"        self.assertIn(\"Conflict!\", self.output.getvalue())"},{"line_number":2793,"context_line":""},{"line_number":2794,"context_line":"    def test_has_request_but_no_allocation(self):"},{"line_number":2795,"context_line":"        self.assertFalse("},{"line_number":2796,"context_line":"            self.cli._has_request_but_no_allocation("},{"line_number":2797,"context_line":"                {"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_5b747268","line":2794,"updated":"2019-07-11 17:04:24.000000000","message":"nit: a code comment for each case would be nice","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0a133ffe47442c3da6f04052f9b81fddbecdacee","unresolved":false,"context_lines":[{"line_number":2791,"context_line":"                      self.output.getvalue())"},{"line_number":2792,"context_line":"        self.assertIn(\"Conflict!\", self.output.getvalue())"},{"line_number":2793,"context_line":""},{"line_number":2794,"context_line":"    def test_has_request_but_no_allocation(self):"},{"line_number":2795,"context_line":"        self.assertFalse("},{"line_number":2796,"context_line":"            self.cli._has_request_but_no_allocation("},{"line_number":2797,"context_line":"                {"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_d61a777b","line":2794,"in_reply_to":"7faddb67_5b747268","updated":"2019-07-11 
18:21:56.000000000","message":"Done","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2804,"context_line":"                            \u0027CUSTOM_VNIC_TYPE_NORMAL\u0027"},{"line_number":2805,"context_line":"                        ]"},{"line_number":2806,"context_line":"                    },"},{"line_number":2807,"context_line":"                    \u0027binding:profile\u0027: {\u0027allocation\u0027: uuidsentinel.rp1}"},{"line_number":2808,"context_line":"                }))"},{"line_number":2809,"context_line":"        self.assertTrue("},{"line_number":2810,"context_line":"            self.cli._has_request_but_no_allocation("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_1b6efa94","line":2807,"updated":"2019-07-11 17:04:24.000000000","message":"already allocated ✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2818,"context_line":"                            \u0027CUSTOM_VNIC_TYPE_NORMAL\u0027"},{"line_number":2819,"context_line":"                        ]"},{"line_number":2820,"context_line":"                    },"},{"line_number":2821,"context_line":"                    \u0027binding:profile\u0027: {}"},{"line_number":2822,"context_line":"                }))"},{"line_number":2823,"context_line":"        self.assertTrue("},{"line_number":2824,"context_line":"            self.cli._has_request_but_no_allocation("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_3b69768a","line":2821,"updated":"2019-07-11 17:04:24.000000000","message":"no allocation 
✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2832,"context_line":"                            \u0027CUSTOM_VNIC_TYPE_NORMAL\u0027"},{"line_number":2833,"context_line":"                        ]"},{"line_number":2834,"context_line":"                    },"},{"line_number":2835,"context_line":"                    \u0027binding:profile\u0027: None,"},{"line_number":2836,"context_line":"                }))"},{"line_number":2837,"context_line":"        self.assertFalse("},{"line_number":2838,"context_line":"            self.cli._has_request_but_no_allocation("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_fb08bed9","line":2835,"updated":"2019-07-11 17:04:24.000000000","message":"no allocation b/c no binding profile ✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2839,"context_line":"                {"},{"line_number":2840,"context_line":"                    \u0027id\u0027: uuidsentinel.empty_resources,"},{"line_number":2841,"context_line":"                    \u0027resource_request\u0027: {"},{"line_number":2842,"context_line":"                        \u0027resources\u0027: {},"},{"line_number":2843,"context_line":"                        \u0027required\u0027: ["},{"line_number":2844,"context_line":"                            \u0027CUSTOM_VNIC_TYPE_NORMAL\u0027"},{"line_number":2845,"context_line":"                        ]"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_bb124608","line":2842,"updated":"2019-07-11 17:04:24.000000000","message":"no resources 
✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2850,"context_line":"            self.cli._has_request_but_no_allocation("},{"line_number":2851,"context_line":"                {"},{"line_number":2852,"context_line":"                    \u0027id\u0027: uuidsentinel.missing_resources,"},{"line_number":2853,"context_line":"                    \u0027resource_request\u0027: {"},{"line_number":2854,"context_line":"                        \u0027required\u0027: ["},{"line_number":2855,"context_line":"                            \u0027CUSTOM_VNIC_TYPE_NORMAL\u0027"},{"line_number":2856,"context_line":"                        ]"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_5bf9d2c5","line":2853,"updated":"2019-07-11 17:04:24.000000000","message":"no \u0027resources\u0027 key ✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2865,"context_line":"                        \u0027resources\u0027: {"},{"line_number":2866,"context_line":"                            \u0027NET_BW_EGR_KILOBIT_PER_SEC\u0027: 1000,"},{"line_number":2867,"context_line":"                        },"},{"line_number":2868,"context_line":"                        \u0027required\u0027: []"},{"line_number":2869,"context_line":"                    },"},{"line_number":2870,"context_line":"                    \u0027binding:profile\u0027: {}"},{"line_number":2871,"context_line":"                }))"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_1b035aba","line":2868,"updated":"2019-07-11 
17:04:24.000000000","message":"no required traits ✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2876,"context_line":"                    \u0027resource_request\u0027: {"},{"line_number":2877,"context_line":"                        \u0027resources\u0027: {"},{"line_number":2878,"context_line":"                            \u0027NET_BW_EGR_KILOBIT_PER_SEC\u0027: 1000,"},{"line_number":2879,"context_line":"                        },"},{"line_number":2880,"context_line":"                    },"},{"line_number":2881,"context_line":"                    \u0027binding:profile\u0027: {}"},{"line_number":2882,"context_line":"                }))"}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_3bfe56ae","line":2879,"updated":"2019-07-11 17:04:24.000000000","message":"no \u0027required\u0027 key ✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2884,"context_line":"            self.cli._has_request_but_no_allocation("},{"line_number":2885,"context_line":"                {"},{"line_number":2886,"context_line":"                    \u0027id\u0027: uuidsentinel.empty_resource_request,"},{"line_number":2887,"context_line":"                    \u0027resource_request\u0027: {},"},{"line_number":2888,"context_line":"                    \u0027binding:profile\u0027: {}"},{"line_number":2889,"context_line":"                }))"},{"line_number":2890,"context_line":"        self.assertFalse("}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_db2e2242","line":2887,"updated":"2019-07-11 
17:04:24.000000000","message":"no \u0027resources\u0027 nor \u0027required\u0027 key ✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"a94f1840bf73aff5fef15b4558a713ba130acf59","unresolved":false,"context_lines":[{"line_number":2891,"context_line":"            self.cli._has_request_but_no_allocation("},{"line_number":2892,"context_line":"                {"},{"line_number":2893,"context_line":"                    \u0027id\u0027: uuidsentinel.missing_resource_request,"},{"line_number":2894,"context_line":"                    \u0027binding:profile\u0027: {}"},{"line_number":2895,"context_line":"                }))"},{"line_number":2896,"context_line":""},{"line_number":2897,"context_line":""}],"source_content_type":"text/x-python","patch_set":35,"id":"7faddb67_fb319ea3","line":2894,"updated":"2019-07-11 17:04:24.000000000","message":"no \u0027resource_request\u0027 key ✓","commit_id":"b1c7ff9947db83a96dae74099a253b6f6e9d7fb0"}],"releasenotes/notes/nova-manage-heal-port-allocation-48cc1a34c92d42cd.yaml":[{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0d547c4bf80c6a5d07c669547e3ea240ce0b9338","unresolved":false,"context_lines":[{"line_number":2,"context_line":"other:"},{"line_number":3,"context_line":"  - |"},{"line_number":4,"context_line":"    The ``nova-manage placement heal_allocations`` `CLI`_ has been extended to"},{"line_number":5,"context_line":"    heal missing port allocations possible due to `bug 1819923`_ ."},{"line_number":6,"context_line":""},{"line_number":7,"context_line":""},{"line_number":8,"context_line":"    .. 
_bug 1819923: https://bugs.launchpad.net/nova/+bug/1819923"}],"source_content_type":"text/x-yaml","patch_set":30,"id":"9fb8cfa7_02190d21","line":5,"range":{"start_line":5,"start_character":34,"end_line":5,"end_character":42},"updated":"2019-07-03 16:49:22.000000000","message":"Wording here is weird. Maybe just \"which are possible due to\"?","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"a6183b9efd5f08ba3ab941ed6157af0c2e53a78c","unresolved":false,"context_lines":[{"line_number":2,"context_line":"other:"},{"line_number":3,"context_line":"  - |"},{"line_number":4,"context_line":"    The ``nova-manage placement heal_allocations`` `CLI`_ has been extended to"},{"line_number":5,"context_line":"    heal missing port allocations possible due to `bug 1819923`_ ."},{"line_number":6,"context_line":""},{"line_number":7,"context_line":""},{"line_number":8,"context_line":"    .. _bug 1819923: https://bugs.launchpad.net/nova/+bug/1819923"}],"source_content_type":"text/x-yaml","patch_set":30,"id":"7faddb67_f49a7a6c","line":5,"range":{"start_line":5,"start_character":34,"end_line":5,"end_character":42},"in_reply_to":"9fb8cfa7_02190d21","updated":"2019-07-04 13:24:26.000000000","message":"Done","commit_id":"eb345c9d498ee90ccda80c0940d8439dcf027208"}]}
