{"placement/objects/research_context.py":[{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"58ecb12a44e9d2b3007a5fd504747987bc0b916f","unresolved":false,"context_lines":[{"line_number":861,"context_line":"    get_providers_with_shared_capacity(ctx, \"DISK_GB\", 100), we would want to"},{"line_number":862,"context_line":"    get back the ID for the NFS_SHARE resource provider."},{"line_number":863,"context_line":""},{"line_number":864,"context_line":"    :param rc_id: Internal ID of the requested resource class."},{"line_number":865,"context_line":"    :param amount: Amount of the requested resource."},{"line_number":866,"context_line":"    :param member_of: When present, contains a list of lists of aggregate"},{"line_number":867,"context_line":"                      uuids that are used to filter the returned list of"}],"source_content_type":"text/x-python","patch_set":1,"id":"bfb3d3c7_fcdc8347","line":864,"updated":"2019-05-20 20:48:18.000000000","message":"update these","commit_id":"e4bc2bf891202ee125fc4aa88bc34b8a8c166ff1"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"58ecb12a44e9d2b3007a5fd504747987bc0b916f","unresolved":false,"context_lines":[{"line_number":868,"context_line":"                      resource providers that *directly* belong to the"},{"line_number":869,"context_line":"                      aggregates referenced."},{"line_number":870,"context_line":"    \"\"\""},{"line_number":871,"context_line":"    # The SQL we need to generate here looks like this:"},{"line_number":872,"context_line":"    #"},{"line_number":873,"context_line":"    # SELECT rp.id"},{"line_number":874,"context_line":"    # FROM resource_providers AS rp"}],"source_content_type":"text/x-python","patch_set":1,"id":"bfb3d3c7_9c18e71d","line":871,"updated":"2019-05-20 20:48:18.000000000","message":"update this","commit_id":"e4bc2bf891202ee125fc4aa88bc34b8a8c166ff1"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"58ecb12a44e9d2b3007a5fd504747987bc0b916f","unresolved":false,"context_lines":[{"line_number":914,"context_line":"        ),"},{"line_number":915,"context_line":"    )"},{"line_number":916,"context_line":""},{"line_number":917,"context_line":"    sel \u003d sa.select([rp_tbl.c.id]).select_from(rpt_to_t_join)"},{"line_number":918,"context_line":"    if rp_ids:"},{"line_number":919,"context_line":"        sel \u003d sel.where(rp_tbl.c.id.in_(rp_ids))"},{"line_number":920,"context_line":"    sel \u003d sel.group_by(rp_tbl.c.id)"}],"source_content_type":"text/x-python","patch_set":1,"id":"bfb3d3c7_fc832303","line":917,"updated":"2019-05-20 20:48:18.000000000","message":"I like this from a code DRYing standpoint.\n\nBUT\n\nAssuming there are a lot more non-sharing providers than sharing providers, even with a given resource class available (I\u0027m thinking DISK_GB), this takes us from\n\n get $small_number_of_providers in SQL\n\nto\n\n get $large_number_of_providers in SQL\n pass those to more SQL to filter down to $small_number_of_providers\n\nI haven\u0027t done any profiling, but I\u0027m assuming the latter has a bigger cost at least in terms of memory consumed by the python layer.\n\n[Later] Oh, I see now. We were already getting $large_number_of_providers and caching them. 
So this cuts out the db logic that checks capacity and aggregate membership.\n\nNice ++","commit_id":"e4bc2bf891202ee125fc4aa88bc34b8a8c166ff1"}]}
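For readers outside the placement codebase: the third comment reasons about the query-building pattern visible in its context lines (research_context.py lines 917-920), where a provider-ID SELECT is built from a join and optionally narrowed to an already-known set of provider IDs. The sketch below is a minimal, self-contained illustration of that pattern only; the table definitions, the inventories join, and the providers_with_resource helper are assumptions made for illustration, not the actual placement schema or code.

```python
# Illustrative sketch of the "select provider IDs from a join, optionally
# filter by rp_ids, group by id" pattern discussed in the review comment.
# Table shapes are simplified assumptions, not the placement schema.
import sqlalchemy as sa

metadata = sa.MetaData()

rp_tbl = sa.Table(
    'resource_providers', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('uuid', sa.String(36), nullable=False),
)
inv_tbl = sa.Table(
    'inventories', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('resource_provider_id', sa.Integer,
              sa.ForeignKey('resource_providers.id')),
    sa.Column('resource_class_id', sa.Integer, nullable=False),
    sa.Column('total', sa.Integer, nullable=False),
)


def providers_with_resource(rc_id, rp_ids=None):
    """Build a SELECT of provider IDs that have inventory of rc_id.

    When rp_ids is given, the statement is additionally restricted to that
    set, mirroring the ``if rp_ids: sel = sel.where(...)`` step in the
    reviewed code.
    """
    join = rp_tbl.join(
        inv_tbl,
        sa.and_(inv_tbl.c.resource_provider_id == rp_tbl.c.id,
                inv_tbl.c.resource_class_id == rc_id))
    # select([...]) is the SQLAlchemy 1.x calling style used in the reviewed
    # code; SQLAlchemy 2.0 would be sa.select(rp_tbl.c.id).
    sel = sa.select([rp_tbl.c.id]).select_from(join)
    if rp_ids:
        sel = sel.where(rp_tbl.c.id.in_(rp_ids))
    return sel.group_by(rp_tbl.c.id)


# Compiling the statement shows the generated SQL shape:
print(providers_with_resource(rc_id=2, rp_ids=[1, 2, 3]))
```

The performance question in the comment is about whether to let SQL return only the small set of sharing providers directly, or to reuse a cached larger set (the rp_ids restriction above) and filter it; the author concludes the larger set was already being fetched and cached, so the refactor only removes duplicated capacity and aggregate-membership logic from the database layer.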
