{"/PATCHSET_LEVEL":[{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"c172d5f2c20b55769f800999e4673bbbd350c4f1","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":9,"id":"0655e4b3_8d50b8d1","updated":"2022-08-29 21:10:37.000000000","message":"as noted on other patches im holding +2 until i review this proplry tomorrow\nbut over all it looks good.","commit_id":"93805d2f0a0319d911ece4827bc61c9fb7208d66"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"85b89fae1332ab9a3ce1ac1971653cdba40569fc","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":12,"id":"20e067b6_b70823ba","updated":"2022-12-09 17:15:21.000000000","message":"Sean\u0027s comment helped. Might be worth including some of them as context comments at some point. This is is good enough for now though","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"baccbdf92f7972fd280b82b23a88728bfa1cbd4f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":12,"id":"ec962250_92029ee8","updated":"2022-12-16 09:33:48.000000000","message":"at the lowest point the worker still had 200M available memory and the top users were mysqld 700M and qemu guests with 400M memory each. ","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"64dc73f58c6e9ada854a02960d9166821448cdd9","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":12,"id":"ae8a78e8_6fd6dfd1","updated":"2022-12-15 14:18:32.000000000","message":"recheck Details: b\u0027400 Bad Request\\n\\nThe Store URI was malformed.\\n\\n   \n\nI see this happened couple of times already:\n$ logsearch log --job-group nova-devstack --result FAILURE \u0027The Store URI was malformed.\u0027 --days 40\n[snip]\nBuilds with matching logs 7/589:\n+----------------------------------+---------------------+--------------------+----------+-----------------------------------+--------+----------------------+\n| uuid                             | finished            | project            | pipeline | review                            | branch | job                  |\n+----------------------------------+---------------------+--------------------+----------+-----------------------------------+--------+----------------------+\n| 67cb43320f4240c4829b50dbfb4ebb60 | 2022-12-15T12:55:27 | openstack/nova     | gate     | https://review.opendev.org/854119 | master | nova-ceph-multistore |\n| faba1d60b734486aa477df42e1303aaa | 2022-12-13T16:57:20 | openstack/nova     | gate     | https://review.opendev.org/854118 | master | nova-ceph-multistore |\n| 922b9ef8b32e41c998cb03400bb454e4 | 2022-12-13T11:02:34 | openstack/devstack | gate     | https://review.opendev.org/865544 | master | nova-ceph-multistore |\n| 99b8e1ddaa014eccba173420f5d90711 | 2022-12-12T13:39:08 | openstack/devstack | check    | https://review.opendev.org/867215 | master | nova-ceph-multistore |\n| 1a6de03afd39494cb7a88f39b07da236 | 2022-12-03T04:10:23 | openstack/nova     | gate     | https://review.opendev.org/852171 | master | nova-ceph-multistore |\n| ae4a8cc0d3394f5d840013c8ce657bfc | 
2022-12-02T06:14:19 | openstack/nova     | gate     | https://review.opendev.org/852171 | master | nova-ceph-multistore |\n| d1e7510e804448c2b68f588d68093220 | 2022-11-29T14:41:39 | openstack/devstack | check    | https://review.opendev.org/865014 | master | nova-ceph-multistore |\n+----------------------------------+---------------------+--------------------+----------+-----------------------------------+--------+----------------------+\n","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"6e96d99d6b1b33b06f57e0e84053170c6b913e28","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":12,"id":"006ff24e_9ca11d6e","updated":"2022-12-15 18:03:43.000000000","message":"recheck I\u0027m feeling lucky to hit all the new gate failures. This is the newest from the functional job this time:\n\ntest_live_migrate_one_multi_created_instance\n\n022-12-15 14:34:43,599 INFO [nova.compute.claims] Claim successful on node host1\n2022-12-15 14:34:45,015 ERROR [nova.compute.manager] Instance failed to spawn\nTraceback (most recent call last):\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 2368, in _instance_update\n    update_on_match(compare, \u0027uuid\u0027, updates)\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/sqlalchemy/orm.py\", line 52, in update_on_match\n    return update_match.update_on_match(\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/sqlalchemy/update_match.py\", line 194, in update_on_match\n    raise NoRowsMatched(\"Zero rows matched for %d attempts\" % attempts)\noslo_db.sqlalchemy.update_match.NoRowsMatched: Zero rows matched for 3 attempts\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/compute/manager.py\", line 2758, in _build_resources\n    yield resources\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/compute/manager.py\", line 2509, in _build_and_run_instance\n    instance.save(expected_task_state\u003d\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_versionedobjects/base.py\", line 226, in wrapper\n    return fn(self, *args, **kwargs)\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/objects/instance.py\", line 838, in save\n    old_ref, inst_ref \u003d db.instance_update_and_get_original(\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/utils.py\", line 35, in wrapper\n    return f(*args, **kwargs)\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/api.py\", line 144, in wrapper\n    with excutils.save_and_reraise_exception() as ectxt:\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_utils/excutils.py\", line 227, in __exit__\n    self.force_reraise()\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_utils/excutils.py\", line 200, in force_reraise\n    raise self.value\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/api.py\", line 142, in wrapper\n    return f(*args, **kwargs)\n  File 
\"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 207, in wrapper\n    return f(context, *args, **kwargs)\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 2285, in instance_update_and_get_original\n    return (copy.copy(instance_ref), _instance_update(\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 2427, in _instance_update\n    raise exc(**exc_props)\nnova.exception.UnexpectedTaskStateError: Conflict updating instance 967e756a-408f-4165-b12c-20b395fb7b75. Expected: {\u0027task_state\u0027: [\u0027block_device_mapping\u0027]}. Actual: {\u0027task_state\u0027: None}\n2022-12-15 14:34:45,018 INFO [nova.compute.manager] Terminating instance\n2022-12-15 14:34:45,031 INFO [placement.requestlog] 127.0.0.1 \"GET /placement/resource_providers/af02d559-9a75-4c35-8583-9c92fc6b73a6/allocations\" status: 200 len: 152 microversion: 1.0\n2022-12-15 14:34:45,045 WARNING [nova.virt.fake] Key \u0027967e756a-408f-4165-b12c-20b395fb7b75\u0027 not in instances \u0027{}\u0027\n2022-12-15 14:34:45,045 INFO [nova.compute.manager] Took 0.00 seconds to destroy the instance on the hypervisor.\n2022-12-15 14:34:45,080 INFO [placement.requestlog] 127.0.0.1 \"GET /placement/allocations/82c068b3-a95b-4e07-857e-0d3fa8a4d0af\" status: 200 len: 230 microversion: 1.28\n2022-12-15 14:34:45,096 INFO [nova.api.openstack.requestlog] 127.0.0.1 \"GET /v2.1/servers/82c068b3-a95b-4e07-857e-0d3fa8a4d0af\" status: 200 len: 1847 microversion: 2.93 time: 0.413454\n2022-12-15 14:34:45,178 INFO [nova.compute.manager] Took 0.00 seconds to spawn the instance on the hypervisor.\n2022-12-15 14:34:45,187 INFO [placement.requestlog] 127.0.0.1 \"GET /placement/resource_providers/a01bed4a-c2a7-4bfc-8e12-041755f173d9/allocations\" status: 200 len: 152 microversion: 1.0\n2022-12-15 14:34:45,217 INFO [nova.compute.manager] Took 1.67 seconds to build instance.\n2022-12-15 14:34:45,223 INFO [placement.requestlog] 127.0.0.1 \"GET /placement/allocations/967e756a-408f-4165-b12c-20b395fb7b75\" status: 200 len: 230 microversion: 1.28\n2022-12-15 14:34:45,233 ERROR [nova.compute.manager] Failed to build and run instance\nTraceback (most recent call last):\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 2368, in _instance_update\n    update_on_match(compare, \u0027uuid\u0027, updates)\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/sqlalchemy/orm.py\", line 52, in update_on_match\n    return update_match.update_on_match(\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/sqlalchemy/update_match.py\", line 194, in update_on_match\n    raise NoRowsMatched(\"Zero rows matched for %d attempts\" % attempts)\noslo_db.sqlalchemy.update_match.NoRowsMatched: Zero rows matched for 3 attempts\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/compute/manager.py\", line 2509, in _build_and_run_instance\n    instance.save(expected_task_state\u003d\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_versionedobjects/base.py\", line 226, in wrapper\n    return fn(self, *args, **kwargs)\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/objects/instance.py\", line 838, in save\n    old_ref, inst_ref \u003d db.instance_update_and_get_original(\n  File 
\"/home/zuul/src/opendev.org/openstack/nova/nova/db/utils.py\", line 35, in wrapper\n    return f(*args, **kwargs)\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/api.py\", line 144, in wrapper\n    with excutils.save_and_reraise_exception() as ectxt:\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_utils/excutils.py\", line 227, in __exit__\n    self.force_reraise()\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_utils/excutils.py\", line 200, in force_reraise\n    raise self.value\n  File \"/home/zuul/src/opendev.org/openstack/nova/.tox/functional-py310/lib/python3.10/site-packages/oslo_db/api.py\", line 142, in wrapper\n    return f(*args, **kwargs)\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 207, in wrapper\n    return f(context, *args, **kwargs)\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 2285, in instance_update_and_get_original\n    return (copy.copy(instance_ref), _instance_update(\n  File \"/home/zuul/src/opendev.org/openstack/nova/nova/db/main/api.py\", line 2427, in _instance_update\n    raise exc(**exc_props)\nnova.exception.UnexpectedTaskStateError: Conflict updating instance 967e756a-408f-4165-b12c-20b395fb7b75. Expected: {\u0027task_state\u0027: [\u0027block_device_mapping\u0027]}. Actual: {\u0027task_state\u0027: None}\n\nThis is the only hit for it in the last 30 days.\n","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"c518b444822a5d55c39710e6c2934d6e727fc5c0","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":12,"id":"2f0ec55d_ea47731b","updated":"2022-10-27 08:48:05.000000000","message":"recheck bug 1912310 (it is a very close match to this closed bug)","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"cbc89e1ce1cd3a0f8fd1365849e0a4cffd94bb47","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":12,"id":"d94b6ef4_48665065","updated":"2022-12-16 14:07:23.000000000","message":"recheck bug 1999893 \n\nAnother tempest connection timeout in nova-grenade-multinode. 
So now I filed a bug https://bugs.launchpad.net/tempest/+bug/1999893","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"adc47cc2a933cde01244b18923203dd716d4f907","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":12,"id":"5b6e274d_b393ab79","updated":"2022-12-16 09:28:50.000000000","message":"recheck nova-live-migration\nhttps://zuul.opendev.org/t/openstack/build/e9cf7c858d5247859b0b6b8baa3728dc/log/job-output.txt#17731 \n\n\n2022-12-15 18:32:44.797540 | compute1 |   \"msg\": \"Warning: Permanently added \u0027149.202.168.179\u0027 (ECDSA) to the list of known hosts.\\r\\nrsync: [sender] link_stat \\\"/var/lib/zuul/builds/e9cf7c858d5247859b0b6b8baa3728dc/work/ca-bundle.pem\\\" failed: No such file or directory (2)\\nrsync error: some files/attrs were not transferred (see previous errors) (code 23) at main.c(1333) [sender\u003d3.2.3]\\n\",\n\nrecheck nova-next\nIt seems tempest got network timeout while calling create_volume:\n\nTraceback (most recent call last):\n  File \"/opt/stack/tempest/tempest/api/compute/volumes/test_attach_volume_negative.py\", line 49, in test_attach_attached_volume_to_same_server\n    volume \u003d self.create_volume()\n  File \"/opt/stack/tempest/tempest/api/compute/base.py\", line 544, in create_volume\n    volume \u003d cls.volumes_client.create_volume(**kwargs)[\u0027volume\u0027]\n  File \"/opt/stack/tempest/tempest/lib/services/volume/v3/volumes_client.py\", line 116, in create_volume\n    resp, body \u003d self.post(\u0027volumes\u0027, post_body)\n  File \"/opt/stack/tempest/tempest/lib/common/rest_client.py\", line 299, in post\n    return self.request(\u0027POST\u0027, url, extra_headers, headers, body, chunked)\n  File \"/opt/stack/tempest/tempest/lib/services/volume/base_client.py\", line 39, in request\n    resp, resp_body \u003d super(BaseClient, self).request(\n  File \"/opt/stack/tempest/tempest/lib/common/rest_client.py\", line 704, in request\n    resp, resp_body \u003d self._request(method, url, headers\u003dheaders,\n  File \"/opt/stack/tempest/tempest/lib/common/rest_client.py\", line 583, in _request\n    resp, resp_body \u003d self.raw_request(\n  File \"/opt/stack/tempest/tempest/lib/common/rest_client.py\", line 623, in raw_request\n    resp, resp_body \u003d self.http_obj.request(\n  File \"/opt/stack/tempest/tempest/lib/common/http.py\", line 110, in request\n    r \u003d super(ClosingHttp, self).request(method, url, retries\u003dretry,\n  File \"/opt/stack/tempest/.tox/tempest/lib/python3.10/site-packages/urllib3/request.py\", line 78, in request\n    return self.request_encode_body(\n  File \"/opt/stack/tempest/.tox/tempest/lib/python3.10/site-packages/urllib3/request.py\", line 170, in request_encode_body\n    return self.urlopen(method, url, **extra_kw)\n  File \"/opt/stack/tempest/.tox/tempest/lib/python3.10/site-packages/urllib3/poolmanager.py\", line 376, in urlopen\n    response \u003d conn.urlopen(method, u.request_uri, **kw)\n  File \"/opt/stack/tempest/.tox/tempest/lib/python3.10/site-packages/urllib3/connectionpool.py\", line 787, in urlopen\n    retries \u003d retries.increment(\n  File \"/opt/stack/tempest/.tox/tempest/lib/python3.10/site-packages/urllib3/util/retry.py\", line 550, in increment\n    raise six.reraise(type(error), error, _stacktrace)\n  File \"/opt/stack/tempest/.tox/tempest/lib/python3.10/site-packages/urllib3/packages/six.py\", line 
Balazs Gibizer (patch set 12, 2022-12-16 14:07):
  recheck bug 1999893

  Another tempest connection timeout in nova-grenade-multinode, so now I
  filed a bug: https://bugs.launchpad.net/tempest/+bug/1999893

Takashi Natsume (patch set 12, 2022-12-17):
  recheck nova-live-migration post_failure


nova/scheduler/manager.py
-------------------------

Line 360, patch set 9 (the rebuild fallback):

            # generate candidates as that would require space on the current
            # compute for double allocation. So no allocation candidates for
            # rebuild and therefore alloc_reqs_by_rp_uuid is None
            return self._legacy_find_hosts(
                context, num_instances, spec_obj, hosts, num_alts,
                instance_uuids=instance_uuids)

  sean mooney (2022-08-29):
    OK, that makes sense. Thanks for recording it. I don't know if we could
    eventually remove this by getting the instance's current allocations and
    mapping those in some way, but for now this is enough.

  Balazs Gibizer (2022-08-30):
    Ack.
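Note: a minimal sketch of the idea Sean floats above, for illustration only.
get_current_allocations() is a hypothetical stand-in for a placement client
call, and the response shape is assumed; the patch itself keeps the legacy
path.

    # Illustrative sketch only (not in the patch): build a synthetic
    # allocation request for rebuild from the instance's *current*
    # placement allocations, so the normal candidate-claiming path could
    # be used instead of _legacy_find_hosts().
    def alloc_reqs_for_rebuild(placement_client, instance_uuid):
        # Assumed response shape:
        # {"allocations": {rp_uuid: {"resources": {"VCPU": 2, ...}}}}
        current = placement_client.get_current_allocations(instance_uuid)
        alloc_req = {"allocations": current["allocations"]}
        # Mirror the alloc_reqs_by_rp_uuid structure the scheduler uses:
        # one entry per resource provider involved in the request.
        return {rp_uuid: [alloc_req] for rp_uuid in current["allocations"]}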
Line 415, patch set 9 (claiming the first candidate):

                # want to order the allocation_requests in the future based on
                # information in the provider summaries, we'll just try to
                # claim resources using the first allocation_request
                alloc_req = host.allocation_candidates[0]
                if utils.claim_resources(
                    elevated, self.placement_client, spec_obj, instance_uuid,
                    alloc_req,

  sean mooney (2022-08-29):
    By the way, in the future we could do the ordering of the allocation
    candidates in the weigher without needing to change this, i.e. order the
    list before it gets here.

  Balazs Gibizer (2022-08-30):
    Yeah, that would be a possibility too. Good point.
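Note: a rough sketch of the ordering Sean suggests, written as a weigher. No
such weigher exists in the patch, and the _score() heuristic is made up; it
only illustrates reordering the list before the claim above takes
host.allocation_candidates[0].

    from nova.scheduler import weights


    class CandidateOrderWeigher(weights.BaseHostWeigher):

        def _score(self, candidate):
            # Placeholder heuristic: prefer candidates that touch fewer
            # resource providers.
            return -len(candidate.get("mappings", {}))

        def _weigh_object(self, host_state, weight_properties):
            # Sort the candidate list in place, best candidate first, so
            # the later claim of allocation_candidates[0] picks it up.
            host_state.allocation_candidates.sort(
                key=self._score, reverse=True)
            # Leave the host ordering itself unchanged.
            return 0.0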
Line 634, patch set 9 (alternatives on the same host):

                # TODO(gibi): In theory we could generate alternatives on the
                # same host if that host has different possible allocation
                # candidates for the request. But we don't do that today
                if host.cell_uuid == cell_uuid and host not in selected_hosts:
                    if alloc_reqs_by_rp_uuid is not None:
                        if not host.allocation_candidates:
                            msg = ("A host state with uuid = '%s' that did "

  sean mooney (2022-08-29):
    I'm not sure we would want to reconsider an alternative allocation
    candidate on the same host at this point. It's possible for NUMA reasons
    that it would be valid, but I think we would want to prefer candidates
    from other hosts first, in a sort of barber-pole striping algorithm:
    take the first candidate from all hosts, then the second candidate from
    all hosts, and so on. If we only had one host, that would still try up
    to n candidates, but I think it would be better from a multi-create
    perspective.

  Balazs Gibizer (2022-08-30):
    I agree we want to spread by host first to avoid races. There is a
    hidden complication here, by the way. For the main selection we have
    already allocated the candidate in placement, so that one is fixed. But
    for the alternative selections, every candidate is just a candidate. So
    the question is: should we use a single candidate in more than one
    alternative, for different VMs? In the case of PCI PFs that would lead
    to unnecessary reschedules, but for VFs or non-PCI VMs it might be
    totally fine. The core of the issue is that placement does not tell us
    how many times a candidate would fit on a given host based on the
    available resources; we only know that it fits at least once.
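Note: an illustrative sketch of the "barber pole" striping Sean describes,
not what the code does today: take the first candidate from every host, then
the second from every host, and so on.

    import itertools


    def striped_candidates(candidates_by_host):
        """Yield (host, candidate) pairs round-robin across hosts.

        candidates_by_host: dict mapping a host to its ordered list of
        allocation candidates.
        """
        hosts = list(candidates_by_host)
        # zip_longest gives us the first candidate of every host, then the
        # second of every host, padding shorter lists with None.
        for column in itertools.zip_longest(*candidates_by_host.values()):
            for host, candidate in zip(hosts, column):
                if candidate is not None:
                    yield host, candidate

    # e.g. {"h1": [a, b], "h2": [c]} yields (h1, a), (h2, c), (h1, b)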
Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d3cb4219383a547074b4f76871aebe4683160f44","unresolved":false,"context_lines":[{"line_number":1899,"context_line":"    )"},{"line_number":1900,"context_line":"    @mock.patch(\"nova.scheduler.manager.SchedulerManager._get_all_host_states\")"},{"line_number":1901,"context_line":"    def test_filters_removes_all_a_c_host_is_not_selected("},{"line_number":1902,"context_line":"        self,"},{"line_number":1903,"context_line":"        mock_get_all_host_states,"},{"line_number":1904,"context_line":"        mock_consume,"},{"line_number":1905,"context_line":"    ):"}],"source_content_type":"text/x-python","patch_set":12,"id":"76b22a13_0ccf48f1","line":1902,"in_reply_to":"b85d215c_730aaf24","updated":"2022-11-05 14:05:36.000000000","message":"Ack","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"023d53d63b37b346a34565a4bbb402b96d68b895","unresolved":true,"context_lines":[{"line_number":2047,"context_line":"        alloc_reqs_by_rp_uuid[uuids.host1] \u003d ["},{"line_number":2048,"context_line":"            {"},{"line_number":2049,"context_line":"                \"mappings\": {"},{"line_number":2050,"context_line":"                    \"\": [uuids.host1],"},{"line_number":2051,"context_line":"                    uuids.group_req1: [getattr(uuids, f\"host1_child{i}\")],"},{"line_number":2052,"context_line":"                }"},{"line_number":2053,"context_line":"            } for i in [1, 2, 3]"}],"source_content_type":"text/x-python","patch_set":12,"id":"18c06690_2cd1bf7e","line":2050,"range":{"start_line":2050,"start_character":20,"end_line":2050,"end_character":22},"updated":"2022-11-03 12:09:49.000000000","message":"this  is a littel odd but ok","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"d3cb4219383a547074b4f76871aebe4683160f44","unresolved":false,"context_lines":[{"line_number":2047,"context_line":"        alloc_reqs_by_rp_uuid[uuids.host1] \u003d ["},{"line_number":2048,"context_line":"            {"},{"line_number":2049,"context_line":"                \"mappings\": {"},{"line_number":2050,"context_line":"                    \"\": [uuids.host1],"},{"line_number":2051,"context_line":"                    uuids.group_req1: [getattr(uuids, f\"host1_child{i}\")],"},{"line_number":2052,"context_line":"                }"},{"line_number":2053,"context_line":"            } for i in [1, 2, 3]"}],"source_content_type":"text/x-python","patch_set":12,"id":"e2099e93_d97ba3e7","line":2050,"range":{"start_line":2050,"start_character":20,"end_line":2050,"end_character":22},"in_reply_to":"18c06690_2cd1bf7e","updated":"2022-11-05 14:05:36.000000000","message":"That is our sad reality :) The un-name request group uses \"\" as the name of the group.","commit_id":"3d818c3473cdd94cac846309b555b771243f82b8"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"023d53d63b37b346a34565a4bbb402b96d68b895","unresolved":true,"context_lines":[{"line_number":2071,"context_line":"        self.assertEqual(1, len(selections))"},{"line_number":2072,"context_line":"        selection \u003d selections[0]"},{"line_number":2073,"context_line":"        
Line 2074, patch set 12 (why host1_child2 is expected):

        self.assertEqual(1, len(selections))
        selection = selections[0]
        self.assertEqual(uuids.host1, selection.compute_node_uuid)
        # we expect that host1_child2 candidate is selected
        expected_a_c = {
            "mappings": {
                "": [uuids.host1],

  sean mooney (2022-11-03):
    It's not obvious why child2 is selected.

  Balazs Gibizer (2022-11-05):
    See L2038-L2039: the DropFirstFilter will drop host1_child1, and our
    whole filter scheduler is deterministic by default, so the next
    candidate, host1_child2, will be selected. Let me know if you need me to
    add this info here as a comment.

  sean mooney (2022-11-07):
    Ah sorry, I missed the use of DropFirst; it was scrolled off my screen.
    That makes more sense. I think this is fine as is.

Line 2156, patch set 12 (driving the test through a real filter):

        self.manager.host_manager.enabled_filters = [
            RPFilter(allowed_rp_uuids=[uuids.host1_child2, uuids.host3_child2])
        ]

        result = self.manager._schedule(
            self.context,
            self.request_spec,

  sean mooney (2022-11-03):
    This is a little complicated versus hardcoding the candidates, but it
    tests the behavior of filters that would remove allocation candidates as
    their primary means of filtering hosts, so I guess this makes sense.

  Balazs Gibizer (2022-11-05):
    Ack.
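Note: RPFilter itself is not part of the quoted context. A minimal sketch of
the shape its usage above suggests: a filter whose primary means of
filtering is pruning each host's allocation candidates to an allowed set of
resource providers, rejecting hosts with none left. The actual test helper
may differ.

    from nova.scheduler import filters


    class RPFilter(filters.BaseHostFilter):
        """Assumed shape of the test helper: filter hosts purely by
        pruning their allocation candidates.
        """

        def __init__(self, allowed_rp_uuids):
            self.allowed_rp_uuids = allowed_rp_uuids

        def host_passes(self, host_state, filter_properties):
            # Keep a candidate only if at least one provider it maps to is
            # in the allowed set.
            host_state.allocation_candidates = [
                candidate for candidate in host_state.allocation_candidates
                if any(rp in self.allowed_rp_uuids
                       for rps in candidate["mappings"].values()
                       for rp in rps)
            ]
            # A host with no surviving candidate cannot be selected.
            return bool(host_state.allocation_candidates)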
