)]}'
{"/COMMIT_MSG":[{"author":{"_account_id":36171,"name":"jayaanand borra","display_name":"jayaanand borra","email":"jayaanand.borra@netapp.com","username":"jayaanan","status":"netapp"},"change_message_id":"a9fe9a2cd69a12aaadd1d1c286f52cdd61098ac4","unresolved":true,"context_lines":[{"line_number":9,"context_line":"Removing tpool.Proxy() as this caused very slow upload speeds during"},{"line_number":10,"context_line":"an image upload. In combination with the python-glanceclient patch that"},{"line_number":11,"context_line":"removes the chunked encoding[1], the upload speed increases significant."},{"line_number":12,"context_line":"Without the tpool.execute() the image upload could block other"},{"line_number":13,"context_line":"concurrent processes."},{"line_number":14,"context_line":""},{"line_number":15,"context_line":"[1] https://review.opendev.org/c/openstack/python-glanceclient/+/733984"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":5,"id":"54cc9013_48b34635","line":12,"updated":"2026-04-01 05:40:54.000000000","message":"The tpool.execute() approach does still prevent blocking, just more efficiently than tpool.Proxy(). The sentence makes it sound like removing tpool.execute() is the goal","commit_id":"26f38f51af6f3cd33d3ee3d883b06a3c754aeca0"}],"/PATCHSET_LEVEL":[{"author":{"_account_id":4523,"name":"Eric Harney","email":"eharney@redhat.com","username":"eharney"},"change_message_id":"cd121a8a2cdf5a9e56b7664456a6541dab5e5317","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"a9882c1e_4e3d9396","updated":"2023-07-19 15:14:32.000000000","message":"Please rebase the patch.\n\nWhy is tpool.execute perform better than tpool.Proxy here?","commit_id":"88441dd17a57e8a758cd0c916d67aaee196a17ae"},{"author":{"_account_id":12988,"name":"Peter Penchev","email":"openstack-dev@storpool.com","username":"ppenchev"},"change_message_id":"8ac05ff79be2b0455c76e2e713336219f59d6975","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"87b78e31_aca5710c","updated":"2023-07-18 10:49:58.000000000","message":"run-storpoolci","commit_id":"88441dd17a57e8a758cd0c916d67aaee196a17ae"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"7e11490bd0cce4ddd7d3e74ed541ef345d7820cb","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"e62d8ee1_5282cdef","in_reply_to":"a9882c1e_4e3d9396","updated":"2023-07-20 18:24:42.000000000","message":"\u003e Why is tpool.execute perform better than tpool.Proxy here?\n\nI think it\u0027s probably because `Proxy` ends up delegating every method call on the object it wraps to a thread, instead of just running the whole upload operation in a thread. So with proxy, it looks like this:\n\n1. requests calls `image_file.read()` (because you passed it to the PUT/POST)\n2. we grab a thread from the pool\n3. ask it to run `image_file.read()` for real\n4. yield to other eventlet green threads\n5. when we get scheduled again, see if our worker thread is done\n6. assuming so, collect its result and send it to our socket\n7. goto 1\n\nThat makes the act of reading chunks from the file and sending them to the socket exceedingly \"chatty\", CPU-intensive, and inconsistent. If you have anything else in the single-threaded eventlet hub that slows you down at step 5 such that you can\u0027t keep the socket buffer full, you\u0027re not maxing out the network connection and you make your IO-bound operation as slow as your single-busiest CPU core that is running the rest of the cinder-volume process.\n\nBy running the whole thing in a worker thread (which is the `execute()` approach) you spawn one thread one time and blast the data as quick as the socket (and disk) will go and then return the thread to the pool when you\u0027re done. While you\u0027re doing that, the rest of cinder-volume is totally unblocked and uninvolved, and the green thread that started this isn\u0027t re-scheduled until the thread worker is done.\n\nUnless you\u0027re changing it, the default thread pool size is 20, and unless you\u0027re spawning worker threads for anything else (I\u0027d guess not), that\u0027s a very reasonable size limit for the maximum number of those that can be going on, IMHO.","commit_id":"88441dd17a57e8a758cd0c916d67aaee196a17ae"},{"author":{"_account_id":32464,"name":"caiqilong","email":"406454833@qq.com","username":"cccqqqlll"},"change_message_id":"59e85ec3b3eb3ab44d967a00dba8dbb1520249c0","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"8c46a5ac_b0dc187e","updated":"2023-07-24 08:43:31.000000000","message":"run-TOYOU TYDS CI","commit_id":"7a0b9531f606afce9cffd47579900c2d35ccbcea"},{"author":{"_account_id":36198,"name":"Robert Franzke","display_name":"Robert Franzke","email":"robert.franzke@mail.schwarz","username":"r-franzke"},"change_message_id":"55e5e18cdc8b7e1ebdc4442b10a22aac08984a5b","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":5,"id":"cffd648a_5dfcc159","updated":"2024-05-21 06:17:48.000000000","message":"recheck","commit_id":"26f38f51af6f3cd33d3ee3d883b06a3c754aeca0"}],"cinder/tests/unit/test_image_utils.py":[{"author":{"_account_id":36171,"name":"jayaanand borra","display_name":"jayaanand borra","email":"jayaanand.borra@netapp.com","username":"jayaanan","status":"netapp"},"change_message_id":"a9fe9a2cd69a12aaadd1d1c286f52cdd61098ac4","unresolved":true,"context_lines":[{"line_number":749,"context_line":"        mock_info.assert_called_with(temp_file, run_as_root\u003dTrue)"},{"line_number":750,"context_line":"        self.assertEqual(2, mock_info.call_count)"},{"line_number":751,"context_line":"        mock_open.assert_called_once_with(temp_file, \u0027rb\u0027)"},{"line_number":752,"context_line":"        mock_execute.assert_called_once_with("},{"line_number":753,"context_line":"            image_service.update, ctxt, image_meta[\u0027id\u0027], {},"},{"line_number":754,"context_line":"            mock_open.return_value.__enter__.return_value, store_id\u003dNone,"},{"line_number":755,"context_line":"            base_image_ref\u003dNone)"}],"source_content_type":"text/x-python","patch_set":5,"id":"6d7b7f1a_7191ac49","line":752,"updated":"2026-04-01 05:40:54.000000000","message":"This test will always pass regardless of what the production code does, making it a no-op test. It also passes mock_open.return_value instead of mock_open.return_value.__enter__.return_value (the file object from the context manager).\n\nCompare with other tests in the same patch that correctly use mock_execute.assert_called_once_with(...).","commit_id":"26f38f51af6f3cd33d3ee3d883b06a3c754aeca0"}]}
