{"/COMMIT_MSG":[{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"4889b0bc6a88833133bf67032188e3f560a7b139","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Execute glance upload in a native thread as it may block the current"},{"line_number":10,"context_line":"coroutine until it completes."},{"line_number":11,"context_line":""},{"line_number":12,"context_line":"Dispact the fact we use eventlet monkey_patching [1] to achieve cooperative"},{"line_number":13,"context_line":"yielding for network IO, file IO on busy file system may still get"},{"line_number":14,"context_line":"nova-compute hanging."},{"line_number":15,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":4,"id":"bf51134e_5bf05091","line":12,"range":{"start_line":12,"start_character":0,"end_line":12,"end_character":7},"updated":"2020-06-16 16:27:31.000000000","message":"Despite?","commit_id":"cb4139468cbc47661d146df4a47a2e1092230baf"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"46cb85525537d6323857be86f7ae0ad4a36033ea","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Execute glance upload in a native thread as it may block the current"},{"line_number":10,"context_line":"coroutine until it completes."},{"line_number":11,"context_line":""},{"line_number":12,"context_line":"Dispact the fact we use eventlet monkey_patching [1] to achieve cooperative"},{"line_number":13,"context_line":"yielding for network IO, file IO on busy file system may still get"},{"line_number":14,"context_line":"nova-compute hanging."},{"line_number":15,"context_line":""}],"source_content_type":"text/x-gerrit-commit-message","patch_set":4,"id":"bf51134e_f0e16fd2","line":12,"range":{"start_line":12,"start_character":0,"end_line":12,"end_character":7},"in_reply_to":"bf51134e_5bf05091","updated":"2020-06-17 12:03:59.000000000","message":"Done","commit_id":"cb4139468cbc47661d146df4a47a2e1092230baf"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"4889b0bc6a88833133bf67032188e3f560a7b139","unresolved":false,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"This change also introduces [DEFAULT]/max_concurrent_snapshots"},{"line_number":20,"context_line":"in order to limit by default compute resource overuse related"},{"line_number":21,"context_line":"to snapshot."},{"line_number":22,"context_line":""},{"line_number":23,"context_line":"[1] https://eventlet.net/doc/patching.html"},{"line_number":24,"context_line":"[2] https://eventlet.net/doc/threading.html"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":4,"id":"bf51134e_1b24180d","line":21,"updated":"2020-06-16 16:27:31.000000000","message":"Any time a commit message says \"oh and also do this other thing\" that is a signal to me that the patch should be split into two. You\u0027ve got two completely separate things in this patch:\n\n 1. Moving the execution to a separate thread\n 2. Limiting the number of parallel snapshots\n\nTo me, those should be in separate patches, especially since one of them is at least potentially backportable. 
Also, the title of this in a short log looks like it just offloads to a thread, when in reality, it *also* adds a new config option and new behavior.","commit_id":"cb4139468cbc47661d146df4a47a2e1092230baf"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"46cb85525537d6323857be86f7ae0ad4a36033ea","unresolved":false,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"This change also introduces [DEFAULT]/max_concurrent_snapshots"},{"line_number":20,"context_line":"in order to limit by default compute resource overuse related"},{"line_number":21,"context_line":"to snapshot."},{"line_number":22,"context_line":""},{"line_number":23,"context_line":"[1] https://eventlet.net/doc/patching.html"},{"line_number":24,"context_line":"[2] https://eventlet.net/doc/threading.html"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":4,"id":"bf51134e_70f6ff91","line":21,"in_reply_to":"bf51134e_1b24180d","updated":"2020-06-17 12:03:59.000000000","message":"Yes better to split:\n-So re repush this one with a reno(without new parameter).\n-and new one https://review.opendev.org/#/c/736169/ for max_concurrent_snapshots.","commit_id":"cb4139468cbc47661d146df4a47a2e1092230baf"}],"nova/conf/compute.py":[{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"1bb364bf122b198f5dddf6c2fb6fa5addea7c80a","unresolved":false,"context_lines":[{"line_number":901,"context_line":"  failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the"},{"line_number":902,"context_line":"  live migration"},{"line_number":903,"context_line":"\"\"\"),"},{"line_number":904,"context_line":"    cfg.IntOpt(\u0027max_concurrent_disk_ops\u0027,"},{"line_number":905,"context_line":"        default\u003d10,"},{"line_number":906,"context_line":"        min\u003d0,"},{"line_number":907,"context_line":"        help\u003d\"\"\""},{"line_number":908,"context_line":"Number of concurrent disk-IO-intensive operations (glance image downloads,"},{"line_number":909,"context_line":"image format conversions, etc.) that we will do in parallel.  
If this is set"},{"line_number":910,"context_line":"too high then response time suffers."},{"line_number":911,"context_line":"The default value of 0 means no limit."},{"line_number":912,"context_line":" \"\"\"),"},{"line_number":913,"context_line":"    cfg.IntOpt(\u0027max_disk_devices_to_attach\u0027,"},{"line_number":914,"context_line":"        default\u003d-1,"},{"line_number":915,"context_line":"        min\u003d-1,"}],"source_content_type":"text/x-python","patch_set":2,"id":"ff570b3c_e2e8737d","line":912,"range":{"start_line":904,"start_character":2,"end_line":912,"end_character":6},"updated":"2020-06-11 12:06:30.000000000","message":"i was expecting you to add a new config option for snapshot and an explcit semapone in the cod enot update this config option.\n\ni guess we could do that but i was under the impression we wanted to limit the number of snapshots and not all io operations.","commit_id":"716aff2be3f85652788269b9cb17eac336775aa5"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"30310542ef3247c63b375f2263830df7c7b11518","unresolved":false,"context_lines":[{"line_number":901,"context_line":"  failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the"},{"line_number":902,"context_line":"  live migration"},{"line_number":903,"context_line":"\"\"\"),"},{"line_number":904,"context_line":"    cfg.IntOpt(\u0027max_concurrent_disk_ops\u0027,"},{"line_number":905,"context_line":"        default\u003d10,"},{"line_number":906,"context_line":"        min\u003d0,"},{"line_number":907,"context_line":"        help\u003d\"\"\""},{"line_number":908,"context_line":"Number of concurrent disk-IO-intensive operations (glance image downloads,"},{"line_number":909,"context_line":"image format conversions, etc.) that we will do in parallel.  If this is set"},{"line_number":910,"context_line":"too high then response time suffers."},{"line_number":911,"context_line":"The default value of 0 means no limit."},{"line_number":912,"context_line":" \"\"\"),"},{"line_number":913,"context_line":"    cfg.IntOpt(\u0027max_disk_devices_to_attach\u0027,"},{"line_number":914,"context_line":"        default\u003d-1,"},{"line_number":915,"context_line":"        min\u003d-1,"}],"source_content_type":"text/x-python","patch_set":2,"id":"ff570b3c_6ebe0c00","line":912,"range":{"start_line":904,"start_character":2,"end_line":912,"end_character":6},"in_reply_to":"ff570b3c_58715244","updated":"2020-06-11 14:34:23.000000000","message":"So, I was suggesting a higher-level \"max_concurrent_snapshots\" config, where we don\u0027t even start the snapshot operation until we have one of the available slots. By using this config, and doing the semaphore acquire deep down in the glance code, we will pause the instance and then presumably wait for a long time until we get the semaphore. If we do it in the snapshot operation, we can keep running until we get a slot, pause, snapshot, resume.\n\nNow, we maybe should *also* grab this one in the glance code so that we honor the intention of this config (i.e. 
any/all heavy disk ops), but this isn\u0027t what I was suggesting.","commit_id":"716aff2be3f85652788269b9cb17eac336775aa5"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"219f5c64978234f73af57a4ce2d030ff7faac9af","unresolved":false,"context_lines":[{"line_number":901,"context_line":"  failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the"},{"line_number":902,"context_line":"  live migration"},{"line_number":903,"context_line":"\"\"\"),"},{"line_number":904,"context_line":"    cfg.IntOpt(\u0027max_concurrent_disk_ops\u0027,"},{"line_number":905,"context_line":"        default\u003d10,"},{"line_number":906,"context_line":"        min\u003d0,"},{"line_number":907,"context_line":"        help\u003d\"\"\""},{"line_number":908,"context_line":"Number of concurrent disk-IO-intensive operations (glance image downloads,"},{"line_number":909,"context_line":"image format conversions, etc.) that we will do in parallel.  If this is set"},{"line_number":910,"context_line":"too high then response time suffers."},{"line_number":911,"context_line":"The default value of 0 means no limit."},{"line_number":912,"context_line":" \"\"\"),"},{"line_number":913,"context_line":"    cfg.IntOpt(\u0027max_disk_devices_to_attach\u0027,"},{"line_number":914,"context_line":"        default\u003d-1,"},{"line_number":915,"context_line":"        min\u003d-1,"}],"source_content_type":"text/x-python","patch_set":2,"id":"ff570b3c_8e85605c","line":912,"range":{"start_line":904,"start_character":2,"end_line":912,"end_character":6},"in_reply_to":"ff570b3c_58715244","updated":"2020-06-30 12:48:30.000000000","message":"by the way we shoudl have a release note to cover the modificaiton or intoduction of the new config option too.","commit_id":"716aff2be3f85652788269b9cb17eac336775aa5"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"d4e7e1de7939058d99fedccc4e54f393f501e95e","unresolved":false,"context_lines":[{"line_number":901,"context_line":"  failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the"},{"line_number":902,"context_line":"  live migration"},{"line_number":903,"context_line":"\"\"\"),"},{"line_number":904,"context_line":"    cfg.IntOpt(\u0027max_concurrent_disk_ops\u0027,"},{"line_number":905,"context_line":"        default\u003d10,"},{"line_number":906,"context_line":"        min\u003d0,"},{"line_number":907,"context_line":"        help\u003d\"\"\""},{"line_number":908,"context_line":"Number of concurrent disk-IO-intensive operations (glance image downloads,"},{"line_number":909,"context_line":"image format conversions, etc.) that we will do in parallel.  
If this is set"},{"line_number":910,"context_line":"too high then response time suffers."},{"line_number":911,"context_line":"The default value of 0 means no limit."},{"line_number":912,"context_line":" \"\"\"),"},{"line_number":913,"context_line":"    cfg.IntOpt(\u0027max_disk_devices_to_attach\u0027,"},{"line_number":914,"context_line":"        default\u003d-1,"},{"line_number":915,"context_line":"        min\u003d-1,"}],"source_content_type":"text/x-python","patch_set":2,"id":"ff570b3c_b097d73a","line":912,"range":{"start_line":904,"start_character":2,"end_line":912,"end_character":6},"in_reply_to":"ff570b3c_6ebe0c00","updated":"2020-06-12 07:39:40.000000000","message":"Ok, so I repushed with max_concurrent_snaphots parameter/semaphore, largely inspired by your _build_semaphore work.","commit_id":"716aff2be3f85652788269b9cb17eac336775aa5"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"7ed052e30fa801743b326c13aa9e2bbf81b19dd5","unresolved":false,"context_lines":[{"line_number":901,"context_line":"  failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the"},{"line_number":902,"context_line":"  live migration"},{"line_number":903,"context_line":"\"\"\"),"},{"line_number":904,"context_line":"    cfg.IntOpt(\u0027max_concurrent_disk_ops\u0027,"},{"line_number":905,"context_line":"        default\u003d10,"},{"line_number":906,"context_line":"        min\u003d0,"},{"line_number":907,"context_line":"        help\u003d\"\"\""},{"line_number":908,"context_line":"Number of concurrent disk-IO-intensive operations (glance image downloads,"},{"line_number":909,"context_line":"image format conversions, etc.) that we will do in parallel.  If this is set"},{"line_number":910,"context_line":"too high then response time suffers."},{"line_number":911,"context_line":"The default value of 0 means no limit."},{"line_number":912,"context_line":" \"\"\"),"},{"line_number":913,"context_line":"    cfg.IntOpt(\u0027max_disk_devices_to_attach\u0027,"},{"line_number":914,"context_line":"        default\u003d-1,"},{"line_number":915,"context_line":"        min\u003d-1,"}],"source_content_type":"text/x-python","patch_set":2,"id":"ff570b3c_58715244","line":912,"range":{"start_line":904,"start_character":2,"end_line":912,"end_character":6},"in_reply_to":"ff570b3c_e2e8737d","updated":"2020-06-11 13:26:02.000000000","message":"oops ok, I was thinking disk_ops was enaught, doing new dedicated config parameter","commit_id":"716aff2be3f85652788269b9cb17eac336775aa5"}],"nova/image/glance.py":[{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"8a91c04ffe086c135139af80344304c2b099506d","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_335f7873","line":518,"updated":"2020-06-10 12:21:57.000000000","message":"so isnt this a race.\nwe are 
kicing this to another native tread but we shoudl yield here until it complete ritght before  returnig below.\n\ni also tought we aggreed to not do this and instead just add an semephore to rate limit the uploads.\n\nthis looks pretty clean but before this was a blocking call and we would only do the get once it completed and you have not maintained that behavior.","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"19563694f4469a0bd085f939c56955bec87ce10f","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_d91a9d6d","line":518,"in_reply_to":"ff570b3c_13187482","updated":"2020-06-10 13:27:36.000000000","message":"I understand the need.\nWe are already currently covered by the semaphore \ndisk_ops_semaphore here:\nhttps://github.com/openstack/nova/blob/master/nova/virt/libvirt/driver.py#L2465\n\nlinked to parameter CONF.max_concurrent_disk_ops but by default there is no limitation.\nmaybe should we put something like 10 by default instead?\nAs currenty thread pool size is 20.\n\nNot sure is is necessary to add a specific semaphore for snapshot","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"e20b196c22a5e7cffaa29c2e3cb8fb7a789b9375","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_3973d9b3","line":518,"in_reply_to":"ff570b3c_13187482","updated":"2020-06-10 13:24:23.000000000","message":"Yep, agree, we need a semaphore to avoid doing more of these than that limit.","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"5516e814ab1429c9736ba01c7b1f6329d7c8ca21","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, 
args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_93af642a","line":518,"in_reply_to":"ff570b3c_335f7873","updated":"2020-06-10 12:32:07.000000000","message":"actully so reading the eventlest docs its not clear if this will race.\n\nhttps://eventlet.net/doc/threading.html#eventlet.tpool.execute\n\n\n\neventlet.tpool.execute(meth, *args, **kwargs)\n\n    Execute meth in a Python thread, blocking the current coroutine/ greenthread until the method completes.\n\n    The primary use case for this is to wrap an object or module that is not amenable to monkeypatching or any of the other tricks that Eventlet uses to achieve cooperative yielding. With tpool, you can force such objects to cooperate with green threads by sticking them in native threads, at the cost of some overhead.\n\n\nso will this lauch the process in native thread then yield the current greenthread so other can run on the main thread resume this greenthread again after the native thread has completed?\n\nif so this is ok if not then we need some way to get that behvior.","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"19563694f4469a0bd085f939c56955bec87ce10f","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_f9784190","line":518,"in_reply_to":"ff570b3c_335f7873","updated":"2020-06-10 13:27:36.000000000","message":"see next comment","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"19563694f4469a0bd085f939c56955bec87ce10f","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_b98c0980","line":518,"in_reply_to":"ff570b3c_3973d9b3","updated":"2020-06-10 13:27:36.000000000","message":"set max_concurrent_disk_ops to 10 is fine for you? 
(see my replied to sean)","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"19563694f4469a0bd085f939c56955bec87ce10f","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_de756334","line":518,"in_reply_to":"ff570b3c_93af642a","updated":"2020-06-10 13:27:36.000000000","message":"Yes the idea is coroutine delegate to native thread wait until completion, but in the meantime always yield to others coroutine.\n\nI don\u0027t know if you saw my first self comment on the change, it display threads activity of nova-compute when multiple upload are running.","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"d8f27e81c88a654d8daa09ef5b2057b0b9bd1c36","unresolved":false,"context_lines":[{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""},{"line_number":521,"context_line":"    def _get_image_create_disk_format_default(self, context):"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff570b3c_13187482","line":518,"in_reply_to":"ff570b3c_93af642a","updated":"2020-06-10 12:38:23.000000000","message":"i missed the prelude\n\nThe simplest thing to do with tpool is to execute() a function with it. The function will be run in a random thread in the pool, while the calling coroutine blocks on its completion:\n\nso it will still be blocking in terms of the lifetime of this coroutine but it will not block the main tread as the current greenthread will yield until the snapshot is completed.\n\n\neven though the total number of snapshots will be limited by the size of the thread pool i think we still need to add the new max_concurrent_shapshots config option and add a sepmephoen to limit the number of snapshots that happen at once. so ill keep my -1 untill other way in on that but the blocking/yeilding behavior is fine. 
there is no race here.","commit_id":"8b3d1a25bfe53ecfa709c73cffa56236ea5c4841"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"0ce88c87d92d859fbcffde107a82d594c79028da","unresolved":false,"context_lines":[{"line_number":514,"context_line":"        # coroutine in busy environment."},{"line_number":515,"context_line":"        utils.tpool_execute(self._client.call,"},{"line_number":516,"context_line":"                      context, 2, \u0027upload\u0027,"},{"line_number":517,"context_line":"                      args\u003d(image_id, data))"},{"line_number":518,"context_line":""},{"line_number":519,"context_line":"        return self._client.call(context, 2, \u0027get\u0027, args\u003d(image_id,))"},{"line_number":520,"context_line":""}],"source_content_type":"text/x-python","patch_set":6,"id":"bf51134e_c8beadd3","line":517,"updated":"2020-06-25 15:37:48.000000000","message":"Note for other reviewers, unlike the spawn() we\u0027re used to seeing, this blocks until completion, but runs the meat of this in a thread. So this will complete and then we\u0027ll fall through to the next line, so the behavior should *appear* unchanged.","commit_id":"20ee84596fb7588c4b7430ca028f63e4d3004bde"}],"nova/utils.py":[{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"0ce88c87d92d859fbcffde107a82d594c79028da","unresolved":false,"context_lines":[{"line_number":700,"context_line":""},{"line_number":701,"context_line":"def tpool_execute(func, *args, **kwargs):"},{"line_number":702,"context_line":"    \"\"\"Run func in a native thread\"\"\""},{"line_number":703,"context_line":"    eventlet.tpool.execute(func, *args, **kwargs)"},{"line_number":704,"context_line":""},{"line_number":705,"context_line":""},{"line_number":706,"context_line":"def is_none_string(val):"}],"source_content_type":"text/x-python","patch_set":6,"id":"bf51134e_08b545ee","line":703,"updated":"2020-06-25 15:37:48.000000000","message":"Can you write a quick unit test for this just to make sure it does what we expect? Should be trivial, but right now we stub out so much of the image code that we\u0027d only catch such an issue in a real snapshotting test in tempest/devstack, which is a heavy way to validate that this isn\u0027t missing an import or something :)","commit_id":"20ee84596fb7588c4b7430ca028f63e4d3004bde"},{"author":{"_account_id":28332,"name":"Alexandre arents","email":"alexandre.arents@corp.ovh.com","username":"aarents"},"change_message_id":"840ae2d8da0f3ef8ab3f74cab43423fe19696e56","unresolved":false,"context_lines":[{"line_number":700,"context_line":""},{"line_number":701,"context_line":"def tpool_execute(func, *args, **kwargs):"},{"line_number":702,"context_line":"    \"\"\"Run func in a native thread\"\"\""},{"line_number":703,"context_line":"    eventlet.tpool.execute(func, *args, **kwargs)"},{"line_number":704,"context_line":""},{"line_number":705,"context_line":""},{"line_number":706,"context_line":"def is_none_string(val):"}],"source_content_type":"text/x-python","patch_set":6,"id":"bf51134e_73282596","line":703,"in_reply_to":"bf51134e_08b545ee","updated":"2020-06-26 10:23:01.000000000","message":"I see, added one.","commit_id":"20ee84596fb7588c4b7430ca028f63e4d3004bde"}]}
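
For readers skimming the thread above: here is a minimal, hypothetical sketch of the two mechanisms being discussed, offloading the glance upload to a native thread with eventlet's tpool (so the calling greenthread blocks but other greenthreads keep running), and bounding the number of concurrent snapshot uploads with a semaphore. This is not the Nova patch itself; MAX_CONCURRENT_SNAPSHOTS, upload_snapshot and the client stand-in are illustrative assumptions only.

import eventlet

# The service is assumed to be monkey-patched, per [1] in the commit message.
eventlet.monkey_patch()

from eventlet import tpool
from eventlet.semaphore import Semaphore

# Hypothetical stand-in for the [DEFAULT]/max_concurrent_snapshots option
# proposed in the review (split out to https://review.opendev.org/#/c/736169/).
MAX_CONCURRENT_SNAPSHOTS = 5
_snapshot_semaphore = Semaphore(MAX_CONCURRENT_SNAPSHOTS)


def tpool_execute(func, *args, **kwargs):
    """Run func in a native OS thread, as in the reviewed nova/utils.py helper.

    tpool.execute() blocks the calling greenthread until func completes, but
    the blocking file IO happens in a pooled native thread, so the rest of the
    process keeps scheduling other greenthreads in the meantime.
    """
    return tpool.execute(func, *args, **kwargs)


def upload_snapshot(client, context, image_id, data):
    """Sketch of the reviewed flow: limit concurrency, then offload the upload.

    'client' is assumed to behave like the glance client wrapper quoted in the
    review context (client.call(context, 2, 'upload'/'get', args=...)).
    """
    with _snapshot_semaphore:
        # Offload the potentially blocking upload to a native thread; this call
        # returns only once the upload has finished, so the behavior appears
        # unchanged, as Dan Smith notes in the thread.
        tpool_execute(client.call, context, 2, 'upload', args=(image_id, data))
        # Safe to fetch the image record now: the upload above has completed.
        return client.call(context, 2, 'get', args=(image_id,))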
