Gerrit review comments: openstack/swift, patch set 6 (commit 8d8c539)

Patchset level

Clay Gerrard (2024-05-31, resolved):

Shoot, I lost this change-id and moved to 918366: Parallel distributed task container iteration | https://review.opendev.org/c/openstack/swift/+/918366

swift/obj/expirer.py, line 203 (unresolved)

```
200:         self.process = int(self.conf.get('process', 0))
201:         if self.processes > 0:
202:             self.my_indexes = [self.process]
203:             self.divisor = self.processes
204:         else:
205:             self.my_indexes = [0]
206:             self.divisor = 1
```

Tim Burke (2024-04-23):

Short of the special case below, is `divisor` ever *not* `processes`? Seems like we could say

```
if self.processes <= 0:
    self.processes = 1
```

instead and get rid of `divisor`.

Clay Gerrard (2024-05-31):

Yes, it's reasonable to think of `divisor` as an alias for `processes`. I think the idea is that the variable name "divisor" makes more sense in context, especially in the special case when it's not equal to the configured `processes` value.
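For readers outside the thread: `my_indexes` and `divisor` feed the expirer's hash-mod task claiming (see the "share of the *whole* hash_mod space" comment further down). The sketch below is a minimal illustration of that filtering, assuming a task is claimed when the hash of its name modulo `divisor` lands in `my_indexes`; the helper name and hash choice are illustrative, not the patch's exact code.

```
# Illustrative only: how a my_indexes/divisor pair can be used to decide
# which queued tasks this process should handle (assumed mechanism, not
# necessarily the patch's exact implementation).
from hashlib import md5

def is_my_task(task_name, my_indexes, divisor):
    # hash the task name into [0, divisor) and claim it if the slot is ours
    slot = int(md5(task_name.encode('utf8')).hexdigest(), 16) % divisor
    return slot in my_indexes

tasks = ['1717200000-AUTH_test/c/o%d' % i for i in range(10)]
# process 1 of 4: my_indexes=[1], divisor=4 claims roughly a quarter of tasks
print([t for t in tasks if is_my_task(t, my_indexes=[1], divisor=4)])
```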
swift/obj/expirer.py, lines 282-287 (unresolved)

```
279:         # task_container name is timestamp
280:         return Timestamp(task_container)
281:
282:     def pick_my_containers_and_set_my_indexes(self, task_containers_cache):
283:         if self.processes <= 0:
284:             self.my_indexes = [0]
285:             return task_containers_cache
286:         # each cycle we move around the processes ring to cover for down nodes
287:         process_index = (self.process + self.cycle_count) % self.processes
288:         if len(task_containers_cache) >= self.processes:
289:             # pigeon hole says each process will handle all of the tasks in
290:             # at least one container
```

Jianjian Huo (2024-05-29), on line 282:

Add a docstring.

Jianjian Huo (2024-05-29), on line 284:

This is set already at line 205.

Jianjian Huo (2024-05-29), on line 287:

For one process, each cycle it will get a new `process_index` and probably a different set of task containers to work on. What if it can't finish its task containers during one cycle? For sure it can come back, or other processes will work on those half-finished containers later, but will it leave us with many in-progress task containers?

Maybe we can move around the processes ring more slowly by doing the below, or even add a new config option:
``process_index = (self.process + self.cycle_count // 2) % self.processes``
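A quick way to see the rotation Jianjian is describing: with the per-cycle increment a process moves to a new container group every cycle, while his integer-halved increment holds each assignment for two cycles. The numbers below are made up purely for illustration.

```
# Toy illustration of how fast one process walks around the ring of
# container groups; processes=4 and 8 cycles are made-up numbers.
processes = 4
process = 1  # this node's configured process value

per_cycle = [(process + cycle) % processes for cycle in range(8)]
halved = [(process + cycle // 2) % processes for cycle in range(8)]

print(per_cycle)  # [1, 2, 3, 0, 1, 2, 3, 0] - new group every cycle
print(halved)     # [1, 1, 2, 2, 3, 3, 0, 0] - each group held for two cycles
```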
swift/obj/expirer.py, line 293 (unresolved)

```
290:             # at least one container
291:             self.my_indexes = [p for p in range(self.processes)]
292:             return distribute_evenly(task_containers_cache,
293:                                      self.processes)[process_index]
294:         else:
295:             # somewhat more tricky, each container will be processed by
296:             # potentially multiple processes in parallel - there might be a
```

Tim Burke (2024-04-23):

I'm a little nervous about the uneven distribution of work when `len(task_containers_cache) ≅ self.processes` -- some cycles will be twice or half as long as most others. But I also don't have much of any better idea that would still hit our goal of minimizing container-server load.

Clay Gerrard (2024-05-31):

Yes, I think there's some inherent trade-off. Nodes can participate in DELETEs of ready tasks within task containers they don't list.

FWIW I'm sure this block (task_containers > nodes, or more rarely ==) will come up in a lot of small/medium clusters (<100 nodes), but it's not the case I'm trying to optimize for. And I think it's "worst" when you have ~80 nodes and a 2-day backlog; with 200 containers each node gets 2-3 containers, with the *majority* getting only 2, so those poor losers have to do 33% more work!

Currently in our clusters we have 1700+ nodes all listing 1B objects in a queue just to find the few 100 million that are processable; the "work" is mostly "listing queues" and not "doing deletes".

I think we can *dramatically* improve the ratio of "busy" work to "actual" work by cutting down the number of listings by two orders of magnitude: listing each container ~twice (instead of 1K+) and still doing the same number of DELETEs per cycle.

In my thinking this reduction in *total* work is worth any penalty from a (shifting) subset of nodes doing the majority of DELETEs in any given cycle.
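To make the imbalance concrete with Clay's example numbers: a round-robin even split of 200 task containers over 80 processes leaves half the processes with three containers and half with two. The helper is re-implemented locally here only so the example runs standalone; it is assumed to behave like the `distribute_evenly()` the patch calls.

```
# Round-robin split assumed to match the distribute_evenly() helper used
# above; re-implemented here only so the example runs standalone.
def distribute_evenly(items, num_buckets):
    buckets = [[] for _ in range(num_buckets)]
    for index, item in enumerate(items):
        buckets[index % num_buckets].append(item)
    return buckets

# Clay's example: ~80 nodes working a 2-day backlog of ~200 task containers
containers = ['container-%03d' % i for i in range(200)]
buckets = distribute_evenly(containers, 80)
sizes = sorted(len(b) for b in buckets)
print(sizes[0], sizes[-1])             # 2 3  - some processes get 50% more containers
print(sizes.count(2), sizes.count(3))  # 40 40
```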
swift/obj/expirer.py, line 303 (unresolved)

```
300:             container_index_to_process_map = distribute_evenly(
301:                 range(self.processes), len(task_containers_cache))
302:             for i, task_container in enumerate(task_containers_cache):
303:                 assigned_procs = container_index_to_process_map[i]
304:                 if process_index in assigned_procs:
305:                     j = assigned_procs.index(process_index)
306:                     self.my_indexes = [h for h in range(self.processes)
```

Tim Burke (2024-04-23):

`zip` instead of `enumerate`?

Or we do something smarter with `divmod` instead of a bunch of extra loops.

Clay Gerrard (2024-05-31):

If you can think of an equivalent spelling that's clearer, diffs are welcome! I'm sure anything you do would be "smarter" than whatever working code *I've* come up with.
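One possible reading of the `divmod` suggestion: because a round-robin split assigns process `p` to container bucket `p % num_containers` at position `p // num_containers` within that bucket, the map and the loop can collapse into arithmetic. This is a sketch of that equivalence under the round-robin assumption, not code from the patch.

```
# Sketch of a divmod-style equivalent to the map-and-loop above; assumes
# distribute_evenly() is a round-robin split. Not code from the patch.
def pick_via_divmod(process_index, num_processes, num_containers):
    # round-robin puts process p into container bucket p % num_containers,
    # at position p // num_containers within that bucket
    j, container_index = divmod(process_index, num_containers)
    # how many processes share that container under round-robin
    num_assigned = len(range(container_index, num_processes, num_containers))
    my_indexes = [h for h in range(num_processes) if h % num_assigned == j]
    return container_index, my_indexes

# e.g. 5 processes sharing 2 task containers; process_index 3 works
# container 1 and claims hash slots {1, 3} of the 5-wide hash space
print(pick_via_divmod(3, 5, 2))  # (1, [1, 3])
```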
swift/obj/expirer.py, line 309 (unresolved)

```
306:                     self.my_indexes = [h for h in range(self.processes)
307:                                        # each assigned_proc will take their
308:                                        # share of the *whole* hash_mod space
309:                                        if h % len(assigned_procs) == j]
310:                     return [task_container]
311:             else:
312:                 assert False, 'process was assigned no containers?'
```

Tim Burke (2024-04-23):

This feels like a weird hack just to avoid plumbing `my_index=j, divisor=len(assigned_procs)` into `iter_task_to_expire`...

If we *have* to keep this, I'd at least like to see a special case for `len(assigned_procs) == 1`.

Clay Gerrard (2024-05-31):

> a weird hack just to avoid plumbing

Do you mean using `self` to set state instead of changing the structure of the values yielded from `iter_task_account_containers_to_expire`? Yes, I'm definitely trying to use object state to avoid a more disruptive refactor; I don't think I entirely understand the *original* motivation for lifting `self.process` & `self.processes` to arguments of `iter_task_to_expire` in the *first* place; they *were* mostly static.

Now they're dynamic and dependent on what containers we find in this method. I think `self.report_last_time` is doing essentially the same thing in `run_once` (to pass a value to the report method using object state), but maybe `my_indexes` is "weirder". I'll try something different and see what the tests think about it!

> I'd at least like to see a special case for `len(assigned_procs) == 1`

I think that's a *pretty* special case? When you have *exactly* as many processes as task_containers?
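For anyone following along, here is the branch above worked through with small made-up numbers (5 processes, 2 task containers), showing how the processes assigned to one container split the whole hash-mod space between them. Container names and counts are illustrative; the split helper is the same round-robin assumption as in the earlier sketch.

```
# Worked example of lines 300-310 with made-up numbers: 5 processes,
# 2 task containers.
def distribute_evenly(items, num_buckets):
    buckets = [[] for _ in range(num_buckets)]
    for index, item in enumerate(items):
        buckets[index % num_buckets].append(item)
    return buckets

processes = 5
task_containers = ['1717200000', '1717286400']  # names are illustrative

container_index_to_process_map = distribute_evenly(
    range(processes), len(task_containers))
print(container_index_to_process_map)  # [[0, 2, 4], [1, 3]]

for process_index in range(processes):
    for i, task_container in enumerate(task_containers):
        assigned_procs = container_index_to_process_map[i]
        if process_index in assigned_procs:
            j = assigned_procs.index(process_index)
            my_indexes = [h for h in range(processes)
                          if h % len(assigned_procs) == j]
            print(process_index, task_container, my_indexes)
            break
# 0 1717200000 [0, 3]      <- procs 0, 2, 4 split all 5 hash slots
# 1 1717286400 [0, 2, 4]   <- procs 1, 3 split all 5 hash slots
# 2 1717200000 [1, 4]
# 3 1717286400 [1, 3]
# 4 1717200000 [2]
```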
swift/obj/expirer.py, line 312 (unresolved)

Tim Burke (2024-04-23):

This should only come up if `process >= processes`, yeah? We check for that in `get_process_values` -- maybe we should also do it up in `__init__`? I don't really _like_ the current behavior of logging a traceback, sleeping for some fraction of an interval, then repeating...

Clay Gerrard (2024-05-31):

I don't know why the cli/kwarg validation happens in `run_once` and not `run_forever` - gross.

swift/obj/expirer.py, line 314 (unresolved)

```
314:     def order_selected_containers(self, task_containers_cache):
315:         selected_containers = self.pick_my_containers_and_set_my_indexes(
316:             task_containers_cache)
317:         if self.randomized_task_container_iteration:
318:             shuffle(selected_containers)
319:         return selected_containers
```

Jianjian Huo (2024-05-29):

Add a docstring.
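A minimal sketch of the up-front check Tim is asking for, assuming the same constraints `get_process_values` enforces later (non-negative values, and `process` strictly less than `processes` when `processes` is set); the function name and messages here are illustrative, not from the patch.

```
# Illustrative only: the kind of constructor-time check Tim suggests,
# mirroring what get_process_values() enforces later. Names and messages
# are not from the patch.
def validate_process_values(process, processes):
    if process < 0 or processes < 0:
        raise ValueError('process and processes must be non-negative')
    if processes and process >= processes:
        raise ValueError('process must be less than processes')

validate_process_values(process=2, processes=4)    # ok
# validate_process_values(process=4, processes=4)  # would raise ValueError
```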
swift/obj/expirer.py, line 318 (unresolved)

Tim Burke (2024-04-23):

This is less of an issue now, yeah? It seems like now, we'll mostly either

- have just one container we're (perhaps only partially) responsible for, so there's nothing to shuffle, or
- have a list of containers which we should be exclusively responsible for

At most I'd expect just one or two other processes out of 2k-ish to be trying to hit the same container set, and that's only after we'd had a bunch of time to do multiple cycles.

Do we have a sense for how many cycles we manage to get through between restarts today? Any expectations for what it'll be like with this change?

Jianjian Huo (2024-05-29):

I feel we should offer a few strategies for SREs to choose from, such as ``legacy``, ``randomized`` and ``distributed``.

For the base case of many task containers crossing multiple days (let's say day 1, day 2, ... day 10), if the user doesn't use delayed reaping, then it will make sense to users to expect that expirers first clear all task containers in day 1, then day 2, ... then day 10. Such a user can just use ``legacy``. (With this example, I also think it would be great if this new distributed strategy could take that into consideration in the future as well.)

Clay Gerrard (2024-05-31):

Yeah, randomization at this level only makes sense if you're running only a single expirer for the whole cluster, or maybe if you have 20 nodes and run with processes = 10 for redundancy coverage.

> Do we have a sense for how many cycles we manage to get through between restarts today?

Our cycle is about 1.5 days currently; we've been avoiding restarts and gotten through as many as 3-6 cycles between upgrades.

> Any expectations for what it'll be like with this change?

I'd like to get a "cycle" down to ~2hrs; at the edge, when there's a bunch of actual DELETEs that come due, I'd still be happy even if we balloon up to ~12hr.
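To make Jianjian's suggestion concrete, here is one hypothetical shape a strategy option could take; the option values, function name, and behavior are invented for illustration and are not part of this patch.

```
# Hypothetical sketch of a task-container iteration strategy option as
# Jianjian suggests; nothing here is in the patch under review.
from random import shuffle

def order_selected_containers(selected_containers, strategy='legacy'):
    if strategy == 'legacy':
        # oldest (lowest timestamp-named container) first, day by day
        return sorted(selected_containers)
    if strategy == 'randomized':
        shuffled = list(selected_containers)
        shuffle(shuffled)
        return shuffled
    if strategy == 'distributed':
        # keep the order chosen by the distributed container picker
        return list(selected_containers)
    raise ValueError('unknown strategy: %r' % strategy)

print(order_selected_containers(['1717286400', '1717200000'], 'legacy'))
# ['1717200000', '1717286400']
```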
swift/obj/expirer.py, line 478 (unresolved)

```
475:             self.logger.exception('Unhandled exception')
476:         finally:
477:             self.cycle_count += 1
478:             # exceptions raised here are caught in run_forever
479:             self.report(final=True)
480:
481:     def run_forever(self, *args, **kwargs):
```

Tim Burke (2024-04-23):

Or they bubble out of `run` when just running once.

Maybe `report` should have some error handling?

Clay Gerrard (2024-05-31):

This comment was really about moving the call site out of the try block (end of the loop) and into the finally (so that I could add the guard return and still maintain the existing behavior of a final report when there's no work to do).

I don't actually think `report` ever raises unhandled exceptions; but if it did, it wouldn't stop the process (unless, as you say, in `run_once` - where we're already done!).

I'm not going to scope-creep "improve error handling in report" work into this change: I could either choose different words for this comment or just call `report` twice and leave it within the try block. Any preference?
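If the review does end up wanting `report` guarded, a minimal version of the error handling Tim floats could look like the following; whether that belongs in this change is exactly what the thread leaves open, and the wrapper here is illustrative rather than proposed code.

```
# Sketch only: guarding the final report so a reporting failure cannot mask
# the cycle's outcome. Not part of the patch under review.
import logging

logger = logging.getLogger('object-expirer')

def safe_final_report(report, logger):
    try:
        report(final=True)
    except Exception:
        logger.exception('Error producing final report')

safe_final_report(lambda final: print('report, final=%s' % final), logger)
```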
