{"/PATCHSET_LEVEL":[{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"8c5e335c02dce6077e7894717a91bc6159724385","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"06e77acb_d9738f2d","updated":"2022-12-12 13:59:41.000000000","message":"this fails on this branch https://review.opendev.org/c/openstack/swift/+/867225\nbut needs tidying up","commit_id":"38a90371ac746d329753026207a74991863793e4"},{"author":{"_account_id":34930,"name":"Jianjian Huo","email":"jhuo@nvidia.com","username":"jhuo"},"change_message_id":"8cecbfcb7d8f4bac8d34ab29fbf3fc8413777635","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":3,"id":"fa505821_4be839a6","updated":"2022-12-14 06:37:57.000000000","message":"Thanks for the review and help. Now it passes on the master branch, and fails without the \"sharder: merge shard shard_ranges from root while sharding\" commit.","commit_id":"357749bb1341a041a8afb52c491f893405891cfb"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"a02b512b0bd88a95cfb84386db2102d6e7244bed","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"34138897_07baff28","updated":"2022-12-15 09:50:40.000000000","message":"@JianJian 2 of the probe test failures are unrelated but the new test failed and it looks genuine (probably an intermittent broken assumption in the test), so we should investigate more before merging:\n\n  popenargs \u003d ([\u0027swift-manage-shard-ranges\u0027, \u0027/srv/2/node/sdb2/containers/228/c5a/3925d78eceb8a19d66c929e0003b3c5a/3925d78eceb8a19d66c929e0003b3c5a.db\u0027, \u0027find_and_replace\u0027, \u00273\u0027, \u0027--enable\u0027],)\nkwargs \u003d {\u0027stderr\u0027: -2, \u0027stdout\u0027: -1}\nprocess \u003d \u003csubprocess.Popen object at 0x7fb621d8c630\u003e\nstdout \u003d b\u0027Loaded db broker for .shards_AUTH_test/container-54a88e45-f049-4fa5-a90e-e4eb4d0baf63-0aff1b2583406363e603cbd383dbdb...or to replicate them to other nodes.\\nWARNING: container in state cleaved (should be active or sharding).\\nAborting.\\n\u0027\nstderr \u003d None, retcode \u003d 1","commit_id":"a941e6b5a4191a3e7a4a19ee5c7e5cee2107df97"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"850e570dbce3b38bd6b0da3d3311933b8fefe21b","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"6e3212b9_0f637365","updated":"2022-12-14 11:35:15.000000000","message":"Great. 
Test fails with this regression:\n\n  diff --git a/swift/container/sharder.py b/swift/container/sharder.py\n  index 14de69d0d..951c87219 100644\n  --- a/swift/container/sharder.py\n  +++ b/swift/container/sharder.py\n  @@ -1418,7 +1418,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):\n                   self.logger.debug(\n                       \u0027Updating %s other shard range(s) from root\u0027,\n                       len(filtered_other_shard_ranges))\n  -                broker.merge_shard_ranges(filtered_other_shard_ranges)\n  +                #broker.merge_shard_ranges(filtered_other_shard_ranges)\n \n           return own_shard_range, own_shard_range_from_root\n\n\n\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\nFAIL: test_manage_shard_ranges_deleted_child_and_parent_gap (test.probe.test_sharder.TestManagedContainerSharding)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n  File \"/vagrant/swift/test/probe/test_sharder.py\", line 3952, in test_manage_shard_ranges_deleted_child_and_parent_gap\n    self.assert_container_state(\n  File \"/vagrant/swift/test/probe/test_sharder.py\", line 381, in assert_container_state\n    self.assertEqual(num_shard_ranges, len(shard_ranges))\nAssertionError: 3 !\u003d 1\n    \u00273 !\u003d 1\u0027 \u003d \u0027%s !\u003d %s\u0027 % _common_shorten_repr(3, 1)\n    \u00273 !\u003d 1\u0027 \u003d self._formatMessage(\u00273 !\u003d 1\u0027, \u00273 !\u003d 1\u0027)\n\u003e\u003e  raise self.failureException(\u00273 !\u003d 1\u0027)\n    \n\n----------------------------------------------------------------------\nRan 1 test in 58.585s\n\nFAILED (failures\u003d1)\n","commit_id":"a941e6b5a4191a3e7a4a19ee5c7e5cee2107df97"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"cb5eaa2dda7d1023dde38de45c87fd830418d464","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"2d90569e_287414b7","updated":"2022-12-14 17:20:09.000000000","message":"recheck\n\nSome weird failures in xprofile -- not seen that before.","commit_id":"a941e6b5a4191a3e7a4a19ee5c7e5cee2107df97"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"6c4bd5d420b27bd18738a0a214c1fa25be5e9e94","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"34e42454_53c44ab1","updated":"2022-12-14 21:25:24.000000000","message":"recheck\n\nunrelated probe test failures","commit_id":"a941e6b5a4191a3e7a4a19ee5c7e5cee2107df97"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"5b1b359d5f90fdf58d89058bcf27900a7be0796d","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":5,"id":"526f2f03_c06ffb8b","updated":"2022-12-20 18:17:56.000000000","message":"I also checked this fails when the sharder regresses to no longer pull \u0027other\u0027 shard ranges from root. 
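For reference, the kwargs in the dump above decode to stdout=subprocess.PIPE (-1) and stderr=subprocess.STDOUT (-2). A minimal sketch of the equivalent call, with a hypothetical wrapper name (the probe test's real helper is assert_subprocess_success):

    import subprocess

    def run_find_and_replace(db_path):
        # hypothetical illustration: ask swift-manage-shard-ranges to split
        # the container DB into shard ranges of at most 3 rows and enable
        # sharding; check=True raises CalledProcessError on the retcode=1
        # abort seen in the dump above
        return subprocess.run(
            ['swift-manage-shard-ranges', db_path, 'find_and_replace', '3',
             '--enable'],
            stdout=subprocess.PIPE,    # the -1 in the dump
            stderr=subprocess.STDOUT,  # the -2 in the dump
            check=True)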
Jianjian Huo (jhuo), patch set 5, 2022-12-20 07:43:

    The test passes with 100 local runs, and still fails without the
    "sharder: merge shard shard_ranges from root while sharding" commit.

Alistair Coles (acoles), patch set 5, 2022-12-20 10:02:

    recheck

    Unrelated probe test failure:
    test/probe/test_container_merge_policy_index.py::TestContainerMergePolicyIndex::test_reconciler_move_object_twice FAILED [100%]

Alistair Coles (acoles), patch set 5, 2022-12-20 18:17:

    I also checked that this fails when the sharder regresses to no longer
    pulling 'other' shard ranges from root, and I cycled it tens of times to
    check there is no intermittent failure.

    It's a shame that the rename trick does not quite reproduce the real-life
    scenario we imagine: the sharder will presumably be finding a 'fresh' DB
    on the 'bad' replica that has been sync'd from the good replicas. But I
    guess that before the latest patchset, the other replicas were still able
    to sync to the 'bad' replica's retiring DB, so that wasn't perfect either.

    The best I can think of is to rename the entire bad replica directory so
    that later in the test, when that dir is reinstated, any replicated fresh
    DB is lost and the bad replica is returned to exactly the state it was in
    when it was taken 'offline'. But IDK if that is *necessary* :shrug:?

Jianjian Huo (jhuo), patch set 5, 2022-12-21 06:07:

    Thanks. The latest update also passes 100 runs in my local vsaio.

Tim Burke (tburke), patch set 7, 2023-01-05 00:23:

    recheck

    Gate should be happy now that
    https://review.opendev.org/c/openstack/swift/+/869135 landed.

Tim Burke (tburke), patch set 7, 2023-01-05 02:49:

    Approving based on Clay's prior approval.
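To make the directory-rename idea in the 2022-12-20 18:17 comment above concrete: a minimal sketch for inside the test method, assuming the existing c_shard_brokers and sharders_once helpers; the .offline suffix and exact ordering are illustrative, not the final implementation.

    import os
    import shutil

    # take the 'bad' replica's whole DB directory offline, not just its DB file
    db_dir = os.path.dirname(c_shard_brokers[2].db_file)
    offline_dir = db_dir + '.offline'
    os.rename(db_dir, offline_dir)

    # let the other replicas shard; replication may now create a fresh DB
    # directory for the 'bad' replica
    self.sharders_once()

    # reinstate the exact pre-offline state: drop anything replicated in the
    # meantime, then restore the original directory
    shutil.rmtree(db_dir, ignore_errors=True)
    os.rename(offline_dir, db_dir)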
Inline comments: test/probe/test_sharder.py
===========================================

Alistair Coles (acoles), patch set 2, 2022-12-12 13:59, on line 3891:

      3888                 node, 'unsharded', 2, account=shard_ranges[0].account,
      3889                 container=shard_ranges[0].container, part=child_shard_part)
      3890         # stop the child shard nodes 0 container.
      3891         container_node_to_stop = shard_nodes[0]
      3892         container_to_stop = shard_ranges[0].container
      3893         self.stop_container_servers_by_ids(
      3894             [container_node_to_stop.get("id") + 1])

    This stops the container server but does not prevent the sharder running
    on that node.

    Reply from Jianjian Huo (jhuo), 2022-12-14 06:37:

        Got it, this is the issue why sharding actually finished. Thanks!
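The point of the comment above, as code: stopping the container server only blocks HTTP access, while the sharder daemon reads container DBs straight from disk on its own cycle. A sketch using the helpers visible in the context (devs_arg restricted to the surviving devices, as in the later patchset):

    # stopping the server does not stop the sharder, which opens the DB file
    # directly rather than going through the container API
    self.stop_container_servers_by_ids(
        [container_node_to_stop.get("id") + 1])

    # so the sharder run must also be restricted to the devices we want visited
    self.sharders_once(additional_args=['--devices=%s' % devs_arg])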
Thanks!","commit_id":"38a90371ac746d329753026207a74991863793e4"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"8c5e335c02dce6077e7894717a91bc6159724385","unresolved":true,"context_lines":[{"line_number":3896,"context_line":"                           child_shard_part)"},{"line_number":3897,"context_line":"        # get shards to update state from parent..."},{"line_number":3898,"context_line":"        self.sharders_once()"},{"line_number":3899,"context_line":"        for node in shard_nodes[1::]:"},{"line_number":3900,"context_line":"            self.assert_container_state("},{"line_number":3901,"context_line":"                node, \u0027sharded\u0027, 2, account\u003dshard_ranges[0].account,"},{"line_number":3902,"context_line":"                container\u003dshard_ranges[0].container, part\u003dchild_shard_part)"}],"source_content_type":"text/x-python","patch_set":2,"id":"618d586f_7e0e8edc","line":3899,"updated":"2022-12-12 13:59:41.000000000","message":"I suspect that all 3 replicas have actually sharded at this point","commit_id":"38a90371ac746d329753026207a74991863793e4"},{"author":{"_account_id":34930,"name":"Jianjian Huo","email":"jhuo@nvidia.com","username":"jhuo"},"change_message_id":"8cecbfcb7d8f4bac8d34ab29fbf3fc8413777635","unresolved":false,"context_lines":[{"line_number":3896,"context_line":"                           child_shard_part)"},{"line_number":3897,"context_line":"        # get shards to update state from parent..."},{"line_number":3898,"context_line":"        self.sharders_once()"},{"line_number":3899,"context_line":"        for node in shard_nodes[1::]:"},{"line_number":3900,"context_line":"            self.assert_container_state("},{"line_number":3901,"context_line":"                node, \u0027sharded\u0027, 2, account\u003dshard_ranges[0].account,"},{"line_number":3902,"context_line":"                container\u003dshard_ranges[0].container, part\u003dchild_shard_part)"}],"source_content_type":"text/x-python","patch_set":2,"id":"1e3a0320_6334902a","line":3899,"in_reply_to":"618d586f_7e0e8edc","updated":"2022-12-14 06:37:57.000000000","message":"Ack","commit_id":"38a90371ac746d329753026207a74991863793e4"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"8c5e335c02dce6077e7894717a91bc6159724385","unresolved":true,"context_lines":[{"line_number":3902,"context_line":"                container\u003dshard_ranges[0].container, part\u003dchild_shard_part)"},{"line_number":3903,"context_line":""},{"line_number":3904,"context_line":"        # shard first grand-child shard into 2 grand-grand-child-shards."},{"line_number":3905,"context_line":"        shard_ranges \u003d self.get_container_shard_ranges()"},{"line_number":3906,"context_line":"        shard_brokers \u003d [self.get_shard_broker(shard_ranges[0], node_index\u003di)"},{"line_number":3907,"context_line":"                         for i in range(3)]"},{"line_number":3908,"context_line":"        self.assert_subprocess_success(["}],"source_content_type":"text/x-python","patch_set":2,"id":"c58743e5_96bda3c2","line":3905,"updated":"2022-12-12 13:59:41.000000000","message":"this gets the root shard ranges - maybe that does now have the grandchild but it might be better to explicitly get the shard ranges from the child shard","commit_id":"38a90371ac746d329753026207a74991863793e4"},{"author":{"_account_id":34930,"name":"Jianjian 
Huo","email":"jhuo@nvidia.com","username":"jhuo"},"change_message_id":"8cecbfcb7d8f4bac8d34ab29fbf3fc8413777635","unresolved":false,"context_lines":[{"line_number":3902,"context_line":"                container\u003dshard_ranges[0].container, part\u003dchild_shard_part)"},{"line_number":3903,"context_line":""},{"line_number":3904,"context_line":"        # shard first grand-child shard into 2 grand-grand-child-shards."},{"line_number":3905,"context_line":"        shard_ranges \u003d self.get_container_shard_ranges()"},{"line_number":3906,"context_line":"        shard_brokers \u003d [self.get_shard_broker(shard_ranges[0], node_index\u003di)"},{"line_number":3907,"context_line":"                         for i in range(3)]"},{"line_number":3908,"context_line":"        self.assert_subprocess_success(["}],"source_content_type":"text/x-python","patch_set":2,"id":"680c95dd_d8933370","line":3905,"in_reply_to":"c58743e5_96bda3c2","updated":"2022-12-14 06:37:57.000000000","message":"Ack","commit_id":"38a90371ac746d329753026207a74991863793e4"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"5b1b359d5f90fdf58d89058bcf27900a7be0796d","unresolved":true,"context_lines":[{"line_number":3893,"context_line":"        self.assert_container_state("},{"line_number":3894,"context_line":"            c_shard_nodes[2],"},{"line_number":3895,"context_line":"            \u0027unsharded\u0027, 2, account\u003dc_shard_ranges[0].account,"},{"line_number":3896,"context_line":"            container\u003dc_shard_ranges[0].container, part\u003dchild_shard_part)"},{"line_number":3897,"context_line":"        # get updates done..."},{"line_number":3898,"context_line":"        self.sharders_once(additional_args\u003d[\u0027--devices\u003d%s\u0027 % devs_arg])"},{"line_number":3899,"context_line":""}],"source_content_type":"text/x-python","patch_set":4,"id":"3121d454_42a43bb9","line":3896,"updated":"2022-12-20 18:17:56.000000000","message":"ok, I guess there is no DB so this would blow up now :)","commit_id":"a941e6b5a4191a3e7a4a19ee5c7e5cee2107df97"},{"author":{"_account_id":34930,"name":"Jianjian Huo","email":"jhuo@nvidia.com","username":"jhuo"},"change_message_id":"9c4925b973be4e81330e9b6e8310a0e4ca23cd0e","unresolved":false,"context_lines":[{"line_number":3893,"context_line":"        self.assert_container_state("},{"line_number":3894,"context_line":"            c_shard_nodes[2],"},{"line_number":3895,"context_line":"            \u0027unsharded\u0027, 2, account\u003dc_shard_ranges[0].account,"},{"line_number":3896,"context_line":"            container\u003dc_shard_ranges[0].container, part\u003dchild_shard_part)"},{"line_number":3897,"context_line":"        # get updates done..."},{"line_number":3898,"context_line":"        self.sharders_once(additional_args\u003d[\u0027--devices\u003d%s\u0027 % devs_arg])"},{"line_number":3899,"context_line":""}],"source_content_type":"text/x-python","patch_set":4,"id":"45cdd189_dc09a273","line":3896,"in_reply_to":"3121d454_42a43bb9","updated":"2022-12-21 06:07:41.000000000","message":"Ack","commit_id":"a941e6b5a4191a3e7a4a19ee5c7e5cee2107df97"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"5b1b359d5f90fdf58d89058bcf27900a7be0796d","unresolved":true,"context_lines":[{"line_number":3883,"context_line":"        # replica\u0027s DB file."},{"line_number":3884,"context_line":"        tmp_db_file \u003d c_shard_brokers[2].db_file + 
\".tmp\""},{"line_number":3885,"context_line":"        os.rename(c_shard_brokers[2].db_file, tmp_db_file)"},{"line_number":3886,"context_line":"        self.sharders_once()"},{"line_number":3887,"context_line":"        for node in c_shard_nodes[:2]:"},{"line_number":3888,"context_line":"            self.assert_container_state("},{"line_number":3889,"context_line":"                node, \u0027sharded\u0027, 2, account\u003dc_shard_ranges[0].account,"}],"source_content_type":"text/x-python","patch_set":5,"id":"b6a7a13d_be39ad61","line":3886,"updated":"2022-12-20 18:17:56.000000000","message":"is it possible to keep the \u0027--partitions\u003d%s\u0027 % child_shard_part\u0027 here? we only want to run sharder on the child at this point?\n\n--partitions just makes the shard cycle and therefore the test a little quicker","commit_id":"2a2d4f7c6d5727db7a8423d190967ae519817b1c"},{"author":{"_account_id":34930,"name":"Jianjian Huo","email":"jhuo@nvidia.com","username":"jhuo"},"change_message_id":"9c4925b973be4e81330e9b6e8310a0e4ca23cd0e","unresolved":false,"context_lines":[{"line_number":3883,"context_line":"        # replica\u0027s DB file."},{"line_number":3884,"context_line":"        tmp_db_file \u003d c_shard_brokers[2].db_file + \".tmp\""},{"line_number":3885,"context_line":"        os.rename(c_shard_brokers[2].db_file, tmp_db_file)"},{"line_number":3886,"context_line":"        self.sharders_once()"},{"line_number":3887,"context_line":"        for node in c_shard_nodes[:2]:"},{"line_number":3888,"context_line":"            self.assert_container_state("},{"line_number":3889,"context_line":"                node, \u0027sharded\u0027, 2, account\u003dc_shard_ranges[0].account,"}],"source_content_type":"text/x-python","patch_set":5,"id":"7f103bc1_feaf05c5","line":3886,"in_reply_to":"b6a7a13d_be39ad61","updated":"2022-12-21 06:07:41.000000000","message":"Done","commit_id":"2a2d4f7c6d5727db7a8423d190967ae519817b1c"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"5b1b359d5f90fdf58d89058bcf27900a7be0796d","unresolved":true,"context_lines":[{"line_number":3929,"context_line":"        # the sharder since, so still has two grand-child shards."},{"line_number":3930,"context_line":"        self.assert_container_state("},{"line_number":3931,"context_line":"            c_shard_nodes[2],"},{"line_number":3932,"context_line":"            \u0027sharding\u0027, 2, account\u003dc_shard_ranges[0].account,"},{"line_number":3933,"context_line":"            container\u003dc_shard_ranges[0].container, part\u003dchild_shard_part)"},{"line_number":3934,"context_line":""},{"line_number":3935,"context_line":"        # now, finally, run the sharder on the child that is still waiting to"}],"source_content_type":"text/x-python","patch_set":5,"id":"67e0cae4_fa87ead7","line":3932,"range":{"start_line":3932,"start_character":13,"end_line":3932,"end_character":21},"updated":"2022-12-20 18:17:56.000000000","message":"curious that this has changed: I guess that previously the other replicas sync\u0027d to the retiring DB, so at this point there was still only one DB file in the unvisited directory - hence it reporting as \u0027unsharded\u0027. 
Alistair Coles (acoles), patch set 5, 2022-12-20 18:17, on line 3932 (at 'sharding'):

      3929         # the sharder since, so still has two grand-child shards.
      3930         self.assert_container_state(
      3931             c_shard_nodes[2],
      3932             'sharding', 2, account=c_shard_ranges[0].account,
      3933             container=c_shard_ranges[0].container, part=child_shard_part)
      3934
      3935         # now, finally, run the sharder on the child that is still waiting to

    Curious that this has changed: I guess that previously the other replicas
    sync'd to the retiring DB, so at this point there was still only one DB
    file in the unvisited directory - hence it reported as 'unsharded'. Now,
    with the retiring DB renamed, the other replicas will create a 'fresh' DB
    with a timestamp during replication; then we restore the retiring DB ->
    there are 2 DB files, so this appears to be the DB-'sharding' state.

    Might be worth a comment to help future us understand how this DB got to
    'sharding' state.

    Reply from Jianjian Huo (jhuo), 2022-12-21 06:07:

        I will switch to DIR renaming instead, which will get us closer to
        the real-world case.
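To illustrate the two-DB-files observation above, a rough sketch of the apparent-state inference with hypothetical file names (the real logic lives in ContainerBroker.get_db_state() in swift/container/backend.py, where the fresh DB carries an epoch in its filename):

    import os

    def apparent_db_state(retiring_db_file, fresh_db_file):
        # hypothetical helper: which DB files exist on disk determines how
        # the replica reports its state
        has_retiring = os.path.exists(retiring_db_file)  # e.g. <hash>.db
        has_fresh = os.path.exists(fresh_db_file)        # e.g. <hash>_<epoch>.db
        if has_retiring and has_fresh:
            return 'sharding'   # mid-shard: retiring and fresh DBs both present
        elif has_fresh:
            return 'sharded'
        elif has_retiring:
            return 'unsharded'
        return 'not found'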
