)]}'
{"/PATCHSET_LEVEL":[{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"759a18231fa2e99c1ab5d7c7f056001d41144ff1","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"c600cafe_45f2084b","updated":"2022-03-18 20:49:12.000000000","message":"Surprisingly I don\u0027t think I broke any existing unit test which suggests a test should be added! Zuul may prove me wrong.","commit_id":"2e7984e55be97bcc657d63f837319db5919737a0"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"6bf2d1f8e2171776e1b8ba1c45af923983bfc64b","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":3,"id":"fe1b775a_126bf938","updated":"2022-03-23 04:18:39.000000000","message":"Nice find, and also love the hole in listing test! Now we just need to fix the CLEAVED too soon and fix this hole and early ACTIVE bug.\n\nGreat work. Just a +1 as I want to double check it in my SAIO first.","commit_id":"628e406c27602318dfb97bfbdeaba9748b95f9e2"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"d194ec90987bfa4cf3953a861b571d91eb012ab1","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":3,"id":"2216eace_378a91f9","updated":"2022-03-28 06:19:26.000000000","message":"So I played with this in my VSAIO. I loaded up a container. Deleted one replica. PUT a new object and put the container again, so I know had a small replica.\n\nMarked the container for sharding and started. When looking the small one:\n\n  $ swift-manage-shard-ranges --config /etc/swift/container-server/1.conf.d  \n/srv/node3/sdb7/containers/774/69e/c1bedc75747760602df84cf9eae9369e/c1bedc75747760602df84cf9eae9369e.db info\n  Loaded db broker for AUTH_test/shardme\n  Sharding enabled \u003d True\n  Own shard range: {\n    \"bytes_used\": 0, \n    \"deleted\": 0, \n    \"epoch\": \"1648447702.86763\", \n    \"lower\": \"\", \n    \"meta_timestamp\": \"1648447882.17838\", \n    \"name\": \"AUTH_test/shardme\", \n    \"object_count\": 0, \n    \"reported\": 0, \n    \"state\": \"sharding\", \n    \"state_timestamp\": \"1648447702.86763\", \n    \"timestamp\": \"1648447702.86699\", \n    \"tombstones\": -1, \n    \"upper\": \"\"\n  }\n  db_state \u003d sharding\n  Retiring db id: 98f7befc-8d7f-4996-929c-fb827d9a85b8\n  Cleaving context: {\n    \"cleave_to_row\": -1, \n    \"cleaving_done\": false, \n    \"cursor\": \"h4\", \n    \"last_cleave_to_row\": null, \n    \"max_row\": -1, \n    \"misplaced_done\": true, \n    \"ranges_done\": 3,                        \u003c-- We have 3 ranges done.\n    \"ranges_todo\": 8, \n    \"ref\": \"98f7befc-8d7f-4996-929c-fb827d9a85b8\"\n  }\n  ...\n\nBut when we look at it\u0027s shards the first 4 are marked as CLEAVED.\n\nExisting shard ranges:\n[\n  {\n    \"bytes_used\": 50, \n    \"deleted\": 0, \n    \"epoch\": null, \n    \"lower\": \"\", \n    \"meta_timestamp\": \"1648447795.49549\", \n    \"name\": \".shards_AUTH_test/shardme-7820d6e9550a1661e01e0538aea8cc1b-1648447697.99321-0\", \n    \"object_count\": 25, \n    \"reported\": 0, \n    \"state\": \"cleaved\", \n    \"state_timestamp\": \"1648447697.99321\", \n    \"timestamp\": \"1648447697.99321\", \n    \"tombstones\": 0, \n    \"upper\": \"c4\"\n  }, \n  {\n    \"bytes_used\": 50, \n    \"deleted\": 0, \n    \"epoch\": null, \n    \"lower\": \"c4\", \n    \"meta_timestamp\": \"1648447796.11607\", \n    \"name\": \".shards_AUTH_test/shardme-7820d6e9550a1661e01e0538aea8cc1b-1648447697.99321-1\", \n    \"object_count\": 25, \n    \"reported\": 0, \n    \"state\": \"cleaved\", \n    \"state_timestamp\": \"1648447697.99321\", \n    \"timestamp\": \"1648447697.99321\", \n    \"tombstones\": 0, \n    \"upper\": \"e9\"\n  }, \n  {\n    \"bytes_used\": 50, \n    \"deleted\": 0, \n    \"epoch\": null, \n    \"lower\": \"e9\", \n    \"meta_timestamp\": \"1648447796.00897\", \n    \"name\": \".shards_AUTH_test/shardme-7820d6e9550a1661e01e0538aea8cc1b-1648447697.99321-2\", \n    \"object_count\": 25, \n    \"reported\": 0, \n    \"state\": \"cleaved\", \n    \"state_timestamp\": \"1648447697.99321\", \n    \"timestamp\": \"1648447697.99321\", \n    \"tombstones\": 0, \n    \"upper\": \"h4\"\n  }, \n  {\n    \"bytes_used\": 50, \n    \"deleted\": 0, \n    \"epoch\": null, \n    \"lower\": \"h4\", \n    \"meta_timestamp\": \"1648447796.65990\", \n    \"name\": \".shards_AUTH_test/shardme-7820d6e9550a1661e01e0538aea8cc1b-1648447697.99321-3\", \n    \"object_count\": 25, \n    \"reported\": 0, \n    \"state\": \"cleaved\", \n    \"state_timestamp\": \"1648447697.99321\", \n    \"timestamp\": \"1648447697.99321\", \n    \"tombstones\": 0, \n    \"upper\": \"j9\"\n  }, \n  {\n    \"bytes_used\": 0, \n    \"deleted\": 0, \n    \"epoch\": null, \n    \"lower\": \"j9\", \n    \"meta_timestamp\": \"1648447795.92406\", \n    \"name\": \".shards_AUTH_test/shardme-7820d6e9550a1661e01e0538aea8cc1b-1648447697.99321-4\", \n    \"object_count\": 0, \n    \"reported\": 0, \n    \"state\": \"created\", \n    \"state_timestamp\": \"1648447697.99321\", \n    \"timestamp\": \"1648447697.99321\", \n    \"tombstones\": 0, \n    \"upper\": \"m4\"\n  }, \n  ...\n\nIt would be nice that ranges_done \u003d\u003d shards CLEAVED in this state. I\u0027ll have a more of a play and see how this happened. (of course it\u0027s happens just as I\u0027m about to head to dinner) :P \n","commit_id":"628e406c27602318dfb97bfbdeaba9748b95f9e2"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"5998bd31983b60ceed50d75737239d526a4ef6b7","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":3,"id":"4173cef3_cab3ba1b","updated":"2022-03-28 05:44:55.000000000","message":"recheck","commit_id":"628e406c27602318dfb97bfbdeaba9748b95f9e2"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"8c1c595fc3b5ccff77613ffa698297e0bfc16cdf","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":3,"id":"2b75cbdf_aa28dbf6","in_reply_to":"2216eace_378a91f9","updated":"2022-03-28 06:26:11.000000000","message":"ran a second time and it was correct.. hmm.","commit_id":"628e406c27602318dfb97bfbdeaba9748b95f9e2"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"cccf95e22bcf238bcd3744db9054cdf45a4a9a5e","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":3,"id":"9150c980_c2af8c26","in_reply_to":"2b75cbdf_aa28dbf6","updated":"2022-07-13 14:17:20.000000000","message":"is it possible that you ran the sharder on all replicas? the ranges_done in each replica\u0027s cleaving context will not necessarily match the number of cleaved ranges, since the latter is global (ie. another replica may be further ahead)","commit_id":"628e406c27602318dfb97bfbdeaba9748b95f9e2"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"b7bf07a0eee6a07d72f163f10e9a639862cb43bb","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"56033155_9e7ec8cc","updated":"2022-07-18 03:55:24.000000000","message":"recheck","commit_id":"57f7145f7379de1f736ff2d904e85918c8166536"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"fe7a232316b621a4d4c0dc1e7380f928dff3e23e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"e8723695_a4b7df0b","updated":"2022-07-14 05:09:29.000000000","message":"recheck","commit_id":"57f7145f7379de1f736ff2d904e85918c8166536"}],"swift/container/sharder.py":[{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"812771080d8ef811319e074135bd865f6d275e31","unresolved":true,"context_lines":[{"line_number":1720,"context_line":"                # multiple acceptors because in those cases the donor namespace"},{"line_number":1721,"context_line":"                # should not be deleted until *all* shards are cleaved."},{"line_number":1722,"context_line":"                if own_shard_range.update_state(ShardRange.SHRUNK):"},{"line_number":1723,"context_line":"                    own_shard_range.set_deleted()"},{"line_number":1724,"context_line":"                    broker.merge_shard_ranges(own_shard_range)"},{"line_number":1725,"context_line":"                shard_broker.merge_shard_ranges(own_shard_range)"},{"line_number":1726,"context_line":"        elif shard_range.state \u003d\u003d ShardRange.CREATED:"}],"source_content_type":"text/x-python","patch_set":4,"id":"53f4aaae_90a7af29","line":1723,"updated":"2022-07-18 22:40:33.000000000","message":"Oh -- there\u0027s probably also some bad/weird behavior we\u0027re fixing when shrinking away empty shards, too, huh....\n\nLike, previously, you could short-circuit cleaving when shrinking, causing the shrunk shard to not get marked deleted for a bit (though I guess there\u0027s a fallback down around L1931).","commit_id":"57f7145f7379de1f736ff2d904e85918c8166536"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"812771080d8ef811319e074135bd865f6d275e31","unresolved":true,"context_lines":[{"line_number":1733,"context_line":"                info[\u0027object_count\u0027], info[\u0027bytes_used\u0027])"},{"line_number":1734,"context_line":"            # Update state to CLEAVED; only do this when sharding, not when"},{"line_number":1735,"context_line":"            # shrinking"},{"line_number":1736,"context_line":"            shard_range.update_state(ShardRange.CLEAVED)"},{"line_number":1737,"context_line":"            shard_broker.merge_shard_ranges(shard_range)"},{"line_number":1738,"context_line":"            replication_quorum \u003d self.shard_replication_quorum"},{"line_number":1739,"context_line":""}],"source_content_type":"text/x-python","patch_set":4,"id":"1c28c6d9_0350e6a9","line":1736,"updated":"2022-07-18 22:40:33.000000000","message":"So this is the bit we\u0027re trying to hit by getting rid of the early returns and carrying the result around instead.","commit_id":"57f7145f7379de1f736ff2d904e85918c8166536"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"812771080d8ef811319e074135bd865f6d275e31","unresolved":true,"context_lines":[{"line_number":1924,"context_line":"            else:"},{"line_number":1925,"context_line":"                own_shard_range.update_state(ShardRange.SHARDED)"},{"line_number":1926,"context_line":"                modified_shard_ranges \u003d broker.get_shard_ranges("},{"line_number":1927,"context_line":"                    states\u003dShardRange.CLEAVED)"},{"line_number":1928,"context_line":"                for sr in modified_shard_ranges:"},{"line_number":1929,"context_line":"                    sr.update_state(ShardRange.ACTIVE)"},{"line_number":1930,"context_line":"            if (not broker.is_root_container() and not"}],"source_content_type":"text/x-python","patch_set":4,"id":"dfdcdb94_9fc65f8c","line":1927,"updated":"2022-07-18 22:40:33.000000000","message":"I can\u0027t help but wonder if we should just change this to update both CREATED and CLEAVED.","commit_id":"57f7145f7379de1f736ff2d904e85918c8166536"}],"test/probe/test_sharder.py":[{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"6bf2d1f8e2171776e1b8ba1c45af923983bfc64b","unresolved":true,"context_lines":[{"line_number":1390,"context_line":"        # complete again"},{"line_number":1391,"context_line":"        self.sharders.once(number\u003dself.brain.node_numbers[1],"},{"line_number":1392,"context_line":"                           additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":1393,"context_line":"        self.assert_container_listing(obj_names, req_hdrs\u003d{\u0027x-newest\u0027: \u0027true\u0027})"},{"line_number":1394,"context_line":""},{"line_number":1395,"context_line":"    def test_async_pendings(self):"},{"line_number":1396,"context_line":"        obj_names \u003d self._make_object_names(self.max_shard_size * 2)"}],"source_content_type":"text/x-python","patch_set":3,"id":"bf335c07_c1a30363","line":1393,"updated":"2022-03-23 04:18:39.000000000","message":"Nice showing the hole in the listings issue during cleaving! Nice test!","commit_id":"628e406c27602318dfb97bfbdeaba9748b95f9e2"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"fe7a232316b621a4d4c0dc1e7380f928dff3e23e","unresolved":true,"context_lines":[{"line_number":1390,"context_line":"        # complete again"},{"line_number":1391,"context_line":"        self.sharders.once(number\u003dself.brain.node_numbers[1],"},{"line_number":1392,"context_line":"                           additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":1393,"context_line":"        self.assert_container_listing(obj_names, req_hdrs\u003d{\u0027x-newest\u0027: \u0027true\u0027})"},{"line_number":1394,"context_line":""},{"line_number":1395,"context_line":"    def test_async_pendings(self):"},{"line_number":1396,"context_line":"        obj_names \u003d self._make_object_names(self.max_shard_size * 2)"}],"source_content_type":"text/x-python","patch_set":3,"id":"69c721d0_23be8888","line":1393,"in_reply_to":"bf335c07_c1a30363","updated":"2022-07-14 05:09:29.000000000","message":"Why is this comment up here and not below.. hmm. oh well still cool test showing the cleaving gap.","commit_id":"628e406c27602318dfb97bfbdeaba9748b95f9e2"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"812771080d8ef811319e074135bd865f6d275e31","unresolved":true,"context_lines":[{"line_number":1387,"context_line":"        self.assertLengthEqual(shard_ranges, 4)"},{"line_number":1388,"context_line":"        self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE,"},{"line_number":1389,"context_line":"                          ShardRange.ACTIVE, ShardRange.ACTIVE],"},{"line_number":1390,"context_line":"                         [sr[\u0027state\u0027] for sr in shard_ranges])"},{"line_number":1391,"context_line":"        self.assertEqual("},{"line_number":1392,"context_line":"            {True, False},"},{"line_number":1393,"context_line":"            set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]))"}],"source_content_type":"text/x-python","patch_set":4,"id":"61fc3703_a55ed272","line":1390,"updated":"2022-07-18 22:40:33.000000000","message":"Right; test pops here when I back out the fix.\n\n(Though I\u0027ll note that we *don\u0027t* trip an audit failure -- that smells like  a bug.)","commit_id":"57f7145f7379de1f736ff2d904e85918c8166536"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"812771080d8ef811319e074135bd865f6d275e31","unresolved":true,"context_lines":[{"line_number":1405,"context_line":"        # complete again"},{"line_number":1406,"context_line":"        self.sharders.once(number\u003dself.brain.node_numbers[1],"},{"line_number":1407,"context_line":"                           additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":1408,"context_line":"        self.assert_container_listing(obj_names, req_hdrs\u003d{\u0027x-newest\u0027: \u0027true\u0027})"},{"line_number":1409,"context_line":""},{"line_number":1410,"context_line":"    def test_async_pendings(self):"},{"line_number":1411,"context_line":"        obj_names \u003d self._make_object_names(self.max_shard_size * 2)"}],"source_content_type":"text/x-python","patch_set":4,"id":"71a214a2_8717aa7a","line":1408,"updated":"2022-07-18 22:40:33.000000000","message":"...and if we didn\u0027t fail above, we\u0027d bomb out here because we\u0027re missing the objects from the CREATED shard. At least, sometimes? Seems hit or miss -- sometimes the test just *passes*.","commit_id":"57f7145f7379de1f736ff2d904e85918c8166536"}]}
