)]}'
{"/COMMIT_MSG":[{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"8533c51b79712010fb719e9cadbb685a07be9c18","unresolved":false,"context_lines":[{"line_number":25,"context_line":"push to us."},{"line_number":26,"context_line":""},{"line_number":27,"context_line":"Also, only expand the acceptor\u0027s shard range if the donor transitioned"},{"line_number":28,"context_line":"to sharding -- if it was already there, leave it alone."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Between the two, we can now recover from a sharding split-brain. A root"},{"line_number":31,"context_line":"DB can be fixed-up offline (currently, likely via some manual, in-a-REPL"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":5,"id":"bf51134e_4d1f0fb7","line":28,"updated":"2020-07-17 19:53:49.000000000","message":"i\u0027ve been playing with shrinking and overlapping shard ranges a few days now and do not understand the motivation or justification of this change","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"a11f18a536c21439ba27d3e5350fedeaf3a7eda6","unresolved":false,"context_lines":[{"line_number":25,"context_line":"push to us."},{"line_number":26,"context_line":""},{"line_number":27,"context_line":"Also, only expand the acceptor\u0027s shard range if the donor transitioned"},{"line_number":28,"context_line":"to sharding -- if it was already there, leave it alone."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Between the two, we can now recover from a sharding split-brain. A root"},{"line_number":31,"context_line":"DB can be fixed-up offline (currently, likely via some manual, in-a-REPL"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":5,"id":"bf51134e_30d5cee6","line":28,"in_reply_to":"bf51134e_4d1f0fb7","updated":"2020-07-17 20:57:45.000000000","message":"The trouble is when you\u0027ve got the split-brain in a shard -- after you manually fix the shard ranges, you run into trouble if auto_shard is enabled. The root\u0027s sharded, so it\u0027s willing to look for shrinking candidates, which led to it expanding the acceptor\u0027s range to include the guy we\u0027re trying to get rid of. This, of course, means there\u0027s a new overlapping range: http://paste.openstack.org/show/795900/\n\nMy thinking is, whatever process is responsible for transitioning a shard range to shrinking should also be responsible for ensuring that the namespace is properly covered. And it should only need to think about that *at the point of transition* -- otherwise we need to take a much more holistic view when finding shrinking candidates, rather than the pairwise comparisons we do currently.\n\nLonger term, we may need to take that holistic view so we can address overlapping shard ranges automatically -- but for right now I want to make sure we aren\u0027t going to make anything worse or more complicated.","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"}],"swift/container/sharder.py":[{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"42e0803d316a548ef5dfce2e62ae583645fc5135","unresolved":false,"context_lines":[{"line_number":1569,"context_line":""},{"line_number":1570,"context_line":"        if state \u003d\u003d SHARDED and broker.is_root_container():"},{"line_number":1571,"context_line":"            if is_leader:"},{"line_number":1572,"context_line":"                self._find_and_enable_shrinking_candidates(broker)"},{"line_number":1573,"context_line":"                self._find_and_enable_sharding_candidates(broker)"},{"line_number":1574,"context_line":"            for shard_range in broker.get_shard_ranges("},{"line_number":1575,"context_line":"                    states\u003d[ShardRange.SHARDING]):"}],"source_content_type":"text/x-python","patch_set":2,"id":"bf51134e_8b6521b8","line":1572,"range":{"start_line":1572,"start_character":21,"end_line":1572,"end_character":58},"updated":"2020-07-08 18:40:00.000000000","message":"OK, this is starting to make more sense for what\u0027s going on with the current probe test -- it won\u0027t resolve just by setting the unwanted shards to shrinking, because we only call this when the root is sharded (and in this probe test, it\u0027s still sharding). I should probably write a probe test with the split brain in a shard instead of the root.\n\nIt\u0027s worth noting that I\u0027ve seen overlapping *active* shard ranges in the wild, which implies that the dueling leaders actually managed to fully shard. Still needs more investigation to repro.","commit_id":"48c00358ac28ca2b96a8f66e182bbbe41e14fdb4"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"5e508ea3f70e69e74dfb66658fc8675656ad6ade","unresolved":false,"context_lines":[{"line_number":771,"context_line":"                # and an out-of-date acceptor. If the root discovers and needs"},{"line_number":772,"context_line":"                # to correct conflicting sets of shard ranges, we may be"},{"line_number":773,"context_line":"                # shrinking into multiple other shards."},{"line_number":774,"context_line":"                broker.merge_shard_ranges(shard_ranges)"},{"line_number":775,"context_line":"            else:"},{"line_number":776,"context_line":"                broker.merge_shard_ranges(shard_range)"},{"line_number":777,"context_line":"            own_shard_range \u003d broker.get_own_shard_range()"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_0b209f9a","line":774,"updated":"2020-07-14 20:52:06.000000000","message":"Taking in all the relevant ranges into the shared db is doing a WORLD of good.  We limit shard_ranges with marker/end_marker; we should do this in ALL cases.  I don\u0027t see any negative side of merging \"TMI\" down to the shards and all the tests I have access to seem to agree.","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"0dcc7368c645adc31b81846cfd2462659a25126e","unresolved":false,"context_lines":[{"line_number":771,"context_line":"                # and an out-of-date acceptor. If the root discovers and needs"},{"line_number":772,"context_line":"                # to correct conflicting sets of shard ranges, we may be"},{"line_number":773,"context_line":"                # shrinking into multiple other shards."},{"line_number":774,"context_line":"                broker.merge_shard_ranges(shard_ranges)"},{"line_number":775,"context_line":"            else:"},{"line_number":776,"context_line":"                broker.merge_shard_ranges(shard_range)"},{"line_number":777,"context_line":"            own_shard_range \u003d broker.get_own_shard_range()"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_1c690324","line":774,"in_reply_to":"bf51134e_0b209f9a","updated":"2020-07-14 21:40:16.000000000","message":"Mainly I just hadn\u0027t looked that closely at the implications of doing that. I knew that shrinking ranges would have more than one shard range in them, but wasn\u0027t aware of any other states where we\u0027d coded expecting that behavior. *Maybe* it\u0027d be safe to put all shard ranges from the root into an ACTIVE or SHARDING shard... Things like the \"send everything\" approach in _update_root_container() are a little worrying, though.\n\nOTOH, it\u0027d be nice down in _move_objects() if fetching shard ranges was purely local. Hmm...","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"794cde98677f0ee29be7da1963dd637b8e5bf4d4","unresolved":false,"context_lines":[{"line_number":1270,"context_line":"                              quote(broker.path), shard_range)"},{"line_number":1271,"context_line":""},{"line_number":1272,"context_line":"        replication_quorum \u003d self.existing_shard_replication_quorum"},{"line_number":1273,"context_line":"        if shard_range.includes(own_shard_range):"},{"line_number":1274,"context_line":"            # When shrinking, include deleted own (donor) shard range in"},{"line_number":1275,"context_line":"            # the replicated db so that when acceptor next updates root it"},{"line_number":1276,"context_line":"            # will atomically update its namespace *and* delete the donor."}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_1f58e72e","line":1273,"updated":"2020-07-13 23:35:39.000000000","message":"This logic makes me a little nervous -- the shrinking overlap won\u0027t be entirely included in any one shard range.","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"5e508ea3f70e69e74dfb66658fc8675656ad6ade","unresolved":false,"context_lines":[{"line_number":1356,"context_line":"                              quote(broker.path))"},{"line_number":1357,"context_line":"            return cleaving_context.misplaced_done"},{"line_number":1358,"context_line":""},{"line_number":1359,"context_line":"        ranges_todo \u003d broker.get_shard_ranges(marker\u003dcleaving_context.marker)"},{"line_number":1360,"context_line":"        if cleaving_context.cursor:"},{"line_number":1361,"context_line":"            # always update ranges_todo in case more ranges have been found"},{"line_number":1362,"context_line":"            # since last visit"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_688b21bf","line":1359,"updated":"2020-07-14 20:52:06.000000000","message":"rather than skipping shrinking and raising errors on non-cleaving states - can we not just filter the states we want to process?","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"0dcc7368c645adc31b81846cfd2462659a25126e","unresolved":false,"context_lines":[{"line_number":1356,"context_line":"                              quote(broker.path))"},{"line_number":1357,"context_line":"            return cleaving_context.misplaced_done"},{"line_number":1358,"context_line":""},{"line_number":1359,"context_line":"        ranges_todo \u003d broker.get_shard_ranges(marker\u003dcleaving_context.marker)"},{"line_number":1360,"context_line":"        if cleaving_context.cursor:"},{"line_number":1361,"context_line":"            # always update ranges_todo in case more ranges have been found"},{"line_number":1362,"context_line":"            # since last visit"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_bcd45721","line":1359,"in_reply_to":"bf51134e_688b21bf","updated":"2020-07-14 21:40:16.000000000","message":"IDK -- I kinda like the protection afforded here. There are so many moving pieces even on the happy-path that I\u0027d prefer to have sharding stop if the container isn\u0027t in a known-good state, rather than start moving us down some unknown path we haven\u0027t thought about yet.","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"0dcc7368c645adc31b81846cfd2462659a25126e","unresolved":false,"context_lines":[{"line_number":1388,"context_line":"                    break"},{"line_number":1389,"context_line":"                # else, no errors, but no rows found either. keep going,"},{"line_number":1390,"context_line":"                # and don\u0027t count it against our batch size"},{"line_number":1391,"context_line":"            elif shard_range.state \u003d\u003d ShardRange.SHRINKING:"},{"line_number":1392,"context_line":"                self.logger.warning("},{"line_number":1393,"context_line":"                    \u0027Ignoring shrinking shard range: %s\u0027, shard_range.name)"},{"line_number":1394,"context_line":"                # continue with the other ranges; this one should go away"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_1c9f4350","line":1391,"range":{"start_line":1391,"start_character":38,"end_line":1391,"end_character":58},"updated":"2020-07-14 21:40:16.000000000","message":"I wonder if this should also ignore SHARDING ranges since SHRINKING ranges will transition to SHARDING so they can cleave...\n\nThough at that point, we\u0027ve nearly done the filtering you suggested above. *shrug*","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"5e508ea3f70e69e74dfb66658fc8675656ad6ade","unresolved":false,"context_lines":[{"line_number":1392,"context_line":"                self.logger.warning("},{"line_number":1393,"context_line":"                    \u0027Ignoring shrinking shard range: %s\u0027, shard_range.name)"},{"line_number":1394,"context_line":"                # continue with the other ranges; this one should go away"},{"line_number":1395,"context_line":"                # on its own"},{"line_number":1396,"context_line":"            else:"},{"line_number":1397,"context_line":"                self.logger.warning("},{"line_number":1398,"context_line":"                    \u0027Unexpected shard range state for cleave: %s\u0027,"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_7c48ff5a","line":1395,"updated":"2020-07-14 20:52:06.000000000","message":"hrm... I didn\u0027t notice this situation coming up since we don\u0027t include_own?","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"0dcc7368c645adc31b81846cfd2462659a25126e","unresolved":false,"context_lines":[{"line_number":1392,"context_line":"                self.logger.warning("},{"line_number":1393,"context_line":"                    \u0027Ignoring shrinking shard range: %s\u0027, shard_range.name)"},{"line_number":1394,"context_line":"                # continue with the other ranges; this one should go away"},{"line_number":1395,"context_line":"                # on its own"},{"line_number":1396,"context_line":"            else:"},{"line_number":1397,"context_line":"                self.logger.warning("},{"line_number":1398,"context_line":"                    \u0027Unexpected shard range state for cleave: %s\u0027,"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_5cfb7bbc","line":1395,"in_reply_to":"bf51134e_7c48ff5a","updated":"2020-07-14 21:40:16.000000000","message":"It happens in test_manage_shard_ranges_used_poorly -- after we manually fix the shard range states and run the replicators, we have to run the sharders repeatedly. In some earlier patchsets, it required 5 iterations to get it to settle, because the sharding root would need to wait for the shrinking shards to be marked as deleted before it could start making progress again.\n\n(Come to think of it, warning\u0027s probably too noisy here -- info or even debug would be better.)","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"141acbcc891265bdeaf9e47959de07687d52f7bf","unresolved":false,"context_lines":[{"line_number":1496,"context_line":"        shard_ranges \u003d broker.get_shard_ranges("},{"line_number":1497,"context_line":"            include_own\u003dTrue,"},{"line_number":1498,"context_line":"            include_deleted\u003dTrue)"},{"line_number":1499,"context_line":"        # send everything"},{"line_number":1500,"context_line":"        if self._send_shard_ranges("},{"line_number":1501,"context_line":"                broker.root_account, broker.root_container, shard_ranges):"},{"line_number":1502,"context_line":"            # on success, mark ourselves as reported so we don\u0027t keep"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_5fc25f86","line":1499,"range":{"start_line":1499,"start_character":10,"end_line":1499,"end_character":25},"updated":"2020-07-13 23:40:14.000000000","message":"I\u0027m starting to really wonder if this should only send\n\n [sr for sr in shard_ranges if own_shard_range.includes(sr)]","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"794cde98677f0ee29be7da1963dd637b8e5bf4d4","unresolved":false,"context_lines":[{"line_number":1566,"context_line":"            if cleave_complete:"},{"line_number":1567,"context_line":"                self.logger.info(\u0027Completed cleaving of %s\u0027,"},{"line_number":1568,"context_line":"                                 quote(broker.path))"},{"line_number":1569,"context_line":"                if self._complete_sharding(broker):"},{"line_number":1570,"context_line":"                    state \u003d SHARDED"},{"line_number":1571,"context_line":"                    self._increment_stat(\u0027visited\u0027, \u0027completed\u0027, statsd\u003dTrue)"},{"line_number":1572,"context_line":"                else:"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_ff9c13c1","line":1569,"updated":"2020-07-13 23:35:39.000000000","message":"...but I guess we should still return True from _cleave() once we\u0027ve done them both, so we\u0027ll trigger completion down here...","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"a1bb96069001240de903b4347d67fd33d097b55d","unresolved":false,"context_lines":[{"line_number":766,"context_line":""},{"line_number":767,"context_line":"        if shard_range:"},{"line_number":768,"context_line":"            self.logger.debug(\u0027Updating shard from root %s\u0027, dict(shard_range))"},{"line_number":769,"context_line":"            if own_shard_range.state \u003d\u003d ShardRange.SHRINKING:"},{"line_number":770,"context_line":"                # Typically, this should only have two shard ranges: our own"},{"line_number":771,"context_line":"                # and an out-of-date acceptor. If the root discovers and needs"},{"line_number":772,"context_line":"                # to correct conflicting sets of shard ranges, we may be"}],"source_content_type":"text/x-python","patch_set":5,"id":"bf51134e_49e3bc05","line":769,"updated":"2020-07-15 00:40:42.000000000","message":"Hey, wait -- I updated the commit message with my plan, but forgot to update the code!","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"8533c51b79712010fb719e9cadbb685a07be9c18","unresolved":false,"context_lines":[{"line_number":771,"context_line":"                # and an out-of-date acceptor. If the root discovers and needs"},{"line_number":772,"context_line":"                # to correct conflicting sets of shard ranges, we may be"},{"line_number":773,"context_line":"                # shrinking into multiple other shards."},{"line_number":774,"context_line":"                broker.merge_shard_ranges(shard_ranges)"},{"line_number":775,"context_line":"            else:"},{"line_number":776,"context_line":"                broker.merge_shard_ranges(shard_range)"},{"line_number":777,"context_line":"            own_shard_range \u003d broker.get_own_shard_range()"}],"source_content_type":"text/x-python","patch_set":5,"id":"bf51134e_6d2db3fc","line":774,"updated":"2020-07-17 19:53:49.000000000","message":"i want to do this everytime; it\u0027s especially useful when there\u0027s overlaps (such as shrinking or split-brain)\n\nthere\u0027s no down side I can see and having a single behavior in the shards for overlap is less complexity to keep in our head.","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"8533c51b79712010fb719e9cadbb685a07be9c18","unresolved":false,"context_lines":[{"line_number":1390,"context_line":"                # and don\u0027t count it against our batch size"},{"line_number":1391,"context_line":"            elif shard_range.state \u003d\u003d ShardRange.SHRINKING:"},{"line_number":1392,"context_line":"                self.logger.warning("},{"line_number":1393,"context_line":"                    \u0027Ignoring shrinking shard range: %s\u0027, shard_range.name)"},{"line_number":1394,"context_line":"                # continue with the other ranges; this one should go away"},{"line_number":1395,"context_line":"                # on its own"},{"line_number":1396,"context_line":"            else:"}],"source_content_type":"text/x-python","patch_set":5,"id":"bf51134e_4dd8af1d","line":1393,"updated":"2020-07-17 19:53:49.000000000","message":"it seems like this is ALSO just logging a warning; i\u0027m not sure why it\u0027s relevant to separate it from the block below...\n\n... also no test","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"a11f18a536c21439ba27d3e5350fedeaf3a7eda6","unresolved":false,"context_lines":[{"line_number":1390,"context_line":"                # and don\u0027t count it against our batch size"},{"line_number":1391,"context_line":"            elif shard_range.state \u003d\u003d ShardRange.SHRINKING:"},{"line_number":1392,"context_line":"                self.logger.warning("},{"line_number":1393,"context_line":"                    \u0027Ignoring shrinking shard range: %s\u0027, shard_range.name)"},{"line_number":1394,"context_line":"                # continue with the other ranges; this one should go away"},{"line_number":1395,"context_line":"                # on its own"},{"line_number":1396,"context_line":"            else:"}],"source_content_type":"text/x-python","patch_set":5,"id":"bf51134e_50c7a265","line":1393,"in_reply_to":"bf51134e_4dd8af1d","updated":"2020-07-17 20:57:45.000000000","message":"It\u0027s subtle, but the other one has a\n\n break\n\nthat means we stop processing any more shards. Whereas this carve-out lets us keep going.\n\nThis let me lower the number of times we run the sharders over in the probe test from 5 to 4 -- it used to be that we\u0027d (probably) need to do one cycle to get all the shrinking guys to sharded/deleted before the sharding DB could start making progress, but now those can happen in the same cycle.\n\nAlso, this shouldn\u0027t be a warning now -- it\u0027s become expected behavior. Not sure whether to go with info or debug tho...","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"8533c51b79712010fb719e9cadbb685a07be9c18","unresolved":false,"context_lines":[{"line_number":1395,"context_line":"                # on its own"},{"line_number":1396,"context_line":"            else:"},{"line_number":1397,"context_line":"                self.logger.warning("},{"line_number":1398,"context_line":"                    \u0027Unexpected shard range state for cleave: %s\u0027,"},{"line_number":1399,"context_line":"                    shard_range.state_text)"},{"line_number":1400,"context_line":"                # Something\u0027s off the rails -- no sharding until it gets fixed"},{"line_number":1401,"context_line":"                break"}],"source_content_type":"text/x-python","patch_set":5,"id":"bf51134e_6ddf730c","line":1398,"updated":"2020-07-17 19:53:49.000000000","message":"there\u0027s a bug fix here adding the \u0027%s\u0027\n\n... no new test tho","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"a11f18a536c21439ba27d3e5350fedeaf3a7eda6","unresolved":false,"context_lines":[{"line_number":1395,"context_line":"                # on its own"},{"line_number":1396,"context_line":"            else:"},{"line_number":1397,"context_line":"                self.logger.warning("},{"line_number":1398,"context_line":"                    \u0027Unexpected shard range state for cleave: %s\u0027,"},{"line_number":1399,"context_line":"                    shard_range.state_text)"},{"line_number":1400,"context_line":"                # Something\u0027s off the rails -- no sharding until it gets fixed"},{"line_number":1401,"context_line":"                break"}],"source_content_type":"text/x-python","patch_set":5,"id":"bf51134e_d0a8d28d","line":1398,"in_reply_to":"bf51134e_6ddf730c","updated":"2020-07-17 20:57:45.000000000","message":"Yup -- I probably wouldn\u0027t have noticed it except I\u0027d\n\n tail -f /var/log/syslog | sed -e \u0027/raceback/!d; s/#012/\\n/g\u0027\n\nwhile running probe tests in a loop. Which reminds me, https://review.opendev.org/#/c/732996/ is still a thing...","commit_id":"de1756ca1807ec9b3e7948ae0319b0b886041ca9"}],"test/probe/test_sharder.py":[{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"9bdea2ac804a52654d80f0e34bb0877ee2a9941f","unresolved":false,"context_lines":[{"line_number":2457,"context_line":"        self.assert_container_listing(obj_names)"},{"line_number":2458,"context_line":""},{"line_number":2459,"context_line":"    def test_manage_shard_ranges_used_poorly(self):"},{"line_number":2460,"context_line":"        obj_names \u003d self._make_object_names(8)"},{"line_number":2461,"context_line":"        self.put_objects(obj_names)"},{"line_number":2462,"context_line":""},{"line_number":2463,"context_line":"        client.post_container(self.url, self.admin_token, self.container_name,"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_e27bc5f1","line":2460,"range":{"start_line":2460,"start_character":44,"end_line":2460,"end_character":45},"updated":"2020-06-26 04:38:02.000000000","message":"Need to get this based off the configured shard_container_threshold -- as is, the resolution won\u0027t pass in the gate.","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"42e0803d316a548ef5dfce2e62ae583645fc5135","unresolved":false,"context_lines":[{"line_number":2457,"context_line":"        self.assert_container_listing(obj_names)"},{"line_number":2458,"context_line":""},{"line_number":2459,"context_line":"    def test_manage_shard_ranges_used_poorly(self):"},{"line_number":2460,"context_line":"        obj_names \u003d self._make_object_names(8)"},{"line_number":2461,"context_line":"        self.put_objects(obj_names)"},{"line_number":2462,"context_line":""},{"line_number":2463,"context_line":"        client.post_container(self.url, self.admin_token, self.container_name,"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_08f00867","line":2460,"range":{"start_line":2460,"start_character":44,"end_line":2460,"end_character":45},"in_reply_to":"bf51134e_e27bc5f1","updated":"2020-07-08 18:40:00.000000000","message":"Done","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"9bdea2ac804a52654d80f0e34bb0877ee2a9941f","unresolved":false,"context_lines":[{"line_number":2505,"context_line":"        self.assert_container_listing(obj_names)"},{"line_number":2506,"context_line":""},{"line_number":2507,"context_line":"        # Gotta fix it ourselves"},{"line_number":2508,"context_line":"        broker \u003d self.get_broker(self.brain.part, self.brain.nodes[0])"},{"line_number":2509,"context_line":"        grouped \u003d {}"},{"line_number":2510,"context_line":"        for sr in broker.get_shard_ranges():"},{"line_number":2511,"context_line":"            grouped.setdefault(sr.timestamp, []).append(sr)"},{"line_number":2512,"context_line":"        self.assertEqual(2, len(grouped))  # sanity check"},{"line_number":2513,"context_line":"        keep_ts \u003d max(grouped)"},{"line_number":2514,"context_line":"        cleanup_ts \u003d min(grouped)"},{"line_number":2515,"context_line":"        shrink_ts \u003d Timestamp.now()"},{"line_number":2516,"context_line":"        for sr in grouped[cleanup_ts]:"},{"line_number":2517,"context_line":"            if sr.update_state(ShardRange.SHRINKING):"},{"line_number":2518,"context_line":"                sr.epoch \u003d sr.state_timestamp \u003d shrink_ts"},{"line_number":2519,"context_line":"            else:"},{"line_number":2520,"context_line":"                self.fail(\u0027Could not update state on %r!\u0027 % (sr,))"},{"line_number":2521,"context_line":"        broker.merge_shard_ranges(grouped[cleanup_ts])"},{"line_number":2522,"context_line":""},{"line_number":2523,"context_line":"        # Get the updated ranges out to the other roots"},{"line_number":2524,"context_line":"        self.replicators.once()"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_42255108","line":2521,"range":{"start_line":2508,"start_character":8,"end_line":2521,"end_character":54},"updated":"2020-06-26 04:38:02.000000000","message":"Oughta bring this into manage-shard-ranges -- but it may not always be as easy to resolve as this :-/","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"064c6a0539133e934545636b19bd51ba44ef3e3c","unresolved":false,"context_lines":[{"line_number":2525,"context_line":"        # Then run all the sharders, and multiple times -- it should"},{"line_number":2526,"context_line":"        # both get the root containers unstuck *and* get all the"},{"line_number":2527,"context_line":"        # shrinking shards to sharded"},{"line_number":2528,"context_line":"        ## Get the roots to shard"},{"line_number":2529,"context_line":"        for _ in range(5):"},{"line_number":2530,"context_line":"            self.sharders.once()"},{"line_number":2531,"context_line":"            #self.sharders.once("}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_7d5d9613","line":2528,"updated":"2020-06-26 05:35:28.000000000","message":"pep8: E266 too many leading \u0027#\u0027 for block comment","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"9bdea2ac804a52654d80f0e34bb0877ee2a9941f","unresolved":false,"context_lines":[{"line_number":2526,"context_line":"        # both get the root containers unstuck *and* get all the"},{"line_number":2527,"context_line":"        # shrinking shards to sharded"},{"line_number":2528,"context_line":"        ## Get the roots to shard"},{"line_number":2529,"context_line":"        for _ in range(5):"},{"line_number":2530,"context_line":"            self.sharders.once()"},{"line_number":2531,"context_line":"            #self.sharders.once("},{"line_number":2532,"context_line":"            #    additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":2533,"context_line":"        ## Then all together to get the shrinking shards sharded. Doing them"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_028519cf","line":2530,"range":{"start_line":2529,"start_character":8,"end_line":2530,"end_character":32},"updated":"2020-06-26 04:38:02.000000000","message":"I tried to sort out exactly how many shardings were required when and for what partitions, but got tired of trying to suss it out. This seems \"sufficient\".","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"064c6a0539133e934545636b19bd51ba44ef3e3c","unresolved":false,"context_lines":[{"line_number":2528,"context_line":"        ## Get the roots to shard"},{"line_number":2529,"context_line":"        for _ in range(5):"},{"line_number":2530,"context_line":"            self.sharders.once()"},{"line_number":2531,"context_line":"            #self.sharders.once("},{"line_number":2532,"context_line":"            #    additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":2533,"context_line":"        ## Then all together to get the shrinking shards sharded. Doing them"},{"line_number":2534,"context_line":"        ## all at once all the time works, too, it\u0027s just less predictable when"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_dd9dc2bc","line":2531,"updated":"2020-06-26 05:35:28.000000000","message":"pep8: E265 block comment should start with \u0027# \u0027","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"064c6a0539133e934545636b19bd51ba44ef3e3c","unresolved":false,"context_lines":[{"line_number":2530,"context_line":"            self.sharders.once()"},{"line_number":2531,"context_line":"            #self.sharders.once("},{"line_number":2532,"context_line":"            #    additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":2533,"context_line":"        ## Then all together to get the shrinking shards sharded. Doing them"},{"line_number":2534,"context_line":"        ## all at once all the time works, too, it\u0027s just less predictable when"},{"line_number":2535,"context_line":"        ## we\u0027ll be done."},{"line_number":2536,"context_line":"        #for _ in range(3):"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_bda0cef5","line":2533,"updated":"2020-06-26 05:35:28.000000000","message":"pep8: E266 too many leading \u0027#\u0027 for block comment","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"064c6a0539133e934545636b19bd51ba44ef3e3c","unresolved":false,"context_lines":[{"line_number":2531,"context_line":"            #self.sharders.once("},{"line_number":2532,"context_line":"            #    additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":2533,"context_line":"        ## Then all together to get the shrinking shards sharded. Doing them"},{"line_number":2534,"context_line":"        ## all at once all the time works, too, it\u0027s just less predictable when"},{"line_number":2535,"context_line":"        ## we\u0027ll be done."},{"line_number":2536,"context_line":"        #for _ in range(3):"},{"line_number":2537,"context_line":"        #    self.sharders.once()"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_1da45a07","line":2534,"updated":"2020-06-26 05:35:28.000000000","message":"pep8: E266 too many leading \u0027#\u0027 for block comment","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"064c6a0539133e934545636b19bd51ba44ef3e3c","unresolved":false,"context_lines":[{"line_number":2532,"context_line":"            #    additional_args\u003d\u0027--partitions\u003d%s\u0027 % self.brain.part)"},{"line_number":2533,"context_line":"        ## Then all together to get the shrinking shards sharded. Doing them"},{"line_number":2534,"context_line":"        ## all at once all the time works, too, it\u0027s just less predictable when"},{"line_number":2535,"context_line":"        ## we\u0027ll be done."},{"line_number":2536,"context_line":"        #for _ in range(3):"},{"line_number":2537,"context_line":"        #    self.sharders.once()"},{"line_number":2538,"context_line":"        found \u003d {\u0027root\u0027: 0, \u0027active\u0027: 0, \u0027cleaned-up\u0027: 0}"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_fda6c60e","line":2535,"updated":"2020-06-26 05:35:28.000000000","message":"pep8: E266 too many leading \u0027#\u0027 for block comment","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"064c6a0539133e934545636b19bd51ba44ef3e3c","unresolved":false,"context_lines":[{"line_number":2533,"context_line":"        ## Then all together to get the shrinking shards sharded. Doing them"},{"line_number":2534,"context_line":"        ## all at once all the time works, too, it\u0027s just less predictable when"},{"line_number":2535,"context_line":"        ## we\u0027ll be done."},{"line_number":2536,"context_line":"        #for _ in range(3):"},{"line_number":2537,"context_line":"        #    self.sharders.once()"},{"line_number":2538,"context_line":"        found \u003d {\u0027root\u0027: 0, \u0027active\u0027: 0, \u0027cleaned-up\u0027: 0}"},{"line_number":2539,"context_line":"        for db_file in self.gather_db_files():"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf51134e_5db1b246","line":2536,"updated":"2020-06-26 05:35:28.000000000","message":"pep8: E265 block comment should start with \u0027# \u0027","commit_id":"08d81be2ef45763b6d2f6f1b045822eb2274915b"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"d17d06a7ae947e534fd21ec94c0c3359e7d305c5","unresolved":false,"context_lines":[{"line_number":2629,"context_line":"            \u0027swift-manage-shard-ranges\u0027,"},{"line_number":2630,"context_line":"            self.get_db_file(shard_brain.part, shard_brain.nodes[0],"},{"line_number":2631,"context_line":"                             shard_brain.account, shard_brain.container_name),"},{"line_number":2632,"context_line":"            \u0027find_and_replace\u0027, str(int(math.ceil(self.max_shard_size / 4.0)) + 1),"},{"line_number":2633,"context_line":"            \u0027--enable\u0027,"},{"line_number":2634,"context_line":"        ], stderr\u003dsubprocess.STDOUT)"},{"line_number":2635,"context_line":"        self.assert_container_state(shard_brain.nodes[0], \u0027unsharded\u0027, 2,"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_9fbeb7ab","line":2632,"updated":"2020-07-14 00:24:16.000000000","message":"pep8: E501 line too long (83 \u003e 79 characters)","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"d17d06a7ae947e534fd21ec94c0c3359e7d305c5","unresolved":false,"context_lines":[{"line_number":2642,"context_line":"                additional_args\u003d\u0027--partitions\u003d%s\u0027 % shard_brain.part)"},{"line_number":2643,"context_line":""},{"line_number":2644,"context_line":"            # Go write some more data"},{"line_number":2645,"context_line":"            more_obj_names \u003d self._make_object_names(10, start\u003dlen(obj_names) + 1)"},{"line_number":2646,"context_line":"            self.put_objects(more_obj_names)"},{"line_number":2647,"context_line":"            # Lock in the redirect"},{"line_number":2648,"context_line":"            Manager([\u0027object-updater\u0027]).once()"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_ffded30e","line":2645,"updated":"2020-07-14 00:24:16.000000000","message":"pep8: E501 line too long (82 \u003e 79 characters)","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"d17d06a7ae947e534fd21ec94c0c3359e7d305c5","unresolved":false,"context_lines":[{"line_number":2648,"context_line":"            Manager([\u0027object-updater\u0027]).once()"},{"line_number":2649,"context_line":"            # May or may not show up in listings"},{"line_number":2650,"context_line":"            obj_names.extend(more_obj_names)"},{"line_number":2651,"context_line":"            #self.assert_container_listing(obj_names)"},{"line_number":2652,"context_line":"        # resolve all those pendings"},{"line_number":2653,"context_line":"        Manager([\u0027object-updater\u0027]).once()"},{"line_number":2654,"context_line":""}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_dfe14fd2","line":2651,"updated":"2020-07-14 00:24:16.000000000","message":"pep8: E265 block comment should start with \u0027# \u0027","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"d17d06a7ae947e534fd21ec94c0c3359e7d305c5","unresolved":false,"context_lines":[{"line_number":2658,"context_line":"            self.get_db_file(shard_brain.part, shard_brain.nodes[1],"},{"line_number":2659,"context_line":"                             shard_brain.account, shard_brain.container_name),"},{"line_number":2660,"context_line":"            \u0027find_and_replace\u0027, str(int(math.ceil(self.max_shard_size / 4.0))),"},{"line_number":2661,"context_line":"            \u0027--enable\u0027, #\u0027--force\u0027,"},{"line_number":2662,"context_line":"        ], stderr\u003dsubprocess.STDOUT)"},{"line_number":2663,"context_line":"        self.assert_container_state(shard_brain.nodes[1], \u0027unsharded\u0027, 2,"},{"line_number":2664,"context_line":"                                    shard_brain)"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_3fe5ebdc","line":2661,"updated":"2020-07-14 00:24:16.000000000","message":"pep8: E261 at least two spaces before inline comment","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"d17d06a7ae947e534fd21ec94c0c3359e7d305c5","unresolved":false,"context_lines":[{"line_number":2658,"context_line":"            self.get_db_file(shard_brain.part, shard_brain.nodes[1],"},{"line_number":2659,"context_line":"                             shard_brain.account, shard_brain.container_name),"},{"line_number":2660,"context_line":"            \u0027find_and_replace\u0027, str(int(math.ceil(self.max_shard_size / 4.0))),"},{"line_number":2661,"context_line":"            \u0027--enable\u0027, #\u0027--force\u0027,"},{"line_number":2662,"context_line":"        ], stderr\u003dsubprocess.STDOUT)"},{"line_number":2663,"context_line":"        self.assert_container_state(shard_brain.nodes[1], \u0027unsharded\u0027, 2,"},{"line_number":2664,"context_line":"                                    shard_brain)"}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_1fe8e7b0","line":2661,"updated":"2020-07-14 00:24:16.000000000","message":"pep8: E262 inline comment should start with \u0027# \u0027","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"d17d06a7ae947e534fd21ec94c0c3359e7d305c5","unresolved":false,"context_lines":[{"line_number":2691,"context_line":"        more_obj_names \u003d self._make_object_names(10, start\u003dlen(obj_names) + 1)"},{"line_number":2692,"context_line":"        self.put_objects(more_obj_names)"},{"line_number":2693,"context_line":"        # ...we (sometimes!?) lose the previous batch of objects!"},{"line_number":2694,"context_line":"        #self.assert_container_listing(obj_names[:-10] + more_obj_names)"},{"line_number":2695,"context_line":"        obj_names.extend(more_obj_names)"},{"line_number":2696,"context_line":""},{"line_number":2697,"context_line":"        # Gotta fix it ourselves, from the root."}],"source_content_type":"text/x-python","patch_set":4,"id":"bf51134e_7fd2c301","line":2694,"updated":"2020-07-14 00:24:16.000000000","message":"pep8: E265 block comment should start with \u0027# \u0027","commit_id":"834eaaadf67cf757fe0f8d1c8e578bf669d94423"}]}
