)]}'
{"etc/object-server.conf-sample":[{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":400,"context_line":"# tombstones have been reclaimed then the stale fragment will never be deleted"},{"line_number":401,"context_line":"# (see https://bugs.launchpad.net/swift/+bug/1655608). Setting the"},{"line_number":402,"context_line":"# quarantine_threshold to a value greater than zero enables the reconstructor"},{"line_number":403,"context_line":"# to quarantine fragments older than the reclaim_age when it fails to fetch"},{"line_number":404,"context_line":"# more than the quarantine_threshold number of fragments (including the stale"},{"line_number":405,"context_line":"# fragment) during an attempt to reconstruct. For example, setting the"},{"line_number":406,"context_line":"# quarantine_threshold to 1 would cause a fragment older than the reclaim_age"}],"source_content_type":"application/octet-stream","patch_set":2,"id":"80123615_717082a6","line":403,"range":{"start_line":403,"start_character":16,"end_line":403,"end_character":52},"updated":"2021-04-19 23:11:46.000000000","message":"This condition doesn\u0027t seem particularly useful -- I\u0027d wager in most clusters 90%+ of fragments are older than a reclaim age. We may still want the condition as a practical matter, but calling it out in the config docs seems like it could confuse the issue, its causes, and why an operator should feel OK about quarantining this data.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":400,"context_line":"# tombstones have been reclaimed then the stale fragment will never be deleted"},{"line_number":401,"context_line":"# (see https://bugs.launchpad.net/swift/+bug/1655608). Setting the"},{"line_number":402,"context_line":"# quarantine_threshold to a value greater than zero enables the reconstructor"},{"line_number":403,"context_line":"# to quarantine fragments older than the reclaim_age when it fails to fetch"},{"line_number":404,"context_line":"# more than the quarantine_threshold number of fragments (including the stale"},{"line_number":405,"context_line":"# fragment) during an attempt to reconstruct. For example, setting the"},{"line_number":406,"context_line":"# quarantine_threshold to 1 would cause a fragment older than the reclaim_age"}],"source_content_type":"application/octet-stream","patch_set":2,"id":"b8a00732_bd004990","line":403,"range":{"start_line":403,"start_character":16,"end_line":403,"end_character":52},"in_reply_to":"80123615_717082a6","updated":"2021-04-20 18:41:42.000000000","message":"I included reclaim_age in the conditions because it is relevant to how we understand the frags to have become lonely, and how quarantine is the only future for the frag. Younger frags *shouldn\u0027t* be lonely. \n\nAlso, with low cardinality EC data/parity it is more likely we have a frag on a primary and tombstones on handoffs, and I\u0027d prefer to let the tombstones be reverted and eventually have the frag deleted by \u0027normal\u0027 means, rather than prematurely quarantine it.\n\nI take your point about possibly \u0027too much information\u0027 in the doc, but ops could be equally confused if they see the logs, set the threshold to non-zero, but still see the logs, unless they understand there is a time component to the quarantining. I\u0027ll try rewording it.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":405,"context_line":"# fragment) during an attempt to reconstruct. For example, setting the"},{"line_number":406,"context_line":"# quarantine_threshold to 1 would cause a fragment older than the reclaim_age"},{"line_number":407,"context_line":"# to be quarantined if no other fragments can be fetched."},{"line_number":408,"context_line":"# Note: the quarantine_threshold applies equally to all policies, but for each"},{"line_number":409,"context_line":"# policy it is effectively capped at (ec_ndata - 1) so that a fragment is never"},{"line_number":410,"context_line":"# quarantined when sufficient fragments exist to reconstruct the object."},{"line_number":411,"context_line":"# quarantine_threshold \u003d 0"}],"source_content_type":"application/octet-stream","patch_set":2,"id":"b45d9621_a89cfc30","line":408,"range":{"start_line":408,"start_character":33,"end_line":408,"end_character":64},"updated":"2021-04-19 23:11:46.000000000","message":"Hmm... we only have per-policy config options for the proxy server currently, is that right?","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":405,"context_line":"# fragment) during an attempt to reconstruct. For example, setting the"},{"line_number":406,"context_line":"# quarantine_threshold to 1 would cause a fragment older than the reclaim_age"},{"line_number":407,"context_line":"# to be quarantined if no other fragments can be fetched."},{"line_number":408,"context_line":"# Note: the quarantine_threshold applies equally to all policies, but for each"},{"line_number":409,"context_line":"# policy it is effectively capped at (ec_ndata - 1) so that a fragment is never"},{"line_number":410,"context_line":"# quarantined when sufficient fragments exist to reconstruct the object."},{"line_number":411,"context_line":"# quarantine_threshold \u003d 0"}],"source_content_type":"application/octet-stream","patch_set":2,"id":"1825724b_05584a65","line":408,"range":{"start_line":408,"start_character":33,"end_line":408,"end_character":64},"in_reply_to":"b45d9621_a89cfc30","updated":"2021-04-20 18:41:42.000000000","message":"correct. I did go down the path of plumbing in per-policy config for this, but backed off for now while we take some first steps.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"77cb10d0cfefa30a7ea1cf09e045b068e465af00","unresolved":true,"context_lines":[{"line_number":420,"context_line":"# default quarantine_handoff_batches value of 1 would result in up to 24"},{"line_number":421,"context_line":"# additional request to handoff nodes before a stale fragment is quarantined."},{"line_number":422,"context_line":"# It is recommended that the quarantine_handoff_batches value is at least 1."},{"line_number":423,"context_line":"# quarantine_handoff_batches \u003d 1"},{"line_number":424,"context_line":""},{"line_number":425,"context_line":"[object-updater]"},{"line_number":426,"context_line":"# You can override the default log routing for this app here (don\u0027t use set!):"}],"source_content_type":"application/octet-stream","patch_set":7,"id":"b66f4f1f_dc65e7ed","line":423,"updated":"2021-04-28 07:44:48.000000000","message":"So a new term, in the proxy we have request_node_count, I wonder if we could use the same name or even mechanism, just so we don\u0027t have to keep having special options.","commit_id":"60dae671b93a48cdde71e1ed5037d91d0cc79647"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"98715a9cb3faedec2669799b2aea77579896eb97","unresolved":true,"context_lines":[{"line_number":420,"context_line":"# default quarantine_handoff_batches value of 1 would result in up to 24"},{"line_number":421,"context_line":"# additional request to handoff nodes before a stale fragment is quarantined."},{"line_number":422,"context_line":"# It is recommended that the quarantine_handoff_batches value is at least 1."},{"line_number":423,"context_line":"# quarantine_handoff_batches \u003d 1"},{"line_number":424,"context_line":""},{"line_number":425,"context_line":"[object-updater]"},{"line_number":426,"context_line":"# You can override the default log routing for this app here (don\u0027t use set!):"}],"source_content_type":"application/octet-stream","patch_set":7,"id":"9c4b1737_c4e5e0e1","line":423,"in_reply_to":"50ba2a90_d78cd72c","updated":"2021-04-29 07:37:27.000000000","message":"Yeah I understand. You could get the same with the request_count as it stands, ie:\n\n  2 * replicas\n\nI guess would be the same as `1 batch`. As the first 1 * replica is taken up by the primaries.","commit_id":"60dae671b93a48cdde71e1ed5037d91d0cc79647"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"83af56ee477c6d0c6a2fe660d0e056dcb46ee8bf","unresolved":true,"context_lines":[{"line_number":420,"context_line":"# default quarantine_handoff_batches value of 1 would result in up to 24"},{"line_number":421,"context_line":"# additional request to handoff nodes before a stale fragment is quarantined."},{"line_number":422,"context_line":"# It is recommended that the quarantine_handoff_batches value is at least 1."},{"line_number":423,"context_line":"# quarantine_handoff_batches \u003d 1"},{"line_number":424,"context_line":""},{"line_number":425,"context_line":"[object-updater]"},{"line_number":426,"context_line":"# You can override the default log routing for this app here (don\u0027t use set!):"}],"source_content_type":"application/octet-stream","patch_set":7,"id":"50ba2a90_d78cd72c","line":423,"in_reply_to":"6d43bf31_6653f73b","updated":"2021-04-28 22:07:07.000000000","message":"I\u0027ll ponder this some more - I do agree that the fewer concepts that ops need to understand the better","commit_id":"60dae671b93a48cdde71e1ed5037d91d0cc79647"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"12e272ee53bfc7ce182f4b2dd7347217148a2407","unresolved":true,"context_lines":[{"line_number":420,"context_line":"# default quarantine_handoff_batches value of 1 would result in up to 24"},{"line_number":421,"context_line":"# additional request to handoff nodes before a stale fragment is quarantined."},{"line_number":422,"context_line":"# It is recommended that the quarantine_handoff_batches value is at least 1."},{"line_number":423,"context_line":"# quarantine_handoff_batches \u003d 1"},{"line_number":424,"context_line":""},{"line_number":425,"context_line":"[object-updater]"},{"line_number":426,"context_line":"# You can override the default log routing for this app here (don\u0027t use set!):"}],"source_content_type":"application/octet-stream","patch_set":7,"id":"6d43bf31_6653f73b","line":423,"in_reply_to":"b66f4f1f_dc65e7ed","updated":"2021-04-28 21:49:21.000000000","message":"I went with batches rather than an absolute node count because it adapts better across different policies - per-policy config would be the ideal but maybe overly complex for this issue - so the idea is that the handoff count scales with the policy frag count.","commit_id":"60dae671b93a48cdde71e1ed5037d91d0cc79647"}],"swift/obj/reconstructor.py":[{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"15b615f1323f2eb9aa79e006b56b2906d98fe4c1","unresolved":true,"context_lines":[{"line_number":413,"context_line":"        path \u003d datafile_metadata[\u0027name\u0027]"},{"line_number":414,"context_line":""},{"line_number":415,"context_line":"        buckets \u003d defaultdict(dict)"},{"line_number":416,"context_line":"        bad_buckets \u003d defaultdict(list)"},{"line_number":417,"context_line":"        durable_buckets \u003d {}"},{"line_number":418,"context_line":"        etag_buckets \u003d {}"},{"line_number":419,"context_line":""}],"source_content_type":"text/x-python","patch_set":1,"id":"a1d7468f_3ab5b214","line":416,"updated":"2021-04-13 18:24:02.000000000","message":"well THIS is starting to look familiar:\n\nhttps://github.com/openstack/swift/blob/master/swift/proxy/controllers/obj.py#L2113","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"15b615f1323f2eb9aa79e006b56b2906d98fe4c1","unresolved":true,"context_lines":[{"line_number":515,"context_line":"            # TODO: check that the one good response came from *this* node"},{"line_number":516,"context_line":"            return (buckets.keys() \u003d\u003d [local_timestamp] and"},{"line_number":517,"context_line":"                    buckets[local_timestamp].keys() \u003d\u003d [df._frag_index] and"},{"line_number":518,"context_line":"                    bad_buckets.keys() \u003d\u003d [404])"},{"line_number":519,"context_line":""},{"line_number":520,"context_line":"        if not responses and is_solitary_fragment():"},{"line_number":521,"context_line":"            # TODO: make search depth configurable"}],"source_content_type":"text/x-python","patch_set":1,"id":"09ae6e0e_f5b4fa13","line":518,"updated":"2021-04-13 18:24:02.000000000","message":"this is nice a strict - a great place to start.  I could imagine we may have a case where exactly TWO lonely frags both come back-in to the cluster after tombstones are reaped and fail this check.\n\nIf there\u0027s a way to generalize it so it\u0027s configurable to support [204, 404, 404, 404, ...]","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"4b4f67c0bc95675d4c6eeb6e326d839c2f8eba86","unresolved":true,"context_lines":[{"line_number":515,"context_line":"            # TODO: check that the one good response came from *this* node"},{"line_number":516,"context_line":"            return (buckets.keys() \u003d\u003d [local_timestamp] and"},{"line_number":517,"context_line":"                    buckets[local_timestamp].keys() \u003d\u003d [df._frag_index] and"},{"line_number":518,"context_line":"                    bad_buckets.keys() \u003d\u003d [404])"},{"line_number":519,"context_line":""},{"line_number":520,"context_line":"        if not responses and is_solitary_fragment():"},{"line_number":521,"context_line":"            # TODO: make search depth configurable"}],"source_content_type":"text/x-python","patch_set":1,"id":"56079d25_bd2a86c2","line":518,"in_reply_to":"09ae6e0e_f5b4fa13","updated":"2021-04-15 07:37:15.000000000","message":"So this means we only quarantine when there is 1 frag and the rest 404s. What if there were only 2 frags left, we\u0027d still have the same problem. According to the old bug, Romain et el quarantined, i think, if there were \u003c n_data OKs rest 404s. But maybe that is a little cowboy unless we dig into handoffs or something.\n\nWhich is why I guess some way to mark things as dirty and we can check again to see if it\u0027s still the case might be handy. But as we\u0027ve talked about this is hard.\n\nI do like though that this will fix our exsiting single orphan issue we find in our cluster. So it might be a good first step.","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"15b615f1323f2eb9aa79e006b56b2906d98fe4c1","unresolved":true,"context_lines":[{"line_number":517,"context_line":"                    buckets[local_timestamp].keys() \u003d\u003d [df._frag_index] and"},{"line_number":518,"context_line":"                    bad_buckets.keys() \u003d\u003d [404])"},{"line_number":519,"context_line":""},{"line_number":520,"context_line":"        if not responses and is_solitary_fragment():"},{"line_number":521,"context_line":"            # TODO: make search depth configurable"},{"line_number":522,"context_line":"            num_handoffs \u003d policy.ec_n_unique_fragments"},{"line_number":523,"context_line":"            handoffs \u003d list(itertools.islice("}],"source_content_type":"text/x-python","patch_set":1,"id":"e5657d66_7c0ce5d2","line":520,"updated":"2021-04-13 18:24:02.000000000","message":"my reading of the existing code is we NEVER did ANY requests to handoffs for rebuilds (just wait for them to revert to primaries THEN rebuild them)\n\nI\u0027m trying to decide if it\u0027d be reasonable to talk to handoffs in situations other than \"is_solitary_fragments\"","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"4b4f67c0bc95675d4c6eeb6e326d839c2f8eba86","unresolved":true,"context_lines":[{"line_number":517,"context_line":"                    buckets[local_timestamp].keys() \u003d\u003d [df._frag_index] and"},{"line_number":518,"context_line":"                    bad_buckets.keys() \u003d\u003d [404])"},{"line_number":519,"context_line":""},{"line_number":520,"context_line":"        if not responses and is_solitary_fragment():"},{"line_number":521,"context_line":"            # TODO: make search depth configurable"},{"line_number":522,"context_line":"            num_handoffs \u003d policy.ec_n_unique_fragments"},{"line_number":523,"context_line":"            handoffs \u003d list(itertools.islice("}],"source_content_type":"text/x-python","patch_set":1,"id":"a55c8623_ee263448","line":520,"in_reply_to":"e5657d66_7c0ce5d2","updated":"2021-04-15 07:37:15.000000000","message":"Yeah, that seems to be my read of it too. Shame we can\u0027t go look on known good last primaries and next handoffs. But maybe we\u0027re already making too many connections? That and we currently don\u0027t know who where the last good handoffs.\n\nIn the account/container work, we\u0027d might be able to infer from the db ID, not that we can infer back from that though.\n\nSorry just brain storming in gerrit 😊","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"15b615f1323f2eb9aa79e006b56b2906d98fe4c1","unresolved":true,"context_lines":[{"line_number":556,"context_line":""},{"line_number":557,"context_line":"        if is_solitary_fragment():"},{"line_number":558,"context_line":"            raise df._quarantine("},{"line_number":559,"context_line":"                df._data_file, \"Solitary fragment #%s\" % df._frag_index)"},{"line_number":560,"context_line":""},{"line_number":561,"context_line":"        raise DiskFileError(\u0027Unable to reconstruct EC archive\u0027)"},{"line_number":562,"context_line":""}],"source_content_type":"text/x-python","patch_set":1,"id":"0622602c_565e492a","line":559,"updated":"2021-04-13 18:24:02.000000000","message":"yes, quaratine is the way to go\n\nI wonder if this exception is handled the same as the generic DiskFileError for \"unable to reconstruct\" - and more boardly how an ssync stream aborts after the failure.","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"3b054532d2d2213e58f060d37870c6a636c0ef4f","unresolved":true,"context_lines":[{"line_number":230,"context_line":"        self.all_local_devices \u003d self.get_local_devices()"},{"line_number":231,"context_line":"        self.rings_mtime \u003d None"},{"line_number":232,"context_line":"        self.quarantine_threshold \u003d non_negative_int("},{"line_number":233,"context_line":"            conf.get(\u0027quarantine_threshold\u0027, 0))"},{"line_number":234,"context_line":""},{"line_number":235,"context_line":"    def get_worker_args(self, once\u003dFalse, **kwargs):"},{"line_number":236,"context_line":"        \"\"\""}],"source_content_type":"text/x-python","patch_set":2,"id":"b68ca6f8_d381f454","line":233,"updated":"2021-04-19 12:13:14.000000000","message":"So turn it off by default or should be make it 1 for those poor lonely fragments out there :)\n\nI guess safer to be off by default.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":230,"context_line":"        self.all_local_devices \u003d self.get_local_devices()"},{"line_number":231,"context_line":"        self.rings_mtime \u003d None"},{"line_number":232,"context_line":"        self.quarantine_threshold \u003d non_negative_int("},{"line_number":233,"context_line":"            conf.get(\u0027quarantine_threshold\u0027, 0))"},{"line_number":234,"context_line":""},{"line_number":235,"context_line":"    def get_worker_args(self, once\u003dFalse, **kwargs):"},{"line_number":236,"context_line":"        \"\"\""}],"source_content_type":"text/x-python","patch_set":2,"id":"d1ea98f1_7abb8249","line":233,"in_reply_to":"b68ca6f8_d381f454","updated":"2021-04-20 18:41:42.000000000","message":"I\u0027m inclined to make this an opt-in behaviour","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":420,"context_line":"        durable_buckets \u003d {}"},{"line_number":421,"context_line":"        etag_buckets \u003d {}"},{"line_number":422,"context_line":""},{"line_number":423,"context_line":"        def make_requests(nodes):"},{"line_number":424,"context_line":"            pile \u003d GreenAsyncPile(len(nodes))"},{"line_number":425,"context_line":"            responses \u003d None"},{"line_number":426,"context_line":"            for _node in nodes:"}],"source_content_type":"text/x-python","patch_set":2,"id":"15aed5b1_a375fca4","line":423,"updated":"2021-04-19 23:11:46.000000000","message":"I\u0027m finding the closure here a little confusing -- it takes me a bit to sort out where we expect the boundaries to be for reading from and writing into the buckets or where the final set of responses is chosen. I\u0027m not sure how best to *fix* that, but thought it was worth mentioning.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":462,"context_line":"                    # find the very fragment we\u0027re trying to rebuild exists on"},{"line_number":463,"context_line":"                    # another primary node.  In this case we should stream it"},{"line_number":464,"context_line":"                    # directly from the remote node to our target instead of"},{"line_number":465,"context_line":"                    # rebuild.  But instead we ignore it."},{"line_number":466,"context_line":"                    self.logger.debug("},{"line_number":467,"context_line":"                        \u0027Found existing frag #%s at %s while rebuilding to %s\u0027,"},{"line_number":468,"context_line":"                        fi_to_rebuild, resp.full_path,"}],"source_content_type":"text/x-python","patch_set":2,"id":"30dd1a31_bec50b7a","line":465,"updated":"2021-04-19 23:11:46.000000000","message":"Hm. So if we pick up frag#2, don\u0027t see frag#1, and go to reconstruct... there\u0027s a chance we\u0027ll see frag#1 somewhere else, ignore it, then go quarantine frag#2 for lack of responses? Which makes it more difficult for frag#1 to reconstruct once it gets back to its assigned location.\n\nI kinda feel like this change should maybe bump up how we want to prioritize this TODO...","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":462,"context_line":"                    # find the very fragment we\u0027re trying to rebuild exists on"},{"line_number":463,"context_line":"                    # another primary node.  In this case we should stream it"},{"line_number":464,"context_line":"                    # directly from the remote node to our target instead of"},{"line_number":465,"context_line":"                    # rebuild.  But instead we ignore it."},{"line_number":466,"context_line":"                    self.logger.debug("},{"line_number":467,"context_line":"                        \u0027Found existing frag #%s at %s while rebuilding to %s\u0027,"},{"line_number":468,"context_line":"                        fi_to_rebuild, resp.full_path,"}],"source_content_type":"text/x-python","patch_set":2,"id":"e0834a46_673e68ce","line":465,"in_reply_to":"30dd1a31_bec50b7a","updated":"2021-04-20 18:41:42.000000000","message":"good catch - the misplaced target frag should at least count towards the threshold decision","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"3b054532d2d2213e58f060d37870c6a636c0ef4f","unresolved":true,"context_lines":[{"line_number":488,"context_line":"                    self.logger.warning(\u0027Invalid resp from %s, frag index %s \u0027"},{"line_number":489,"context_line":"                                        \u0027(missing Etag)\u0027,"},{"line_number":490,"context_line":"                                        resp.full_path, resp_frag_index)"},{"line_number":491,"context_line":"                    continue"},{"line_number":492,"context_line":""},{"line_number":493,"context_line":"                if etag !\u003d etag_buckets.setdefault(timestamp, etag):"},{"line_number":494,"context_line":"                    self.logger.error("}],"source_content_type":"text/x-python","patch_set":2,"id":"497e54bb_39b1edaa","line":491,"updated":"2021-04-19 12:13:14.000000000","message":"Should this check happen before we mark the durable_bucket as True?","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":488,"context_line":"                    self.logger.warning(\u0027Invalid resp from %s, frag index %s \u0027"},{"line_number":489,"context_line":"                                        \u0027(missing Etag)\u0027,"},{"line_number":490,"context_line":"                                        resp.full_path, resp_frag_index)"},{"line_number":491,"context_line":"                    continue"},{"line_number":492,"context_line":""},{"line_number":493,"context_line":"                if etag !\u003d etag_buckets.setdefault(timestamp, etag):"},{"line_number":494,"context_line":"                    self.logger.error("}],"source_content_type":"text/x-python","patch_set":2,"id":"40c64e7d_0684bdc3","line":491,"in_reply_to":"497e54bb_39b1edaa","updated":"2021-04-20 18:41:42.000000000","message":"probably","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"3b054532d2d2213e58f060d37870c6a636c0ef4f","unresolved":true,"context_lines":[{"line_number":497,"context_line":"                        _full_path(node, partition,"},{"line_number":498,"context_line":"                                   datafile_metadata[\u0027name\u0027], policy),"},{"line_number":499,"context_line":"                        fi_to_rebuild)"},{"line_number":500,"context_line":"                    continue"},{"line_number":501,"context_line":""},{"line_number":502,"context_line":"                if resp_frag_index not in buckets[timestamp]:"},{"line_number":503,"context_line":"                    buckets[timestamp][resp_frag_index] \u003d resp"}],"source_content_type":"text/x-python","patch_set":2,"id":"c8268016_1e2b9969","line":500,"updated":"2021-04-19 12:13:14.000000000","message":"Maybe this one too. Just not sure what might happen if we mark durable and then don\u0027t actaully use the response. Could be mark things as durable by accident? Not sure how, just seems a little iffy.\n\nSeems safer to do all the checks, then mark buckets.\n\nhaving said that, man the term bucket is a little over used, esp in obj storage maybe we should call them tuppleware 😂 (that\u0027s a joke.. I\u0027m tired ok)","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":497,"context_line":"                        _full_path(node, partition,"},{"line_number":498,"context_line":"                                   datafile_metadata[\u0027name\u0027], policy),"},{"line_number":499,"context_line":"                        fi_to_rebuild)"},{"line_number":500,"context_line":"                    continue"},{"line_number":501,"context_line":""},{"line_number":502,"context_line":"                if resp_frag_index not in buckets[timestamp]:"},{"line_number":503,"context_line":"                    buckets[timestamp][resp_frag_index] \u003d resp"}],"source_content_type":"text/x-python","patch_set":2,"id":"ea0ccbd0_ee065169","line":500,"in_reply_to":"c8268016_1e2b9969","updated":"2021-04-20 18:41:42.000000000","message":"lol. how about container ;) ? ... receptacle? pot? *scuttle* ?\n\nmore seriously, we have buckets in the proxy ec object getter so re-using the term here makes some sense (and reminds us that there must be scope to one day refactor and share some of that code :/)","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"3b054532d2d2213e58f060d37870c6a636c0ef4f","unresolved":true,"context_lines":[{"line_number":511,"context_line":""},{"line_number":512,"context_line":"        # don\u0027t try and fetch a fragment from the node we\u0027re rebuilding to"},{"line_number":513,"context_line":"        part_nodes \u003d [n for n in policy.object_ring.get_part_nodes("},{"line_number":514,"context_line":"            partition) if n[\u0027id\u0027] !\u003d node[\u0027id\u0027]]"},{"line_number":515,"context_line":"        responses \u003d make_requests(part_nodes)"},{"line_number":516,"context_line":""},{"line_number":517,"context_line":"        def is_quarantine_candidate():"}],"source_content_type":"text/x-python","patch_set":2,"id":"0dab84da_7f5c5642","line":514,"updated":"2021-04-19 12:13:14.000000000","message":"So we walk the disk, find a frag and we want to check on the neighbours and one it missing it. So we do rebuild it. The df we get is the df on this server.. so do we need to make a call to the node that\u0027s running the code.\n\nI guess if somethings happened inbetween we could get more then one timestamp, so maybe it\u0027s worth the connection. Sorry just brain storming as to why we only remove the missing node from requests and why not this node and the missing.. it saves at least one connection.\n\nMaybe I\u0027m thinking too optimised and we don\u0027t care about a extra connection here and there.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":511,"context_line":""},{"line_number":512,"context_line":"        # don\u0027t try and fetch a fragment from the node we\u0027re rebuilding to"},{"line_number":513,"context_line":"        part_nodes \u003d [n for n in policy.object_ring.get_part_nodes("},{"line_number":514,"context_line":"            partition) if n[\u0027id\u0027] !\u003d node[\u0027id\u0027]]"},{"line_number":515,"context_line":"        responses \u003d make_requests(part_nodes)"},{"line_number":516,"context_line":""},{"line_number":517,"context_line":"        def is_quarantine_candidate():"}],"source_content_type":"text/x-python","patch_set":2,"id":"93e31d28_ca1a776c","line":514,"in_reply_to":"0dab84da_7f5c5642","updated":"2021-04-20 18:41:42.000000000","message":"my guess is that the (existing) comment at line 406 explains this i.e. KISS","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":529,"context_line":"                    len(frags) \u003c\u003d self.quarantine_threshold and"},{"line_number":530,"context_line":"                    len(frags) \u003c policy.ec_ndata and  # belt and braces!"},{"line_number":531,"context_line":"                    df._frag_index in frags and"},{"line_number":532,"context_line":"                    list(bad_buckets.keys()) \u003d\u003d [404])"},{"line_number":533,"context_line":""},{"line_number":534,"context_line":"        if not responses and is_quarantine_candidate():"},{"line_number":535,"context_line":"            # Check the handoffs in case all other fragments landed on handoffs"}],"source_content_type":"text/x-python","patch_set":2,"id":"7235aede_24004a9f","line":532,"range":{"start_line":532,"start_character":20,"end_line":532,"end_character":53},"updated":"2021-04-19 23:11:46.000000000","message":"Right; this ensures we won\u0027t run into trouble because of network partitions, as refused connections or timeouts will still show up here with a status of None.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":531,"context_line":"                    df._frag_index in frags and"},{"line_number":532,"context_line":"                    list(bad_buckets.keys()) \u003d\u003d [404])"},{"line_number":533,"context_line":""},{"line_number":534,"context_line":"        if not responses and is_quarantine_candidate():"},{"line_number":535,"context_line":"            # Check the handoffs in case all other fragments landed on handoffs"},{"line_number":536,"context_line":"            # and have not yet been reverted to their primaries."},{"line_number":537,"context_line":"            num_handoffs \u003d policy.ec_n_unique_fragments"}],"source_content_type":"text/x-python","patch_set":2,"id":"3f67e062_3ac29aa5","line":534,"range":{"start_line":534,"start_character":11,"end_line":534,"end_character":24},"updated":"2021-04-19 23:11:46.000000000","message":"Don\u0027t we always expect to have at least one response from the node this is running on?","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":531,"context_line":"                    df._frag_index in frags and"},{"line_number":532,"context_line":"                    list(bad_buckets.keys()) \u003d\u003d [404])"},{"line_number":533,"context_line":""},{"line_number":534,"context_line":"        if not responses and is_quarantine_candidate():"},{"line_number":535,"context_line":"            # Check the handoffs in case all other fragments landed on handoffs"},{"line_number":536,"context_line":"            # and have not yet been reverted to their primaries."},{"line_number":537,"context_line":"            num_handoffs \u003d policy.ec_n_unique_fragments"}],"source_content_type":"text/x-python","patch_set":2,"id":"6279af71_faec5f1c","line":534,"range":{"start_line":534,"start_character":11,"end_line":534,"end_character":24},"in_reply_to":"3f67e062_3ac29aa5","updated":"2021-04-20 18:41:42.000000000","message":"responses is either empty of has \u003e\u003d ec_ndata items (line 505), which is arguably a little confusing but is a hangover from the existing code. I\u0027ll think about how that might be changed.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":534,"context_line":"        if not responses and is_quarantine_candidate():"},{"line_number":535,"context_line":"            # Check the handoffs in case all other fragments landed on handoffs"},{"line_number":536,"context_line":"            # and have not yet been reverted to their primaries."},{"line_number":537,"context_line":"            num_handoffs \u003d policy.ec_n_unique_fragments"},{"line_number":538,"context_line":"            handoffs \u003d list(itertools.islice("},{"line_number":539,"context_line":"                policy.object_ring.get_more_nodes(partition), 0, num_handoffs))"},{"line_number":540,"context_line":"            responses \u003d make_requests(handoffs)"}],"source_content_type":"text/x-python","patch_set":2,"id":"8382364b_6d554a74","line":537,"updated":"2021-04-19 23:11:46.000000000","message":"This really seems like it ought to be configurable. And per-policy.\n\nWhy do we give duplicated-EC just the one round of handoffs to try?","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":534,"context_line":"        if not responses and is_quarantine_candidate():"},{"line_number":535,"context_line":"            # Check the handoffs in case all other fragments landed on handoffs"},{"line_number":536,"context_line":"            # and have not yet been reverted to their primaries."},{"line_number":537,"context_line":"            num_handoffs \u003d policy.ec_n_unique_fragments"},{"line_number":538,"context_line":"            handoffs \u003d list(itertools.islice("},{"line_number":539,"context_line":"                policy.object_ring.get_more_nodes(partition), 0, num_handoffs))"},{"line_number":540,"context_line":"            responses \u003d make_requests(handoffs)"}],"source_content_type":"text/x-python","patch_set":2,"id":"757f7ffa_5439fdcf","line":537,"in_reply_to":"8382364b_6d554a74","updated":"2021-04-20 18:41:42.000000000","message":"Good point re duplicated policies.\n\nI considered making this configurable, but held off (for now at least) because once we get past the current set of handoffs, where does it make sense to stop searching?\n\nBut yeah, I guess another config option doesn\u0027t hurt.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"3b054532d2d2213e58f060d37870c6a636c0ef4f","unresolved":true,"context_lines":[{"line_number":539,"context_line":"                policy.object_ring.get_more_nodes(partition), 0, num_handoffs))"},{"line_number":540,"context_line":"            responses \u003d make_requests(handoffs)"},{"line_number":541,"context_line":""},{"line_number":542,"context_line":"        if responses:"},{"line_number":543,"context_line":"            rebuilt_fragment_iter \u003d self.make_rebuilt_fragment_iter("},{"line_number":544,"context_line":"                responses[:policy.ec_ndata], path, policy,"},{"line_number":545,"context_line":"                fi_to_rebuild)"}],"source_content_type":"text/x-python","patch_set":2,"id":"abff66d5_bf16116c","line":542,"updated":"2021-04-19 12:13:14.000000000","message":"Shoud be check that we have \u003e policy.ec_ndata responses before going to the effort of making a rebuild_fragment_iter and attempt to rebuild the object?","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":539,"context_line":"                policy.object_ring.get_more_nodes(partition), 0, num_handoffs))"},{"line_number":540,"context_line":"            responses \u003d make_requests(handoffs)"},{"line_number":541,"context_line":""},{"line_number":542,"context_line":"        if responses:"},{"line_number":543,"context_line":"            rebuilt_fragment_iter \u003d self.make_rebuilt_fragment_iter("},{"line_number":544,"context_line":"                responses[:policy.ec_ndata], path, policy,"},{"line_number":545,"context_line":"                fi_to_rebuild)"}],"source_content_type":"text/x-python","patch_set":2,"id":"8c94ee5b_36a53470","line":542,"in_reply_to":"abff66d5_bf16116c","updated":"2021-04-19 23:11:46.000000000","message":"Looks like we have that check up at L504; make_requests should always return either enough frags to reconstruct or None. We probably ought to have a docstring to that effect.","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":15343,"name":"Tim Burke","email":"tburke@nvidia.com","username":"tburke"},"change_message_id":"d6f8f2c62bcf560601ce62333e34e4ed3b6de994","unresolved":true,"context_lines":[{"line_number":564,"context_line":"            durable \u003d durable_buckets.get(local_timestamp)"},{"line_number":565,"context_line":"            self.logger.error("},{"line_number":566,"context_line":"                \u0027Unable to get enough responses (%s error responses) \u0027"},{"line_number":567,"context_line":"                \u0027to reconstruct %s %s frag#%s\u0027 % ("},{"line_number":568,"context_line":"                    sum(len(b) for b in bad_buckets.values()),"},{"line_number":569,"context_line":"                    \u0027durable\u0027 if durable else \u0027non-durable\u0027,"},{"line_number":570,"context_line":"                    path, fi_to_rebuild))"}],"source_content_type":"text/x-python","patch_set":2,"id":"19084925_b460099c","line":567,"updated":"2021-04-19 23:11:46.000000000","message":"So this is kinda weird, and I haven\u0027t dug into it enough to figure out what happened, but: While I was exercising the handoff-handling, I\n\n* took a standard 8-disk SAIO with a 4+2 EC policy,\n* shut down one of the nodes (1.conf) that had two assignments,\n* uploaded an object,\n* found a node (4.conf) with one primary and one handoff frag and deleted all other frags, then finally\n* ran the reconstructor with quarantine_threshold\u003d1.\n\nNothing got quarantined (which was good!) but I got some log lines like\n\n object-6040: Unable to get enough responses (2/4) to reconstruct durable 127.0.0.3:6030/sdb3/191/AUTH_test/c/.profile policy#0 frag#3 with ETag 0d2bebbf5990a90ba0d8af0534cf4f32 and timestamp 1618871492.48811\n object-6040: Unable to get enough responses (3 error responses) to reconstruct durable 127.0.0.3:6030/sdb3/191/AUTH_test/c/.profile policy#0 frag#3\n\nwhich seems weird -- it seems to indicate that we only made 5 requests? I expected 7 (i.e. 5 errors).","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":true,"context_lines":[{"line_number":564,"context_line":"            durable \u003d durable_buckets.get(local_timestamp)"},{"line_number":565,"context_line":"            self.logger.error("},{"line_number":566,"context_line":"                \u0027Unable to get enough responses (%s error responses) \u0027"},{"line_number":567,"context_line":"                \u0027to reconstruct %s %s frag#%s\u0027 % ("},{"line_number":568,"context_line":"                    sum(len(b) for b in bad_buckets.values()),"},{"line_number":569,"context_line":"                    \u0027durable\u0027 if durable else \u0027non-durable\u0027,"},{"line_number":570,"context_line":"                    path, fi_to_rebuild))"}],"source_content_type":"text/x-python","patch_set":2,"id":"2e4ce9a3_3f2ebe5c","line":567,"in_reply_to":"19084925_b460099c","updated":"2021-04-20 18:41:42.000000000","message":"the first batch of requests returned 2 frags and 3 errors (no request is made to the primary that we\u0027re rebuilding to). The 2 frags is already above the quarantine_threshold of 1, so the second set of requests to handoffs wasn\u0027t made.\n\nI do wonder if the logging could be better here e.g. get all the info on one log line, and detail what status codes were returned.\n\nNote: we\u0027ve never tried handoffs for the purposes of rebuilding alone - now the handoffs are tried for the purpose of quarantining, but as a consequence we might now end up rebuilding from handoffs....I guess we could choose to *not* do that rebuild from handoffs, just prevent the quarantine and move on, and leave the handoffs to revert, but then we might end up needing to do the rebuild anyway once all the frags have found their way to primaries???","commit_id":"41915f14add2c1203acd1bbfcc136076e9924867"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"98715a9cb3faedec2669799b2aea77579896eb97","unresolved":true,"context_lines":[{"line_number":418,"context_line":"        \"\"\""},{"line_number":419,"context_line":"        policy \u003d job[\u0027policy\u0027]"},{"line_number":420,"context_line":"        partition \u003d job[\u0027partition\u0027]"},{"line_number":421,"context_line":"        datafile_metadata \u003d df.get_datafile_metadata()"},{"line_number":422,"context_line":""},{"line_number":423,"context_line":"        # the fragment index we need to reconstruct is the position index"},{"line_number":424,"context_line":"        # of the node we\u0027re rebuilding to within the primary part list"}],"source_content_type":"text/x-python","patch_set":7,"id":"17aec566_fdf6ad60","line":421,"updated":"2021-04-29 07:37:27.000000000","message":"NIT: The method that calls this function does have the datafile_metadata handy so we don\u0027t necessarity need to change this method.\n\nAlthough I do like the passing of the already opened df as we get access to the metadata and everything else. so OK.\n\nUpdate: Now looking at history this func only came in last patch.. so yeah let\u0027s just change the method sig 😊","commit_id":"60dae671b93a48cdde71e1ed5037d91d0cc79647"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"98715a9cb3faedec2669799b2aea77579896eb97","unresolved":true,"context_lines":[{"line_number":585,"context_line":"                # frags, or no response for the local frag timestamp: we"},{"line_number":586,"context_line":"                # possibly could quarantine, but this unexpected case may be"},{"line_number":587,"context_line":"                # worth more investigation"},{"line_number":588,"context_line":"                return False"},{"line_number":589,"context_line":""},{"line_number":590,"context_line":"            if time.time() - float(local_timestamp) \u003c\u003d df.manager.reclaim_age:"},{"line_number":591,"context_line":"                # If the fragment has not yet passed reclaim age then it is"}],"source_content_type":"text/x-python","patch_set":7,"id":"e5990936_11510d29","line":588,"updated":"2021-04-29 07:37:27.000000000","message":"I wonder if we could get some logging around finding this situation so we know if it triggers, rather then getting the lonely frag message we have been getting.. and keep getting it longer then a reclaim_age so we then decide to look.","commit_id":"60dae671b93a48cdde71e1ed5037d91d0cc79647"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"98715a9cb3faedec2669799b2aea77579896eb97","unresolved":true,"context_lines":[{"line_number":592,"context_line":"                # likely that a tombstone will be reverted to this node, so"},{"line_number":593,"context_line":"                # don\u0027t quarantine it yet: better that it is cleaned up"},{"line_number":594,"context_line":"                # \u0027normally\u0027."},{"line_number":595,"context_line":"                return False"},{"line_number":596,"context_line":""},{"line_number":597,"context_line":"            bucket \u003d buckets[local_timestamp]"},{"line_number":598,"context_line":"            return (bucket.num_responses \u003c\u003d self.quarantine_threshold and"}],"source_content_type":"text/x-python","patch_set":7,"id":"a00841df_73e96d00","line":595,"updated":"2021-04-29 07:37:27.000000000","message":"Will this mean we may get a bunch of error messages until reclaim_age is hit? I guess it\u0027s local_timestamp (not now) so shouldn\u0027t be for a full reclaim_age amount of loney frag errors in logs.\n\nIn fact, if the main usecase is that an old node plugged back in after reclaim_age I guess these will just vanish and be quarantined straight away.","commit_id":"60dae671b93a48cdde71e1ed5037d91d0cc79647"},{"author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"change_message_id":"6634d33676762aa2314c3170be4673ea6e165756","unresolved":true,"context_lines":[{"line_number":400,"context_line":"        return resp"},{"line_number":401,"context_line":""},{"line_number":402,"context_line":"    def _handle_fragment_response(self, node, policy, partition, fi_to_rebuild,"},{"line_number":403,"context_line":"                                  path, buckets, error_responses, resp):"},{"line_number":404,"context_line":"        \"\"\""},{"line_number":405,"context_line":"        Place ok responses into a per-timestamp bucket. Append bad responses to"},{"line_number":406,"context_line":"        a list per-status-code in error_responses."}],"source_content_type":"text/x-python","patch_set":8,"id":"2b0e5679_3531282b","line":403,"updated":"2021-04-30 07:07:17.000000000","message":"Yeah I think this is big and important enough to warrant it\u0027s own class level method 😊","commit_id":"2eb5ec86fc09f795450fdb93226377ba0b05e9fc"}],"test/unit/obj/test_reconstructor.py":[{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"15b615f1323f2eb9aa79e006b56b2906d98fe4c1","unresolved":true,"context_lines":[{"line_number":5155,"context_line":"        with mocked_http_conn(*codes, body_iter\u003dbody_iter,"},{"line_number":5156,"context_line":"                              headers\u003dheaders,"},{"line_number":5157,"context_line":"                              timestamps\u003d[self.obj_timestamp.internal] * 27):"},{"line_number":5158,"context_line":"            with self.assertRaises(DiskFileQuarantined):"},{"line_number":5159,"context_line":"                self.reconstructor.reconstruct_fa(job, node, self.df)"},{"line_number":5160,"context_line":"        error_lines \u003d self.logger.get_lines_for_level(\u0027error\u0027)"},{"line_number":5161,"context_line":"        self.assertEqual(2, len(error_lines), error_lines)"}],"source_content_type":"text/x-python","patch_set":1,"id":"472fd13d_effd7e32","line":5158,"updated":"2021-04-13 18:24:02.000000000","message":"Boom!","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":1179,"name":"Clay Gerrard","email":"clay.gerrard@gmail.com","username":"clay-gerrard"},"change_message_id":"15b615f1323f2eb9aa79e006b56b2906d98fe4c1","unresolved":true,"context_lines":[{"line_number":5164,"context_line":"                      error_lines[1])"},{"line_number":5165,"context_line":"        warning_lines \u003d self.logger.get_lines_for_level(\u0027warning\u0027)"},{"line_number":5166,"context_line":"        self.assertEqual(1, len(warning_lines), warning_lines)"},{"line_number":5167,"context_line":"        self.assertIn(\u0027Quarantined object\u0027, warning_lines[0])"},{"line_number":5168,"context_line":""},{"line_number":5169,"context_line":"    def test_reconstruct_fa_finds_duplicate_does_not_fail(self):"},{"line_number":5170,"context_line":"        job \u003d {"}],"source_content_type":"text/x-python","patch_set":1,"id":"02e9cbb8_db1e45f0","line":5167,"updated":"2021-04-13 18:24:02.000000000","message":"could we troll around on the filesystem in test_dir and make any claims about what exactly got moved where?","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"},{"author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"change_message_id":"ad3a705245e4f26f9800a9eb3e9a16ab3e6295b1","unresolved":false,"context_lines":[{"line_number":5164,"context_line":"                      error_lines[1])"},{"line_number":5165,"context_line":"        warning_lines \u003d self.logger.get_lines_for_level(\u0027warning\u0027)"},{"line_number":5166,"context_line":"        self.assertEqual(1, len(warning_lines), warning_lines)"},{"line_number":5167,"context_line":"        self.assertIn(\u0027Quarantined object\u0027, warning_lines[0])"},{"line_number":5168,"context_line":""},{"line_number":5169,"context_line":"    def test_reconstruct_fa_finds_duplicate_does_not_fail(self):"},{"line_number":5170,"context_line":"        job \u003d {"}],"source_content_type":"text/x-python","patch_set":1,"id":"89216fb2_25741249","line":5167,"in_reply_to":"02e9cbb8_db1e45f0","updated":"2021-04-20 18:41:42.000000000","message":"Done","commit_id":"db6ab15605ecf4baafaa8e5879c0d52aa1f89f28"}]}
