)]}'
{"id":"openstack%2Fswift~805607","triplet_id":"openstack%2Fswift~master~Id6541d0deabb9f7c3cfd7523833b77218448c990","project":"openstack/swift","branch":"master","topic":"fix-update-shard-spi","hashtags":[],"change_id":"Id6541d0deabb9f7c3cfd7523833b77218448c990","subject":"DNM: shard policy migration: it\u0027s still not great","status":"NEW","created":"2021-08-23 09:24:05.000000000","updated":"2021-08-31 06:40:51.000000000","submit_type":"MERGE_IF_NECESSARY","mergeable":false,"submittable":false,"total_comment_count":3,"unresolved_comment_count":1,"has_review_started":true,"meta_rev_id":"c20793e7fd2535c77e4bf9d0145ad1989c88e6f3","_number":805607,"virtual_id_number":805607,"owner":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"actions":{},"labels":{"Verified":{"disliked":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"all":[{"tag":"autogenerated:zuul:check","value":-1,"date":"2021-08-23 13:13:15.000000000","permitted_voting_range":{"min":-2,"max":2},"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]}],"values":{"-2":"Fails","-1":"Doesn\u0027t seem to work"," 0":"No score","+1":"Works for me","+2":"Verified"},"description":"","value":-1,"default_value":0,"optional":true},"Code-Review":{"all":[{"value":0,"permitted_voting_range":{"min":-1,"max":1},"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]}],"values":{"-2":"Do not merge","-1":"This patch needs further work before it can be merged"," 0":"No score","+1":"Looks good to me, but someone else must approve","+2":"Looks good to me (core reviewer)"},"description":"","default_value":0,"optional":true},"Workflow":{"all":[{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]}],"values":{"-1":"Work in progress"," 0":"Ready for reviews","+1":"Approved"},"description":"","default_value":0,"optional":true}},"removable_reviewers":[],"reviewers":{"CC":[{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"}],"REVIEWER":[{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]}]},"pending_reviewers":{},"reviewer_updates":[{"updated":"2021-08-23 10:04:43.000000000","updated_by":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"reviewer":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"state":"REVIEWER"},{"updated":"2021-08-26 03:10:46.000000000","updated_by":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"reviewer":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"state":"CC"}],"messages":[{"id":"4efe0a587f3136a494ea0dc718347b0f8f7f351f","tag":"autogenerated:gerrit:newPatchSet","author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"date":"2021-08-23 09:24:05.000000000","message":"Uploaded patch set 1.","accounts_in_message":[],"_revision_number":1},{"id":"664e79648a4f5932cdf359620dcccd834ef1ca33","tag":"autogenerated:zuul:check","author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"date":"2021-08-23 10:04:43.000000000","message":"Patch Set 1: Verified-1\n\nBuild failed (check pipeline).  For information on how to proceed, see\nhttps://docs.opendev.org/opendev/infra-manual/latest/developers.html#automated-testing\n\n\n- openstack-tox-docs https://zuul.opendev.org/t/openstack/build/d82422a6364049c286a9267e61b49de5 : SUCCESS in 9m 59s\n- swift-build-image https://zuul.opendev.org/t/openstack/build/4bedc558d01b4ed6a4f702be4e4c19c9 : FAILURE in 8m 50s (non-voting)\n- swift-build-image-py3 https://zuul.opendev.org/t/openstack/build/04b27fdee9ee48c8a0cd4443bb518764 : FAILURE in 9m 13s (non-voting)\n- swift-tox-py27 https://zuul.opendev.org/t/openstack/build/62bded5447b64522a6396107ed083cca : FAILURE in 16m 42s\n- swift-tox-py36 https://zuul.opendev.org/t/openstack/build/2c5a5aec8d29494b83e4712ce1c59507 : FAILURE in 15m 02s\n- swift-tox-py37 https://zuul.opendev.org/t/openstack/build/deee1e6109e9460fb705503bf97c39dc : FAILURE in 16m 05s\n- swift-tox-py38 https://zuul.opendev.org/t/openstack/build/4572451dffa54ea58f5b389e6d8547b5 : FAILURE in 16m 28s\n- swift-tox-py39 https://zuul.opendev.org/t/openstack/build/a07a6de4880a4c108d16140e89c85ff6 : FAILURE in 17m 33s\n- swift-tox-func-py27 https://zuul.opendev.org/t/openstack/build/042181092bb34be5a6aaf99980f055cf : SUCCESS in 17m 05s\n- swift-tox-func-encryption-py27 https://zuul.opendev.org/t/openstack/build/a8b735f549bd4fe18ab4ecb54c95b569 : SUCCESS in 17m 29s\n- swift-tox-func-ec-py27 https://zuul.opendev.org/t/openstack/build/b3176a02ec4045d3a5723eeaa167b900 : SUCCESS in 20m 19s\n- swift-tox-func-py38 https://zuul.opendev.org/t/openstack/build/d2eae2e4a2314d6dbc77e806a19e824a : SUCCESS in 18m 13s\n- swift-tox-func-encryption-py38 https://zuul.opendev.org/t/openstack/build/59643391ca9d4f3ab30b36b324d1e281 : SUCCESS in 18m 25s\n- swift-tox-func-ec-py38 https://zuul.opendev.org/t/openstack/build/9cc9ae56809847d9bc5bfa825567c24e : SUCCESS in 19m 47s\n- swift-dsvm-functional https://zuul.opendev.org/t/openstack/build/b8e153cf2a5b4f5a847fa7c2d3dfeca1 : SUCCESS in 38m 20s\n- swift-dsvm-functional-ipv6 https://zuul.opendev.org/t/openstack/build/c8165ac563004968893b80124e0cc0b7 : SUCCESS in 38m 39s\n- swift-tox-lower-constraints https://zuul.opendev.org/t/openstack/build/546ca2401d3d4d3095ae453484a0e823 : FAILURE in 14m 31s\n- openstack-tox-pep8 https://zuul.opendev.org/t/openstack/build/f5ea31b26eeb4734bb294db42d5397fd : SUCCESS in 6m 30s\n- swift-multinode-rolling-upgrade https://zuul.opendev.org/t/openstack/build/83164617a4c6497094ea613ba1123861 : SUCCESS in 27m 19s","accounts_in_message":[],"_revision_number":1},{"id":"380140aed070afd061dd01f1d0dd56b2f6307f6a","tag":"autogenerated:gerrit:newPatchSet","author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"date":"2021-08-23 12:12:23.000000000","message":"Uploaded patch set 2.","accounts_in_message":[],"_revision_number":2},{"id":"3cc658454d16815382fc8970f5ef54f5f2e16e0e","tag":"autogenerated:zuul:check","author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"date":"2021-08-23 13:13:15.000000000","message":"Patch Set 2: Verified-1\n\nBuild failed (check pipeline).  For information on how to proceed, see\nhttps://docs.opendev.org/opendev/infra-manual/latest/developers.html#automated-testing\n\n\n- openstack-tox-docs https://zuul.opendev.org/t/openstack/build/b832690811b24e86a8c07b79c8b0be43 : SUCCESS in 10m 38s\n- swift-build-image https://zuul.opendev.org/t/openstack/build/33b1b50c753640bda8791b448dbb590a : FAILURE in 7m 43s (non-voting)\n- swift-build-image-py3 https://zuul.opendev.org/t/openstack/build/bdc1b7ce85cc452fa1916244b93af6fc : FAILURE in 7m 43s (non-voting)\n- swift-tox-py27 https://zuul.opendev.org/t/openstack/build/8801a9254983443db4dc463da72a17bd : FAILURE in 18m 53s\n- swift-tox-py36 https://zuul.opendev.org/t/openstack/build/4a1e6bb7b12b440ab9573c87a0d75098 : FAILURE in 17m 32s\n- swift-tox-py37 https://zuul.opendev.org/t/openstack/build/252feac0116340859169478d5d7d84a0 : FAILURE in 18m 39s\n- swift-tox-py38 https://zuul.opendev.org/t/openstack/build/9457a0a92a55421e9ea099abd5c3d250 : FAILURE in 16m 54s\n- swift-tox-py39 https://zuul.opendev.org/t/openstack/build/2820cd9a125b48f9bd49734f23bd5d11 : FAILURE in 18m 04s\n- swift-tox-func-py27 https://zuul.opendev.org/t/openstack/build/d93c9fe7fe8643da91932332e5a384c3 : SUCCESS in 21m 58s\n- swift-tox-func-encryption-py27 https://zuul.opendev.org/t/openstack/build/1604b6addfe948828501338dea0e6c91 : SUCCESS in 22m 22s\n- swift-tox-func-ec-py27 https://zuul.opendev.org/t/openstack/build/243e8f9e81c14fabb3886cb929f1943e : SUCCESS in 17m 50s\n- swift-tox-func-py38 https://zuul.opendev.org/t/openstack/build/7666affd01fb45bda665c0604132ae04 : SUCCESS in 16m 27s\n- swift-tox-func-encryption-py38 https://zuul.opendev.org/t/openstack/build/fba3db5a65d94c22a74ef8745faea15c : SUCCESS in 21m 32s\n- swift-tox-func-ec-py38 https://zuul.opendev.org/t/openstack/build/f44ebef35c8d43758df0c74166c3a1f2 : SUCCESS in 19m 04s\n- swift-dsvm-functional https://zuul.opendev.org/t/openstack/build/230434bfc9af4a8fb8fe98a73ea66efd : SUCCESS in 39m 32s\n- swift-dsvm-functional-ipv6 https://zuul.opendev.org/t/openstack/build/0bcac4835d454576a163406ec861e8ba : SUCCESS in 43m 50s\n- swift-tox-lower-constraints https://zuul.opendev.org/t/openstack/build/d6cb7aeb82e34718accea993e427ac6a : FAILURE in 17m 39s\n- openstack-tox-pep8 https://zuul.opendev.org/t/openstack/build/f70ec6536fa4473e92c94294ddf14403 : SUCCESS in 7m 22s\n- swift-multinode-rolling-upgrade https://zuul.opendev.org/t/openstack/build/0c930791fd01437c8a5189bb03665b29 : SUCCESS in 31m 30s","accounts_in_message":[],"_revision_number":2},{"id":"77c948d1e05db5ca1d71561c26fa00f2f032e5c2","tag":"autogenerated:zuul:check-arm64","author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"date":"2021-08-23 14:34:00.000000000","message":"Patch Set 2:\n\nBuild succeeded (ARM64 pipeline).\n\n- swift-tox-py38-arm64 https://zuul.opendev.org/t/openstack/build/2ce60f9f05d042bb973e33cf461d77a9 : FAILURE in 37m 28s (non-voting)\n- swift-tox-py39-arm64 https://zuul.opendev.org/t/openstack/build/6c4b85ac4e474dbc991b265a56d7815b : FAILURE in 39m 52s (non-voting)\n- swift-probetests-centos-8-arm64 https://zuul.opendev.org/t/openstack/build/08029c98e7b04da4a316c53f18b44757 : TIMED_OUT in 2h 01m 51s (non-voting)\n- swift-tox-func-encryption-py38-arm64 https://zuul.opendev.org/t/openstack/build/82bc376ddaa242d2a5650ae86f5db761 : SUCCESS in 1h 15m 21s (non-voting)\n- swift-tox-func-py38-arm64 https://zuul.opendev.org/t/openstack/build/37c0341fdce74a5297025e257db4811d : SUCCESS in 1h 05m 17s (non-voting)","accounts_in_message":[],"_revision_number":2},{"id":"6c2ecd0d974844637e20b13f6f0d0adf6ea42140","author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"date":"2021-08-26 03:10:46.000000000","message":"Patch Set 2:\n\nTo be honest, maybe I\u0027m just not seeing it, but I think the problem isn\u0027t the shards.. maybe I\u0027m too close to the code, but they\u0027re, in my opinion, doing the correct thing. We should always pick the lastest PUT not lastest recreated (that isn\u0027t always the latest).\n\nSo I think we need to patch to correct the reconciler behaviour. if there isn\u0027t a recreated container or isn\u0027t a container that is newer put and another recreated then till pick the latest. So the answer should be fixing it\u0027s bad behaviour not making the shards _also_ incorrectly behave.","accounts_in_message":[],"_revision_number":2},{"id":"0f6f5398d04342342f374f0092aa310c8ce4308f","author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"date":"2021-08-26 03:15:12.000000000","message":"Patch Set 2:\n\n(1 comment)","accounts_in_message":[],"_revision_number":2},{"id":"bae12347f51adb098a454bd00ae2770971338f69","author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"date":"2021-08-26 03:35:16.000000000","message":"Patch Set 2:\n\nI\u0027m guessing here, but is the assumption of `cmp_policy_info` is that if we have an info that has been recreated, one that hasn\u0027t and they don\u0027t agree on the SPI. Then the one that hasn\u0027t _MUST_ be same SPI as the recreated one USED to the have before it was recreated.. and that\u0027s why recreated should be _Always_ prioritised?\n\nBecause I\u0027m happy to say.. sure.. if we\u0027re not taking timestamps into account. But we do. So I argue that check might actaully be reduntant. I\u0027m happy for the recreated one to win, if it is infact PUT later then the other info.\n\nSmells a little bit like a bad assumption from the reconciler, either that or I\u0027m having trouble seeing the logic.\n\nI mean even if we look at, pick the info that isn\u0027t deleted. is that still correct. If the one that\u0027s deleted is _newer_ then wont it replicate the delete to the other and then a policy migration is a waste of time? or maybe the other not deleted is infact deleted with objects.. is that why it should win? I think the more I look at the reconciler code I\u0027m getting more confused with the logic... I\u0027ve been staring at it too long I think, maybe I\u0027m getting code blind :P Will go take a break and come back and look at it again later.","accounts_in_message":[],"_revision_number":2},{"id":"18cfc19e1637cbbc76aaef52e5a1e09efc7a1e00","author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"date":"2021-08-26 09:06:47.000000000","message":"Patch Set 2:\n\n(1 comment)\n\n\u003e Patch Set 2:\n\u003e \n\u003e I\u0027m guessing here, but is the assumption of `cmp_policy_info` is that if we have an info that has been recreated, one that hasn\u0027t and they don\u0027t agree on the SPI. Then the one that hasn\u0027t _MUST_ be same SPI as the recreated one USED to the have before it was recreated.. and that\u0027s why recreated should be _Always_ prioritised?\n\u003e \n\u003e Because I\u0027m happy to say.. sure.. if we\u0027re not taking timestamps into account. But we do. So I argue that check might actaully be reduntant. I\u0027m happy for the recreated one to win, if it is infact PUT later then the other info.\n\u003e \n\u003e Smells a little bit like a bad assumption from the reconciler, either that or I\u0027m having trouble seeing the logic.\n\u003e \n\u003e I mean even if we look at, pick the info that isn\u0027t deleted. is that still correct. If the one that\u0027s deleted is _newer_ then wont it replicate the delete to the other and then a policy migration is a waste of time? or maybe the other not deleted is infact deleted with objects.. is that why it should win? I think the more I look at the reconciler code I\u0027m getting more confused with the logic... I\u0027ve been staring at it too long I think, maybe I\u0027m getting code blind :P Will go take a break and come back and look at it again later.","accounts_in_message":[],"_revision_number":2},{"id":"1971df41dfcca994a0f3b449b144784ae005671d","author":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"date":"2021-08-26 09:15:02.000000000","message":"Patch Set 2:\n\nIf we consider how we could end up with a created at t3 that is newer than a recreated (created at t0, deleted at t1, recreated at t2), then that must imply a split brain where the created at t3 was unaware of any of t0, t1, t2 events. So the split-brain allowed created at t3 when it should not in theory have been allowed.\n\nI guess the thinking was that therefore the recreated event at t1/t2 should win over the bogus created at t3???\n\nI\u0027m trying to boil this down to a \u0027principle\u0027! Elsewhere the principle is that newest wins, but actually there is also a sub-text to that which is that replaying a sequence of global events *on a single node* should always result in the same final outcome on every node. In this case, replaying the events at t0, t1, t2, t3 on a single node would result in a 400 at t3 and the container would remain recreated at t1/t2. So that should be the outcome of replication.\n\nI think an etherpad, and eventually a doc, on this topic would be very helpful to future us.","accounts_in_message":[],"_revision_number":2},{"id":"afb324fce77a78e5bd46ca5967aa9dcc22d85b62","author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"date":"2021-08-28 06:35:13.000000000","message":"Patch Set 2:\n\n(1 comment)\n\n\u003e Patch Set 2:\n\u003e \n\u003e If we consider how we could end up with a created at t3 that is newer than a recreated (created at t0, deleted at t1, recreated at t2), then that must imply a split brain where the created at t3 was unaware of any of t0, t1, t2 events. So the split-brain allowed created at t3 when it should not in theory have been allowed.\n\u003e \n\u003e I guess the thinking was that therefore the recreated event at t1/t2 should win over the bogus created at t3???\n\u003e \n\u003e I\u0027m trying to boil this down to a \u0027principle\u0027! Elsewhere the principle is that newest wins, but actually there is also a sub-text to that which is that replaying a sequence of global events *on a single node* should always result in the same final outcome on every node. In this case, replaying the events at t0, t1, t2, t3 on a single node would result in a 400 at t3 and the container would remain recreated at t1/t2. So that should be the outcome of replication.\n\u003e \n\u003e I think an etherpad, and eventually a doc, on this topic would be very helpful to future us.\n\nAhh, I think it\u0027s finally thinking in what your saying here. Totally agree on the \u0027axiom\u0027! To play devil\u0027s advocate for a second though... if how can we consider the SPI different at t3. I means if in a 3 replica system 2 said t1/t2 recreated and the other said t3 new PUT.. yeah ok. But the fact that the t3 was allowed, means it must have gotten quorum somehow (can we use write affinity on containers maybe?), how do we know it isn\u0027t actually the correct behaviour.\n\nIe how do we know that T1/T2 recreated was deleted and reclaimed everywhere before T3 happened execept on a split brain or downed node. In that case T3 is totally valid and actaully the correct answer.. but yes I guess that fails your axiom.. if simply replayed.\n\nI guess on Monday I\u0027ll try and write some tests to test out the behviour. Although we know that currenlty the reconciler will always choose the recreated one with the current code.","accounts_in_message":[],"_revision_number":2},{"id":"86e0bdd117a5ff44857e73723b5026788fb36018","author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"date":"2021-08-29 23:31:29.000000000","message":"Patch Set 2:\n\nHave a day to sleep and think on it. I do like the idea of what would happen if we re-ran the requests (like a journal) on a single node (ie the axiom)\n\nSo a PUT with a different SPI on an older recreated container with a different SPI would fail. This suddenly makes the cmp_policy_info make alot more sense. It first checks for one of them being deleted, because if one is, then replaying requests means the non deleted should be allowed, next looking at a recreated, and if one is it \"should\" win as this is another allowed change in SPI.\nBut in both these cases it doesn\u0027t really take timestamps into account. So we could imagine edgecases where the comparison and response isn\u0027t correct (but let\u0027s ingore that for now). Because the final check is the \"ok both there so let\u0027s check timestamps\", but this last check totally fails the axiom because a PUT of a different SPI isn\u0027t allowed if the container already exists.\n\nThe fact that this last check exists at all is what made me wonder if we should just do a timestamp check.. but this breaks the axiom (although it already does). But if we should always replicate to what could be replayed on a single node.. then what do we do. Remove the last check? Or do we allow PUTs to a new policy? (though that could cause alot of churn so sounds scary)","accounts_in_message":[],"_revision_number":2},{"id":"c20793e7fd2535c77e4bf9d0145ad1989c88e6f3","author":{"_account_id":7233,"name":"Matthew Oliver","email":"matt@oliver.net.au","username":"mattoliverau"},"date":"2021-08-31 06:40:51.000000000","message":"Patch Set 2:\n\nHad a chat to Al about this yesterday. Been thinking about it.\n\nCould we just simplify the root \u003c--\u003e shard logic when it comes to SPI. Shards are \"extensions\" for a root. They\u0027re a fragment of a root container\u0027s metadata that will keep in sync with other replicas of that same fragment. Because they come and go (as things shrink or cleave again) I\u0027m not sure how well locking the root put and delete TS to the shards would really play out.\n\nBut can we go the other way.. we already have landed a patch that in essence makes the shards storage for whatever objects the root puts in at whatever policy the root supplies. The question then is how would be enqueue the wrong objects to the reconciler. Can we just simplify the whole thing. A shard\u0027s SPI is flexable, it\u0027ll update it\u0027s SPI (I guess from the sharder) with whatever the root\u0027s it is talking to has. Then enqueue will happen during the shard/fragment replication (as now).\nThere is a possility of some flapping if shards SPIs are out of sync.. but they should all colalease in the end.. and if anything in the dicussions where, we\u0027ve shown how out of character a SPI change really is and how ofter would this happen? If we end up enqueue a bunch of objects (incorrectly) and then flap back then the reconciler will just noop them in the queue right?\n\nThis would be similar to what we had back in the bottom of this patchset, but rather then tracking the lastest, remove that and just change the SPI?\n\nJust a random solution.","accounts_in_message":[],"_revision_number":2}],"current_revision_number":2,"current_revision":"60e3767a0b2b77a05ea0976982cc7dd2342b450d","revisions":{"f13f29f03c76dfec93b93d0c2756feb072b4dd94":{"kind":"REWORK","_number":1,"created":"2021-08-23 09:24:05.000000000","uploader":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"ref":"refs/changes/07/805607/1","fetch":{"anonymous http":{"url":"https://review.opendev.org/openstack/swift","ref":"refs/changes/07/805607/1","commands":{"Checkout":"git fetch https://review.opendev.org/openstack/swift refs/changes/07/805607/1 \u0026\u0026 git checkout FETCH_HEAD","Cherry Pick":"git fetch https://review.opendev.org/openstack/swift refs/changes/07/805607/1 \u0026\u0026 git cherry-pick FETCH_HEAD","Format Patch":"git fetch https://review.opendev.org/openstack/swift refs/changes/07/805607/1 \u0026\u0026 git format-patch -1 --stdout FETCH_HEAD","Pull":"git pull https://review.opendev.org/openstack/swift refs/changes/07/805607/1"}}},"commit":{"parents":[{"commit":"7ba8915fbcd636bfebbd3ad73f8c558a52ac3eb3","subject":"use reconciler stuff to fix shards","web_links":[{"name":"gitea","tooltip":"Open in GitWeb","url":"https://opendev.org/openstack/swift/commit/7ba8915fbcd636bfebbd3ad73f8c558a52ac3eb3"}]}],"author":{"name":"Alistair Coles","email":"alistairncoles@gmail.com","date":"2021-08-23 09:13:38.000000000","tz":60},"committer":{"name":"Alistair Coles","email":"alistairncoles@gmail.com","date":"2021-08-23 09:23:25.000000000","tz":60},"subject":"DNM: shard policy migration: it\u0027s still not great","message":"DNM: shard policy migration: it\u0027s still not great\n\nAssuming shard status_changed_at is newer than root then shard policy\nwill be udpated indiscriminately to whatever a root sends it, and so\nmay well bounce around until the roots get in sync.\n\nChange-Id: Id6541d0deabb9f7c3cfd7523833b77218448c990\n","web_links":[{"name":"gitea","tooltip":"Open in GitWeb","url":"https://opendev.org/openstack/swift/commit/f13f29f03c76dfec93b93d0c2756feb072b4dd94"}],"resolve_conflicts_web_links":[{"name":"gitea","tooltip":"Open in GitWeb","url":"https://opendev.org/openstack/swift/commit/f13f29f03c76dfec93b93d0c2756feb072b4dd94"}]},"branch":"refs/heads/master"},"60e3767a0b2b77a05ea0976982cc7dd2342b450d":{"kind":"REWORK","_number":2,"created":"2021-08-23 12:12:23.000000000","uploader":{"_account_id":7847,"name":"Alistair Coles","email":"alistairncoles@gmail.com","username":"acoles"},"ref":"refs/changes/07/805607/2","fetch":{"anonymous http":{"url":"https://review.opendev.org/openstack/swift","ref":"refs/changes/07/805607/2","commands":{"Checkout":"git fetch https://review.opendev.org/openstack/swift refs/changes/07/805607/2 \u0026\u0026 git checkout FETCH_HEAD","Cherry Pick":"git fetch https://review.opendev.org/openstack/swift refs/changes/07/805607/2 \u0026\u0026 git cherry-pick FETCH_HEAD","Format Patch":"git fetch https://review.opendev.org/openstack/swift refs/changes/07/805607/2 \u0026\u0026 git format-patch -1 --stdout FETCH_HEAD","Pull":"git pull https://review.opendev.org/openstack/swift refs/changes/07/805607/2"}}},"commit":{"parents":[{"commit":"7ba8915fbcd636bfebbd3ad73f8c558a52ac3eb3","subject":"use reconciler stuff to fix shards","web_links":[{"name":"gitea","tooltip":"Open in GitWeb","url":"https://opendev.org/openstack/swift/commit/7ba8915fbcd636bfebbd3ad73f8c558a52ac3eb3"}]}],"author":{"name":"Alistair Coles","email":"alistairncoles@gmail.com","date":"2021-08-23 09:13:38.000000000","tz":60},"committer":{"name":"Alistair Coles","email":"alistairncoles@gmail.com","date":"2021-08-23 12:12:14.000000000","tz":60},"subject":"DNM: shard policy migration: it\u0027s still not great","message":"DNM: shard policy migration: it\u0027s still not great\n\nAssuming shard status_changed_at is newer than root then shard policy\nwill be udpated indiscriminately to whatever a root sends it, and so\nmay well bounce around until the roots get in sync.\n\nChange-Id: Id6541d0deabb9f7c3cfd7523833b77218448c990\n","web_links":[{"name":"gitea","tooltip":"Open in GitWeb","url":"https://opendev.org/openstack/swift/commit/60e3767a0b2b77a05ea0976982cc7dd2342b450d"}],"resolve_conflicts_web_links":[{"name":"gitea","tooltip":"Open in GitWeb","url":"https://opendev.org/openstack/swift/commit/60e3767a0b2b77a05ea0976982cc7dd2342b450d"}]},"branch":"refs/heads/master"}},"requirements":[],"submit_records":[{"rule_name":"gerrit~DefaultSubmitRule","status":"OK","labels":[{"label":"Verified","status":"MAY","applied_by":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]}},{"label":"Code-Review","status":"MAY"},{"label":"Workflow","status":"MAY"}]}],"submit_requirements":[{"name":"Verified","description":"Verified in gate by CI","status":"UNSATISFIED","is_legacy":false,"submittability_expression_result":{"expression":"label:Verified\u003dMAX AND -label:Verified\u003dMIN","fulfilled":false,"status":"FAIL","passing_atoms":[],"failing_atoms":["label:Verified\u003dMAX","label:Verified\u003dMIN"],"atom_explanations":{"label:Verified\u003dMAX":"","label:Verified\u003dMIN":""}}},{"name":"Code-Review","description":"Code reviewed by core reviewer","status":"UNSATISFIED","is_legacy":false,"submittability_expression_result":{"expression":"label:Code-Review\u003dMAX AND -label:Code-Review\u003dMIN","fulfilled":false,"status":"FAIL","passing_atoms":[],"failing_atoms":["label:Code-Review\u003dMAX","label:Code-Review\u003dMIN"],"atom_explanations":{"label:Code-Review\u003dMAX":"","label:Code-Review\u003dMIN":""}}},{"name":"Workflow","description":"Approved for gate by core reviewer","status":"UNSATISFIED","is_legacy":false,"submittability_expression_result":{"expression":"label:Workflow\u003dMAX AND -label:Workflow\u003dMIN","fulfilled":false,"status":"FAIL","passing_atoms":[],"failing_atoms":["label:Workflow\u003dMAX","label:Workflow\u003dMIN"],"atom_explanations":{"label:Workflow\u003dMAX":"","label:Workflow\u003dMIN":""}}}]}
