{"/COMMIT_MSG":[{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"22f266261705d83a95770b14c05917551bed915c","unresolved":true,"context_lines":[{"line_number":27,"context_line":"related child rows together in a single database transaction. Doing"},{"line_number":28,"context_line":"this will keep the database query packet sizes within limits while"},{"line_number":29,"context_line":"allowing use of higher values for max_rows."},{"line_number":30,"context_line":""},{"line_number":31,"context_line":"Change-Id: I2209bf1b3320901cf603ec39163cf923b25b0359"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"3b139c1f_8fd51485","line":30,"updated":"2023-03-10 02:15:46.000000000","message":"Should this be a bug? Or a blueprint? Or neither? I wasn\u0027t sure.","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"a2a873d127387ed5474114bbf28269501871ed0b","unresolved":true,"context_lines":[{"line_number":27,"context_line":"related child rows together in a single database transaction. Doing"},{"line_number":28,"context_line":"this will keep the database query packet sizes within limits while"},{"line_number":29,"context_line":"allowing use of higher values for max_rows."},{"line_number":30,"context_line":""},{"line_number":31,"context_line":"Change-Id: I2209bf1b3320901cf603ec39163cf923b25b0359"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"ff543ca4_19ebde99","line":30,"in_reply_to":"3b139c1f_8fd51485","updated":"2023-06-15 10:38:51.000000000","message":"i would say this is a perfromace/scalablity bug. it could be seen as a specless blueprint but since this issue can result in a gradual degerdation of the system performance over time i think its more on the bug size the a opetimisation feature.\n\nso i would be ok with this being backported if we saw an need upstream as a bug.","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"74c45ec26e6187f0ad71a4df4e125aaa90d03afa","unresolved":false,"context_lines":[{"line_number":27,"context_line":"related child rows together in a single database transaction. Doing"},{"line_number":28,"context_line":"this will keep the database query packet sizes within limits while"},{"line_number":29,"context_line":"allowing use of higher values for max_rows."},{"line_number":30,"context_line":""},{"line_number":31,"context_line":"Change-Id: I2209bf1b3320901cf603ec39163cf923b25b0359"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"367e7f16_a1f1bb34","line":30,"in_reply_to":"b37c2585_13f6f50e","updated":"2023-06-16 19:08:07.000000000","message":"Done","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"78d946759d21d5c45ad824ab6006dbc5fe5256b2","unresolved":true,"context_lines":[{"line_number":27,"context_line":"related child rows together in a single database transaction. 
Doing"},{"line_number":28,"context_line":"this will keep the database query packet sizes within limits while"},{"line_number":29,"context_line":"allowing use of higher values for max_rows."},{"line_number":30,"context_line":""},{"line_number":31,"context_line":"Change-Id: I2209bf1b3320901cf603ec39163cf923b25b0359"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"b37c2585_13f6f50e","line":30,"in_reply_to":"ff543ca4_19ebde99","updated":"2023-06-15 18:32:22.000000000","message":"That\u0027s a good point on the gradual degradation of performance over time and it makes it more clear that the issue is more a bug than an enhancement.\n\nI\u0027ll open a bug and add a release note.","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"}],"/PATCHSET_LEVEL":[{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"a2a873d127387ed5474114bbf28269501871ed0b","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":1,"id":"7484950d_612f036f","updated":"2023-06-15 10:38:51.000000000","message":"if you file a bug for this it would be nice to also add a release note to let people know about this change in behavior as it should allow them to increase the max rows they use.\n\nalthough personally i would tend to prefer only limiting by age.","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"d29812fe3d42282ed16666ed9457b57954789ff1","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":1,"id":"f39206f4_d5c8308e","updated":"2023-03-15 01:13:51.000000000","message":"recheck KeyError: AsyncDeviceEventsHandler.Waiter(instance_uuid\u003d3e97a63d-18cc-4248-9cc7-404f8b0143b5, device_name\u003dnet1, event_types\u003d{\u003cclass \u0027nova.virt.libvirt.event.DeviceRemovedEvent\u0027\u003e, \u003cclass \u0027nova.virt.libvirt.event.DeviceRemovalFailedEvent\u0027\u003e})\n\nTraceback (most recent call last):\n  File \"/opt/stack/tempest/tempest/api/compute/servers/test_attach_interfaces.py\", line 345, in test_reassign_port_between_servers\n    self.wait_for_port_detach(port_id)\n  File \"/opt/stack/tempest/tempest/api/compute/servers/test_attach_interfaces.py\", line 127, in wait_for_port_detach\n    raise lib_exc.TimeoutException(message)\ntempest.lib.exceptions.TimeoutException: Request timed out\nDetails: Port e07c8c4b-10ca-49e2-b447-d9a797ec4b37 failed to detach (device_id 3e97a63d-18cc-4248-9cc7-404f8b0143b5) within the required time (196 s).","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"bbaba9a2a66db3b451f5e0a3c512fd3c5bbf1e3d","unresolved":true,"context_lines":[],"source_content_type":"","patch_set":1,"id":"cb937cd6_d892cb51","in_reply_to":"48bd69c0_0b0ac243","updated":"2023-06-15 21:29:46.000000000","message":"i would prefer to replace them with https://github.com/ovh/osarchiver\ni understand why the orginially existed but everntully i think we shoudl get out of hte busiess of doing this in nova and leave archival and audit logging to external tools.\n\ni know we have discuss cascade and i think but imn not certen that the issue was with either unique constratis or forign keys and the fact that the shadow tabels dont fully align with the normal ones but i dont quite remember\n\nyour 
melanie witt (melwitt), 2023-06-15 (patch set 1):
Good call on the release note, I will add one.

I think the "catch" even with limiting only by age is that max_rows defaults to 1000 regardless. So if you have an environment with a very large number of deleted rows, max_rows=1000 might be too large and you would have to figure out that you need to pass a lower --max_rows, which isn't great 🫤

melanie witt (melwitt), 2023-06-15 (patch set 1):
> I think the "catch" even with limiting only by age is that max_rows defaults
> to 1000 regardless. [...]

Writing that makes me second-guess how I'm doing this... because executing archival of one "tree" of rows at a time means that max_rows is no longer letting the user control the number of rows in the delete+insert database transaction (which was its original purpose).

So really I think it probably needs to do something more like execute archival of multiple "trees" such that the total number of rows archived across trees in a transaction is max_rows. That way the operator is still able to control the size of the database transaction.

I thought of it before, but I was trying to avoid adding more complexity to an already complex process... Now I'm back to being wary about taking away the ability to control query size with max_rows.

I'm going to give it a try and see how it looks.
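To make that batching policy concrete, here is a minimal sketch, assuming invented tree sizes; the `batch_trees` helper is hypothetical, not nova code. Whole trees are always archived together, and a batch closes once its running row count reaches max_rows:

```python
# Hypothetical sketch of the "multiple trees per transaction" policy
# discussed above. tree_sizes are invented row counts for each deleted
# instance's tree (parent row plus its FK-related child rows).
def batch_trees(tree_sizes, max_rows):
    """Yield batches of tree indexes; each batch stops at >= max_rows.

    A tree is never split across batches, so max_rows is a soft limit:
    one oversized tree can push a batch past it.
    """
    batch, rows_in_batch = [], 0
    for index, size in enumerate(tree_sizes):
        batch.append(index)
        rows_in_batch += size
        if rows_in_batch >= max_rows:
            yield batch
            batch, rows_in_batch = [], 0
    if batch:  # leftover trees that never reached the limit
        yield batch

print(list(batch_trees([3, 9, 4, 120, 2, 7], max_rows=10)))
# [[0, 1], [2, 3], [4, 5]] -- the 120-row tree is still archived whole.
```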
sean mooney (sean-k-mooney), 2023-06-15 (patch set 1):
The reason to control query size is to prevent a DoS against the DB by the periodic task triggering a large operation.

If we are doing one transaction per instance, I think that can't happen, and we don't really need a max rows so much as a max instances, if you wanted to limit the overall impact.

Of course, we could just make our lives easier and use cascade, or remove the shadow tables entirely...

melanie witt (melwitt), 2023-06-15 (patch set 1):
Right, but what I mean is if we do one transaction per instance, then it would be limiting the query size in a way that the operator couldn't increase it if they wanted to. It would be an unchangeable (small) query size.

I agree cascade would be nice, but I thought that in the past (albeit years ago) there was some reason why cascade would just literally not work. Like someone tried it and there was some error or blocker, and a larger-scale refactoring would be needed to make it work. Maybe my memory is wrong, but if it's not, I or someone has to address whatever is preventing cascade from being able to work before we can do it. Either way I think it's definitely worth investigating, because if it will work, like you said, it would be way easier.

I hesitate on removing shadow tables entirely because of the usefulness they have sometimes had when doing forensic debugging of customer deployments.

sean mooney (sean-k-mooney), 2023-06-15 (patch set 1):
I would prefer to replace them with https://github.com/ovh/osarchiver. I understand why they originally existed, but eventually I think we should get out of the business of doing this in nova and leave archival and audit logging to external tools.

I know we have discussed cascade, and I think (but I'm not certain) that the issue was with either unique constraints or foreign keys and the fact that the shadow tables don't fully align with the normal ones, but I don't quite remember. You are right that there was a problem with cascade at one point, but it probably could be made to work.

In any case, we ideally want a resolution that will work for master and older branches for your current issue, I think. Eventually, though, I would like to address this in a non-backportable way by removing the shadow tables and having external archiving, if others agreed with that direction.

Personally I find the soft-deleted rows useful, but I have never had to use the shadow tables, because if we got that far it is already too late to use them to fix things.

melanie witt (melwitt), 2023-06-16 (patch set 1):
I agree it would be best for us to develop a better way to handle database cleaning in general. But you are right, at the moment I'm trying to fix an immediate problem affecting large-scale deployments, which will ideally be backported.
Properly addressing database cleaning is something I think we can work on next.

melanie witt (melwitt), 2023-06-16 (patch set 2):
Uploading what I have currently for using max_rows to control the approximate size of a single database transaction, so that operators can still tune that based on their environments.

sean mooney (sean-k-mooney), 2023-06-16 (patch set 2):
+1 since directionally this is fine; some nits inline.

melanie witt (melwitt), 2023-06-16 (patch set 3):
recheck bug 1940425 and bug 2002782

melanie witt (melwitt), 2023-06-17 (patch set 3):
Same exact failures in nova-live-migration and nova-grenade-multinode, bug 1940425 and bug 2002782.
Not going to recheck anymore for now.

Stephen Finucane (stephenfin), 2023-06-20 (patch set 3):
Nice work. This seems an eminently sensible way to approach this issue.

Stephen Finucane (stephenfin), 2023-06-20 (patch set 3):
(Also, avoiding recheck per melwitt's comments)

Stephen Finucane (stephenfin), 2023-06-21 (patch set 5):
Still good

melanie witt (melwitt), 2023-06-22 (patch set 5):
recheck — https://review.opendev.org/c/openstack/tempest/+/886496 has merged

melanie witt (melwitt), 2023-06-22 (patch set 5):
recheck. nova-multi-cell:

```
tempest.lib.exceptions.TimeoutException: Request timed out
Details: Command 'set -eu -o pipefail; PATH=$PATH:/sbin:/usr/sbin; sudo mke2fs -t ext4 /dev/vdb' executed on host '172.24.5.133'.
```

and nova-ceph-multistore:

```
Jun 22 15:00:27.890918 np0034404705 nova-compute[98621]: ERROR oslo_messaging.rpc.server   File "/usr/lib/python3/dist-packages/libvirt.py", line 1534, in detachDeviceFlags
Jun 22 15:00:27.890918 np0034404705 nova-compute[98621]: ERROR oslo_messaging.rpc.server     raise libvirtError('virDomainDetachDeviceFlags() failed')
Jun 22 15:00:27.890918 np0034404705 nova-compute[98621]: ERROR oslo_messaging.rpc.server libvirt.libvirtError: Unable to read from monitor: Connection reset by peer
```

and tempest-ipv6-only:

```
Jun 22 15:24:18.778762 np0034404706 nova-compute[82742]: ERROR oslo_messaging.rpc.server   File "/usr/lib/python3/dist-packages/libvirt.py", line 1534, in detachDeviceFlags
Jun 22 15:24:18.778762 np0034404706 nova-compute[82742]: ERROR oslo_messaging.rpc.server     raise libvirtError('virDomainDetachDeviceFlags() failed')
Jun 22 15:24:18.778762 np0034404706 nova-compute[82742]: ERROR oslo_messaging.rpc.server libvirt.libvirtError: internal error: End of file from qemu monitor (vm='instance-00000019')
```
\"/usr/lib/python3/dist-packages/libvirt.py\", line 1534, in detachDeviceFlags\nJun 22 15:00:27.890918 np0034404705 nova-compute[98621]: ERROR oslo_messaging.rpc.server     raise libvirtError(\u0027virDomainDetachDeviceFlags() failed\u0027)\nJun 22 15:00:27.890918 np0034404705 nova-compute[98621]: ERROR oslo_messaging.rpc.server libvirt.libvirtError: Unable to read from monitor: Connection reset by peer\n\nand\n\ntempest-ipv6-only Jun 22 15:24:18.778762 np0034404706 nova-compute[82742]: ERROR oslo_messaging.rpc.server   File \"/usr/lib/python3/dist-packages/libvirt.py\", line 1534, in detachDeviceFlags\nJun 22 15:24:18.778762 np0034404706 nova-compute[82742]: ERROR oslo_messaging.rpc.server     raise libvirtError(\u0027virDomainDetachDeviceFlags() failed\u0027)\nJun 22 15:24:18.778762 np0034404706 nova-compute[82742]: ERROR oslo_messaging.rpc.server libvirt.libvirtError: internal error: End of file from qemu monitor (vm\u003d\u0027instance-00000019\u0027)","commit_id":"697fa3c000696da559e52b664c04cbd8d261c037"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"be8fcd712c9f293c27d9a4db757db9fa89ae810f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":5,"id":"48441de2_43ada938","updated":"2023-06-28 15:10:28.000000000","message":"recheck openstack-tox-py39 POST_FAILURE\n\n```\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nIT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\nSomeone could be eavesdropping on you right now (man-in-the-middle attack)!\nIt is also possible that a host key has just been changed.\nThe fingerprint for the ECDSA key sent by the remote host is\nSHA256:ejGCTXnt8bvh3zXmik3Yy4ZGigR5dEW7M1aMebG9Dvs.\nPlease contact your system administrator.\nAdd correct host key in /var/lib/zuul/builds/3e2ccc5f23a14481bb64f20f581cf40f/work/.ssh/known_hosts to get rid of this message.\nOffending RSA key in /var/lib/zuul/builds/3e2ccc5f23a14481bb64f20f581cf40f/work/.ssh/known_hosts:5\n  remove with:\n  ssh-keygen -f \"/var/lib/zuul/builds/3e2ccc5f23a14481bb64f20f581cf40f/work/.ssh/known_hosts\" -R \"172.99.67.80\"\nECDSA host key for 172.99.67.80 has changed and you have requested strict checking.\nHost key verification failed.\nrsync: connection unexpectedly closed (0 bytes received so far) [Receiver]\nrsync error: unexplained error (code 255) at io.c(228) [Receiver\u003d3.2.3]\n```","commit_id":"697fa3c000696da559e52b664c04cbd8d261c037"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"b460f323cc74cb4503882831a6adf5775b7b889f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":5,"id":"1eec2954_7739677e","updated":"2023-06-28 04:24:58.000000000","message":"recheck openstacksdk-functional-devstack TIMED_OUT","commit_id":"697fa3c000696da559e52b664c04cbd8d261c037"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"0c0ac9b9cabf85ebadcb288dc984e0504d9767e5","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":5,"id":"fe55a96f_49a8668c","updated":"2023-06-23 01:05:32.000000000","message":"recheck ssh timeout\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback 
Takashi Natsume (natsumet), 2023-06-25 (patch set 5):
recheck timeout

Stephen Finucane (stephenfin), 2023-06-26 (patch set 5):
recheck another unrelated failure of the ceph-multistore job

Ghanshyam Maan (ghanshyam), 2023-06-27 (patch set 5):
recheck — the ceph job workaround has been merged, so it should pass now

melanie witt (melwitt), 2023-06-27 (patch set 5):
recheck tempest-integrated-compute TIMED_OUT

melanie witt (melwitt), 2023-06-28 (patch set 5):
recheck openstacksdk-functional-devstack TIMED_OUT

melanie witt (melwitt), 2023-06-28 (patch set 5):
recheck openstack-tox-py39 POST_FAILURE

```
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
Someone could be eavesdropping on you right now (man-in-the-middle attack)!
It is also possible that a host key has just been changed.
The fingerprint for the ECDSA key sent by the remote host is
SHA256:ejGCTXnt8bvh3zXmik3Yy4ZGigR5dEW7M1aMebG9Dvs.
Please contact your system administrator.
Add correct host key in /var/lib/zuul/builds/3e2ccc5f23a14481bb64f20f581cf40f/work/.ssh/known_hosts to get rid of this message.
Offending RSA key in /var/lib/zuul/builds/3e2ccc5f23a14481bb64f20f581cf40f/work/.ssh/known_hosts:5
  remove with:
  ssh-keygen -f "/var/lib/zuul/builds/3e2ccc5f23a14481bb64f20f581cf40f/work/.ssh/known_hosts" -R "172.99.67.80"
ECDSA host key for 172.99.67.80 has changed and you have requested strict checking.
Host key verification failed.
rsync: connection unexpectedly closed (0 bytes received so far) [Receiver]
rsync error: unexplained error (code 255) at io.c(228) [Receiver=3.2.3]
```

melanie witt (melwitt), 2023-07-02 (patch set 5):
recheck tempest-integrated-compute TIMED_OUT
Natsume","email":"takanattie@gmail.com","username":"natsumet"},"change_message_id":"25da640b997f7efea2901fb6b30ed122a5d9eb85","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":5,"id":"b7232821_1a29385c","updated":"2023-06-25 12:15:01.000000000","message":"recheck timeout","commit_id":"697fa3c000696da559e52b664c04cbd8d261c037"}],"nova/cmd/manage.py":[{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"c8f36c62dda5eb9d1f96732feb78bdd253daad98","unresolved":true,"context_lines":[{"line_number":228,"context_line":"        \"\"\"Print the current database version.\"\"\""},{"line_number":229,"context_line":"        print(migration.db_version())"},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"    @args(\u0027--max_rows\u0027, type\u003dint, metavar\u003d\u0027\u003cnumber\u003e\u0027, dest\u003d\u0027max_rows\u0027,"},{"line_number":232,"context_line":"          help\u003d\u0027Maximum number of deleted rows to archive. Defaults to 1000. \u0027"},{"line_number":233,"context_line":"               \u0027Note that this number does not include the corresponding \u0027"},{"line_number":234,"context_line":"               \u0027rows, if any, that are removed from the API database for \u0027"},{"line_number":235,"context_line":"               \u0027deleted instances.\u0027)"},{"line_number":236,"context_line":"    @args(\u0027--before\u0027, metavar\u003d\u0027\u003cdate\u003e\u0027,"},{"line_number":237,"context_line":"          help\u003d(\u0027Archive rows that have been deleted before this date. \u0027"},{"line_number":238,"context_line":"                \u0027Accepts date strings in the default format output by the \u0027"}],"source_content_type":"text/x-python","patch_set":2,"id":"397bfec9_895ef7e1","line":235,"range":{"start_line":231,"start_character":3,"end_line":235,"end_character":36},"updated":"2023-06-16 12:28:25.000000000","message":"this should be updated to reflect the new behavior","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"08a7167a12e4d81bc3ff27a632b76c3a92a9425c","unresolved":true,"context_lines":[{"line_number":228,"context_line":"        \"\"\"Print the current database version.\"\"\""},{"line_number":229,"context_line":"        print(migration.db_version())"},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"    @args(\u0027--max_rows\u0027, type\u003dint, metavar\u003d\u0027\u003cnumber\u003e\u0027, dest\u003d\u0027max_rows\u0027,"},{"line_number":232,"context_line":"          help\u003d\u0027Maximum number of deleted rows to archive. Defaults to 1000. \u0027"},{"line_number":233,"context_line":"               \u0027Note that this number does not include the corresponding \u0027"},{"line_number":234,"context_line":"               \u0027rows, if any, that are removed from the API database for \u0027"},{"line_number":235,"context_line":"               \u0027deleted instances.\u0027)"},{"line_number":236,"context_line":"    @args(\u0027--before\u0027, metavar\u003d\u0027\u003cdate\u003e\u0027,"},{"line_number":237,"context_line":"          help\u003d(\u0027Archive rows that have been deleted before this date. 
\u0027"},{"line_number":238,"context_line":"                \u0027Accepts date strings in the default format output by the \u0027"}],"source_content_type":"text/x-python","patch_set":2,"id":"48229a99_e6fa8a5a","line":235,"range":{"start_line":231,"start_character":3,"end_line":235,"end_character":36},"in_reply_to":"397bfec9_895ef7e1","updated":"2023-06-16 17:52:50.000000000","message":"Ack, agree and will update.","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"9eb1e02d54d080c1f44e8ff3d1dbc4d42f43a802","unresolved":false,"context_lines":[{"line_number":228,"context_line":"        \"\"\"Print the current database version.\"\"\""},{"line_number":229,"context_line":"        print(migration.db_version())"},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"    @args(\u0027--max_rows\u0027, type\u003dint, metavar\u003d\u0027\u003cnumber\u003e\u0027, dest\u003d\u0027max_rows\u0027,"},{"line_number":232,"context_line":"          help\u003d\u0027Maximum number of deleted rows to archive. Defaults to 1000. \u0027"},{"line_number":233,"context_line":"               \u0027Note that this number does not include the corresponding \u0027"},{"line_number":234,"context_line":"               \u0027rows, if any, that are removed from the API database for \u0027"},{"line_number":235,"context_line":"               \u0027deleted instances.\u0027)"},{"line_number":236,"context_line":"    @args(\u0027--before\u0027, metavar\u003d\u0027\u003cdate\u003e\u0027,"},{"line_number":237,"context_line":"          help\u003d(\u0027Archive rows that have been deleted before this date. \u0027"},{"line_number":238,"context_line":"                \u0027Accepts date strings in the default format output by the \u0027"}],"source_content_type":"text/x-python","patch_set":2,"id":"c712fbaf_e80afc15","line":235,"range":{"start_line":231,"start_character":3,"end_line":235,"end_character":36},"in_reply_to":"48229a99_e6fa8a5a","updated":"2023-06-16 19:08:41.000000000","message":"Done","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"c8f36c62dda5eb9d1f96732feb78bdd253daad98","unresolved":true,"context_lines":[{"line_number":404,"context_line":"             \u0027cell1.instances\u0027: 5}"},{"line_number":405,"context_line":"        :param cctxt: Cell-targeted nova.context.RequestContext if archiving"},{"line_number":406,"context_line":"            across all cells"},{"line_number":407,"context_line":"        :param max_rows: Maximum number of deleted rows to archive"},{"line_number":408,"context_line":"        :param until_complete: Whether to run continuously until all deleted"},{"line_number":409,"context_line":"            rows are archived"},{"line_number":410,"context_line":"        :param verbose: Whether to print how many rows were archived per table"}],"source_content_type":"text/x-python","patch_set":2,"id":"ac2f368a_b7aca68b","line":407,"range":{"start_line":407,"start_character":6,"end_line":407,"end_character":66},"updated":"2023-06-16 12:28:25.000000000","message":"we might want to mention here and more imporantly in the help for the command that it is a soft limit","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":4690,"name":"melanie 
witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"74c45ec26e6187f0ad71a4df4e125aaa90d03afa","unresolved":false,"context_lines":[{"line_number":404,"context_line":"             \u0027cell1.instances\u0027: 5}"},{"line_number":405,"context_line":"        :param cctxt: Cell-targeted nova.context.RequestContext if archiving"},{"line_number":406,"context_line":"            across all cells"},{"line_number":407,"context_line":"        :param max_rows: Maximum number of deleted rows to archive"},{"line_number":408,"context_line":"        :param until_complete: Whether to run continuously until all deleted"},{"line_number":409,"context_line":"            rows are archived"},{"line_number":410,"context_line":"        :param verbose: Whether to print how many rows were archived per table"}],"source_content_type":"text/x-python","patch_set":2,"id":"1fdcd944_1852a9a8","line":407,"range":{"start_line":407,"start_character":6,"end_line":407,"end_character":66},"in_reply_to":"6347cd64_4059de69","updated":"2023-06-16 19:08:07.000000000","message":"Done","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"08a7167a12e4d81bc3ff27a632b76c3a92a9425c","unresolved":true,"context_lines":[{"line_number":404,"context_line":"             \u0027cell1.instances\u0027: 5}"},{"line_number":405,"context_line":"        :param cctxt: Cell-targeted nova.context.RequestContext if archiving"},{"line_number":406,"context_line":"            across all cells"},{"line_number":407,"context_line":"        :param max_rows: Maximum number of deleted rows to archive"},{"line_number":408,"context_line":"        :param until_complete: Whether to run continuously until all deleted"},{"line_number":409,"context_line":"            rows are archived"},{"line_number":410,"context_line":"        :param verbose: Whether to print how many rows were archived per table"}],"source_content_type":"text/x-python","patch_set":2,"id":"6347cd64_4059de69","line":407,"range":{"start_line":407,"start_character":6,"end_line":407,"end_character":66},"in_reply_to":"ac2f368a_b7aca68b","updated":"2023-06-16 17:52:50.000000000","message":"I think that\u0027s a good idea. It also is (and always has been, at least as far back as Queens) max rows per table -- not max rows total. A lot of times when I go to work on stuff in this area I forget and have to re-realize that. So I\u0027ll add words to indicate that too.","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"d4f96f5dbde5ed4a73553ce1306adb217f9c6b19","unresolved":true,"context_lines":[{"line_number":232,"context_line":"          help\u003d\u0027Maximum number of deleted rows to archive per table. Defaults \u0027"},{"line_number":233,"context_line":"               \u0027to 1000. 
File: nova/cmd/manage.py, line 235 (patch set 5)

Context:
```python
          help='Maximum number of deleted rows to archive per table. Defaults '
               'to 1000. Note that this number is a soft limit and does not '
               'include the corresponding rows, if any, that are removed '
               'from the API database for deleted instances.')
```

sean mooney (sean-k-mooney), 2023-06-22:
+1, thanks for the help text updates.

File: nova/db/main/api.py, line 4428 (patch set 1)

Context:
```python
    # extras = {tablename: number_of_extra_rows_archived}
    extras = collections.defaultdict(int)
    if records:
        # (melwitt): We will gather rows related by foreign key relationship
        # for each deleted row, one at a time. We do it this way because in a
        # large scale database with potentially hundreds of thousands of
```

melanie witt (melwitt), 2023-06-07:
Note to reviewers: this looks like a large change, but that is mostly because of the change in indentation of the entire code block.

What is actually different are two things:

* A loop "for record in records:" has been added around the code block
* In various function calls within the indented code block, the list arg "records" has been changed to a list of one record from the loop, "[record]"
melanie witt (melwitt), 2023-06-16:
This loop is being used to accumulate insert and delete statements to execute for the archive, and the loop will break when it has accumulated >= max_rows row "moves" (insert+delete pairs).

melanie witt (melwitt), 2023-06-16:
Done
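That accumulation pattern can be sketched self-contained against an in-memory SQLite database. This is illustrative only: the schema is invented, and the real code also prepends FK insert/delete statements from _get_fk_stmts and counts those child rows in num_rows_in_batch:

```python
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
metadata = sa.MetaData()
table = sa.Table(
    'instances', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('deleted', sa.Integer, default=0))
shadow_table = sa.Table(
    'shadow_instances', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('deleted', sa.Integer, default=0))
metadata.create_all(engine)

with engine.begin() as conn:
    # Rows with deleted != 0 are soft-deleted and eligible for archival.
    conn.execute(table.insert(),
                 [{'id': i, 'deleted': i % 2} for i in range(10)])

max_rows = 3
column = table.c.id
columns = [c.name for c in table.c]

with engine.connect() as conn:
    records = conn.execute(
        sa.select(column).where(table.c.deleted != 0)).scalars().all()

# Accumulate one insert+delete pair per parent row until the batch is full.
statements, num_rows_in_batch = [], 0
for record in records:
    insert = shadow_table.insert().from_select(
        columns, sa.select(table).where(column.in_([record]))).inline()
    delete = table.delete().where(column.in_([record]))
    statements += [insert, delete]
    num_rows_in_batch += 1  # plus len(fk_inserts) in the real code
    if num_rows_in_batch >= max_rows:
        break

# Execute the whole batch as one delete+insert transaction.
with engine.begin() as conn:
    for statement in statements:
        conn.execute(statement)
    archived = conn.execute(
        sa.select(sa.func.count()).select_from(shadow_table)).scalar_one()
print('rows archived:', archived)  # 3
```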
File: nova/db/main/api.py, line 4444 (patch set 1)

Context:
```python
        for record in records:
            insert = shadow_table.insert().from_select(
                columns, sql.select(table).where(column.in_([record]))
            ).inline()
            delete = table.delete().where(column.in_([record]))
```

melanie witt (melwitt), 2023-06-07:
This used to be "records".

sean mooney (sean-k-mooney), 2023-06-15:
Ah right, the interface is expecting a list, so you have to convert the loop variable to the correct data type.

Also, note to self/others: we are first building the queries that will be executed, and then they are invoked from inside the try block, so a failure of a single parent will not impact the execution of the other instance deletions.

melanie witt (melwitt), 2023-06-16:
> so a failure of a single parent will not impact the execution of the other
> instance deletions

Just noting that this is no longer true with PS2. PS2 will behave the same as today: if a DBReferenceError is raised when archiving a table, that table will be skipped.

Stephen Finucane (stephenfin), 2023-06-20:
Ack
11:22:54.000000000","message":"Ack","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"bb7a90c5938616a78ed56cb6a2f5e58c3441cb37","unresolved":true,"context_lines":[{"line_number":4443,"context_line":"            insert \u003d shadow_table.insert().from_select("},{"line_number":4444,"context_line":"                columns, sql.select(table).where(column.in_([record]))"},{"line_number":4445,"context_line":"            ).inline()"},{"line_number":4446,"context_line":"            delete \u003d table.delete().where(column.in_([record]))"},{"line_number":4447,"context_line":"            # Walk FK relationships and add insert/delete statements for rows"},{"line_number":4448,"context_line":"            # that refer to this table via FK constraints. fk_inserts and"},{"line_number":4449,"context_line":"            # fk_deletes will be prepended to by _get_fk_stmts if referring"}],"source_content_type":"text/x-python","patch_set":1,"id":"945adc08_b5685256","line":4446,"range":{"start_line":4446,"start_character":53,"end_line":4446,"end_character":61},"updated":"2023-06-07 16:08:35.000000000","message":"This used to be \"records\".","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"74c45ec26e6187f0ad71a4df4e125aaa90d03afa","unresolved":false,"context_lines":[{"line_number":4443,"context_line":"            insert \u003d shadow_table.insert().from_select("},{"line_number":4444,"context_line":"                columns, sql.select(table).where(column.in_([record]))"},{"line_number":4445,"context_line":"            ).inline()"},{"line_number":4446,"context_line":"            delete \u003d table.delete().where(column.in_([record]))"},{"line_number":4447,"context_line":"            # Walk FK relationships and add insert/delete statements for rows"},{"line_number":4448,"context_line":"            # that refer to this table via FK constraints. 
fk_inserts and"},{"line_number":4449,"context_line":"            # fk_deletes will be prepended to by _get_fk_stmts if referring"}],"source_content_type":"text/x-python","patch_set":1,"id":"ff3a823d_433ed60a","line":4446,"range":{"start_line":4446,"start_character":53,"end_line":4446,"end_character":61},"in_reply_to":"945adc08_b5685256","updated":"2023-06-16 19:08:07.000000000","message":"Done","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"bb7a90c5938616a78ed56cb6a2f5e58c3441cb37","unresolved":true,"context_lines":[{"line_number":4449,"context_line":"            # fk_deletes will be prepended to by _get_fk_stmts if referring"},{"line_number":4450,"context_line":"            # rows are found by FK constraints."},{"line_number":4451,"context_line":"            fk_inserts, fk_deletes \u003d _get_fk_stmts("},{"line_number":4452,"context_line":"                metadata, conn, table, column, [record])"},{"line_number":4453,"context_line":""},{"line_number":4454,"context_line":"            # NOTE(tssurya): In order to facilitate the deletion of records"},{"line_number":4455,"context_line":"            # from instance_mappings, request_specs and instance_group_member"}],"source_content_type":"text/x-python","patch_set":1,"id":"3b6e5464_64cbe924","line":4452,"range":{"start_line":4452,"start_character":47,"end_line":4452,"end_character":55},"updated":"2023-06-07 16:08:35.000000000","message":"This used to be \"records\".","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"74c45ec26e6187f0ad71a4df4e125aaa90d03afa","unresolved":false,"context_lines":[{"line_number":4449,"context_line":"            # fk_deletes will be prepended to by _get_fk_stmts if referring"},{"line_number":4450,"context_line":"            # rows are found by FK constraints."},{"line_number":4451,"context_line":"            fk_inserts, fk_deletes \u003d _get_fk_stmts("},{"line_number":4452,"context_line":"                metadata, conn, table, column, [record])"},{"line_number":4453,"context_line":""},{"line_number":4454,"context_line":"            # NOTE(tssurya): In order to facilitate the deletion of records"},{"line_number":4455,"context_line":"            # from instance_mappings, request_specs and instance_group_member"}],"source_content_type":"text/x-python","patch_set":1,"id":"0667fe64_7b0fdf9b","line":4452,"range":{"start_line":4452,"start_character":47,"end_line":4452,"end_character":55},"in_reply_to":"3b6e5464_64cbe924","updated":"2023-06-16 19:08:07.000000000","message":"Done","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"bb7a90c5938616a78ed56cb6a2f5e58c3441cb37","unresolved":true,"context_lines":[{"line_number":4458,"context_line":"            # uuids of the archived instances are queried and returned."},{"line_number":4459,"context_line":"            if tablename \u003d\u003d \"instances\":"},{"line_number":4460,"context_line":"                query_select \u003d sql.select(table.c.uuid).where("},{"line_number":4461,"context_line":"                    table.c.id.in_([record])"},{"line_number":4462,"context_line":"                
)"},{"line_number":4463,"context_line":"                with conn.begin():"},{"line_number":4464,"context_line":"                    rows \u003d conn.execute(query_select).fetchall()"}],"source_content_type":"text/x-python","patch_set":1,"id":"b5da97de_26c7bca2","line":4461,"range":{"start_line":4461,"start_character":35,"end_line":4461,"end_character":43},"updated":"2023-06-07 16:08:35.000000000","message":"This used to be \"records\".","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"74c45ec26e6187f0ad71a4df4e125aaa90d03afa","unresolved":false,"context_lines":[{"line_number":4458,"context_line":"            # uuids of the archived instances are queried and returned."},{"line_number":4459,"context_line":"            if tablename \u003d\u003d \"instances\":"},{"line_number":4460,"context_line":"                query_select \u003d sql.select(table.c.uuid).where("},{"line_number":4461,"context_line":"                    table.c.id.in_([record])"},{"line_number":4462,"context_line":"                )"},{"line_number":4463,"context_line":"                with conn.begin():"},{"line_number":4464,"context_line":"                    rows \u003d conn.execute(query_select).fetchall()"}],"source_content_type":"text/x-python","patch_set":1,"id":"1737ebd1_071b97a0","line":4461,"range":{"start_line":4461,"start_character":35,"end_line":4461,"end_character":43},"in_reply_to":"b5da97de_26c7bca2","updated":"2023-06-16 19:08:07.000000000","message":"Done","commit_id":"d4f16979c825380995f790302fc99ec2feaa1c84"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"c8f36c62dda5eb9d1f96732feb78bdd253daad98","unresolved":true,"context_lines":[{"line_number":4480,"context_line":"            num_rows_in_batch +\u003d 1 + len(fk_inserts)"},{"line_number":4481,"context_line":""},{"line_number":4482,"context_line":"            if max_rows is not None and num_rows_in_batch \u003e\u003d max_rows:"},{"line_number":4483,"context_line":"                break"},{"line_number":4484,"context_line":""},{"line_number":4485,"context_line":"        # NOTE(tssurya): In order to facilitate the deletion of records"},{"line_number":4486,"context_line":"        # from instance_mappings, request_specs and instance_group_member"}],"source_content_type":"text/x-python","patch_set":2,"id":"e2c8457a_af4f8f4a","line":4483,"updated":"2023-06-16 12:28:25.000000000","message":"i see ya so in the unlikely event that an instance had 100,000 rows assocated with it for some reason max_rows\u003d5 would still allow that to be deleted because the algortiom is add instnace untill the sum of the rows is larger then the max value.\nbasically a do while loop usign an itorator over the records.\n\ni think that makes sense.","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"577d5fd6af945a5f7ff503e76eda480856821835","unresolved":false,"context_lines":[{"line_number":4480,"context_line":"            num_rows_in_batch +\u003d 1 + len(fk_inserts)"},{"line_number":4481,"context_line":""},{"line_number":4482,"context_line":"            if max_rows is not None and num_rows_in_batch \u003e\u003d max_rows:"},{"line_number":4483,"context_line":"                
break"},{"line_number":4484,"context_line":""},{"line_number":4485,"context_line":"        # NOTE(tssurya): In order to facilitate the deletion of records"},{"line_number":4486,"context_line":"        # from instance_mappings, request_specs and instance_group_member"}],"source_content_type":"text/x-python","patch_set":2,"id":"e6c6b35a_9d14ed57","line":4483,"in_reply_to":"58e2943e_b23ad97a","updated":"2023-06-20 11:22:54.000000000","message":"Ack","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"74c45ec26e6187f0ad71a4df4e125aaa90d03afa","unresolved":true,"context_lines":[{"line_number":4480,"context_line":"            num_rows_in_batch +\u003d 1 + len(fk_inserts)"},{"line_number":4481,"context_line":""},{"line_number":4482,"context_line":"            if max_rows is not None and num_rows_in_batch \u003e\u003d max_rows:"},{"line_number":4483,"context_line":"                break"},{"line_number":4484,"context_line":""},{"line_number":4485,"context_line":"        # NOTE(tssurya): In order to facilitate the deletion of records"},{"line_number":4486,"context_line":"        # from instance_mappings, request_specs and instance_group_member"}],"source_content_type":"text/x-python","patch_set":2,"id":"58e2943e_b23ad97a","line":4483,"in_reply_to":"e2c8457a_af4f8f4a","updated":"2023-06-16 19:08:07.000000000","message":"Right. This is done to prevent a deleted instance to remain in the database with only a partial set of its associated rows as that could cause errors when listing or showing a deleted instance (main case being tenant usage API).","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"4797da507a56845d3d5009ad27d99ce69e818588","unresolved":true,"context_lines":[{"line_number":4500,"context_line":"            # Group the insert and delete in a transaction."},{"line_number":4501,"context_line":"            with conn.begin():"},{"line_number":4502,"context_line":"                for statement in statements_in_batch:"},{"line_number":4503,"context_line":"                    result \u003d conn.execute(statement)"},{"line_number":4504,"context_line":"                    result_tablename \u003d statement.table.name"},{"line_number":4505,"context_line":"                    # Add to archived row counts if not a shadow table."},{"line_number":4506,"context_line":"                    if not result_tablename.startswith(_SHADOW_TABLE_PREFIX):"}],"source_content_type":"text/x-python","patch_set":2,"id":"d67dd7e0_3c3a9435","line":4503,"updated":"2023-06-16 06:47:32.000000000","message":"This is just running through the list of accumulated insert and delete statements from the earlier loop. It is ordered like [child1, child1, parent1, child2, child2, child2, parent2, ...] i.e. a list of complete parent + child \"trees\". 
The ordering is to avoid FK constraint violations when executing the statements.","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"c8f36c62dda5eb9d1f96732feb78bdd253daad98","unresolved":false,"context_lines":[{"line_number":4500,"context_line":"            # Group the insert and delete in a transaction."},{"line_number":4501,"context_line":"            with conn.begin():"},{"line_number":4502,"context_line":"                for statement in statements_in_batch:"},{"line_number":4503,"context_line":"                    result \u003d conn.execute(statement)"},{"line_number":4504,"context_line":"                    result_tablename \u003d statement.table.name"},{"line_number":4505,"context_line":"                    # Add to archived row counts if not a shadow table."},{"line_number":4506,"context_line":"                    if not result_tablename.startswith(_SHADOW_TABLE_PREFIX):"}],"source_content_type":"text/x-python","patch_set":2,"id":"3b30cfa5_68fcab26","line":4503,"in_reply_to":"d67dd7e0_3c3a9435","updated":"2023-06-16 12:28:25.000000000","message":"Ack","commit_id":"5b4d43efc286087fb2545c5d0f042f7d48069f1d"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"577d5fd6af945a5f7ff503e76eda480856821835","unresolved":false,"context_lines":[{"line_number":4439,"context_line":"    # instance UUIDs in the case of the \u0027instances\u0027 table."},{"line_number":4440,"context_line":"    records_in_batch \u003d []"},{"line_number":4441,"context_line":""},{"line_number":4442,"context_line":"    if records:"},{"line_number":4443,"context_line":"        # (melwitt): We will gather rows related by foreign key relationship"},{"line_number":4444,"context_line":"        # for each deleted row, one at a time. We do it this way to keep track"},{"line_number":4445,"context_line":"        # of and limit the total number of rows that will be archived in a"}],"source_content_type":"text/x-python","patch_set":3,"id":"94d6c733_6a4f8b16","line":4442,"updated":"2023-06-20 11:22:54.000000000","message":"nit: a nice (separate) change would be to instead return early if this is false","commit_id":"50d6218b4285e20465eff874dc4f2168c875ac73"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"cc43e23aff16819c235e2d0f80dea1ba1ff4e278","unresolved":false,"context_lines":[{"line_number":4439,"context_line":"    # instance UUIDs in the case of the \u0027instances\u0027 table."},{"line_number":4440,"context_line":"    records_in_batch \u003d []"},{"line_number":4441,"context_line":""},{"line_number":4442,"context_line":"    if records:"},{"line_number":4443,"context_line":"        # (melwitt): We will gather rows related by foreign key relationship"},{"line_number":4444,"context_line":"        # for each deleted row, one at a time. 
We do it this way to keep track"},{"line_number":4445,"context_line":"        # of and limit the total number of rows that will be archived in a"}],"source_content_type":"text/x-python","patch_set":3,"id":"f9ab086c_92c4d3a9","line":4442,"in_reply_to":"94d6c733_6a4f8b16","updated":"2023-06-20 11:25:41.000000000","message":"Or maybe not separate since it would make this part of the change smaller...though the below lines would then be de-dedented so much of a muchness","commit_id":"50d6218b4285e20465eff874dc4f2168c875ac73"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"bae9a16e33671a6062bec5740d3ee6aa635f1525","unresolved":false,"context_lines":[{"line_number":4439,"context_line":"    # instance UUIDs in the case of the \u0027instances\u0027 table."},{"line_number":4440,"context_line":"    records_in_batch \u003d []"},{"line_number":4441,"context_line":""},{"line_number":4442,"context_line":"    if records:"},{"line_number":4443,"context_line":"        # (melwitt): We will gather rows related by foreign key relationship"},{"line_number":4444,"context_line":"        # for each deleted row, one at a time. We do it this way to keep track"},{"line_number":4445,"context_line":"        # of and limit the total number of rows that will be archived in a"}],"source_content_type":"text/x-python","patch_set":3,"id":"2ea246de_356821e2","line":4442,"in_reply_to":"f9ab086c_92c4d3a9","updated":"2023-06-20 19:23:30.000000000","message":"Oh yeah, good call. I think it\u0027s worth doing in this patch, will update.","commit_id":"50d6218b4285e20465eff874dc4f2168c875ac73"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"d4f96f5dbde5ed4a73553ce1306adb217f9c6b19","unresolved":true,"context_lines":[{"line_number":4429,"context_line":"    if not records:"},{"line_number":4430,"context_line":"        # Nothing to archive, so return."},{"line_number":4431,"context_line":"        return rows_archived, deleted_instance_uuids, extras"},{"line_number":4432,"context_line":""},{"line_number":4433,"context_line":"    # Keep track of how many rows we accumulate for the insert+delete database"},{"line_number":4434,"context_line":"    # transaction and cap it as soon as it is \u003e\u003d max_rows. Because we will"},{"line_number":4435,"context_line":"    # archive all child rows of a parent row along with the parent at the same"}],"source_content_type":"text/x-python","patch_set":5,"id":"104547b6_945a082f","line":4432,"updated":"2023-06-22 13:50:35.000000000","message":"ya the early return helps here.","commit_id":"697fa3c000696da559e52b664c04cbd8d261c037"}]}
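The first thread above (context lines 4458-4464) shows how the UUIDs of archived instances are collected: when the table being archived is 'instances', the uuid column is selected for the ids in the batch before the rows are moved. The following is a minimal, self-contained sketch of that pattern, not the actual Nova code; the in-memory SQLite schema and the "fake-uuid-1" data are invented for illustration.

import sqlalchemy as sa

metadata = sa.MetaData()
# Invented stand-in for Nova's 'instances' table; only the columns
# needed for the illustration are defined.
instances = sa.Table(
    "instances", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("uuid", sa.String),
)

engine = sa.create_engine("sqlite://")
metadata.create_all(engine)

with engine.connect() as conn:
    with conn.begin():
        conn.execute(sa.insert(instances), [{"id": 1, "uuid": "fake-uuid-1"}])

    # Mirrors the reviewed code: select the uuids of the instance rows
    # about to be archived, keyed by their primary key ids.
    query_select = sa.select(instances.c.uuid).where(instances.c.id.in_([1]))
    with conn.begin():
        rows = conn.execute(query_select).fetchall()
    deleted_instance_uuids = [r[0] for r in rows]
    print(deleted_instance_uuids)  # ['fake-uuid-1']

Doing the select before the delete is what lets the caller report which instances were archived even though their rows are gone from the live table afterward.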
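sean-k-mooney's reading at line 4483 is worth illustrating: the loop adds one parent row plus all of its child rows at a time, and only then checks the cap, so a complete tree is never split across batches even when it alone exceeds max_rows. Below is a small sketch of that accumulation logic under stated assumptions; get_fk_statements is an invented stand-in for the real helper that gathers the insert and delete statements for one parent row, and plain strings stand in for SQLAlchemy statements.

def get_fk_statements(record):
    """Invented stand-in: return (fk_inserts, fk_deletes, parent_insert,
    parent_delete) for one soft-deleted parent row, faking two child
    rows per parent."""
    fk_inserts = ["INSERT child of %s" % record] * 2
    fk_deletes = ["DELETE child of %s" % record] * 2
    return fk_inserts, fk_deletes, "INSERT %s" % record, "DELETE %s" % record

def accumulate_batch(records, max_rows):
    statements_in_batch = []
    num_rows_in_batch = 0
    for record in records:
        fk_inserts, fk_deletes, parent_ins, parent_del = get_fk_statements(record)
        # Children are queued ahead of their parent so that, at execute
        # time, child deletes run before the parent delete and no FK
        # constraint is violated: [child1, child1, parent1, child2, ...].
        statements_in_batch += fk_inserts + fk_deletes
        statements_in_batch += [parent_ins, parent_del]
        num_rows_in_batch += 1 + len(fk_inserts)
        # The cap is checked only after a complete parent+child tree has
        # been added, so a parent with more children than max_rows is
        # still archived whole rather than split across batches.
        if max_rows is not None and num_rows_in_batch >= max_rows:
            break
    return statements_in_batch, num_rows_in_batch

stmts, count = accumulate_batch(["row1", "row2", "row3"], max_rows=5)
print(count)  # 6: the second tree finishes even though it crosses the cap

With max_rows=5 and two children per parent, the second tree still lands in the batch whole, which is exactly the overshoot behavior discussed in the thread.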
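The thread at line 4503 explains that statements_in_batch holds complete parent + child trees and that the whole batch is executed inside one transaction while per-table archive counts are kept. Here is a rough, self-contained sketch of that execute-and-count step; the two-table schema is invented, and a single parent row with no children stands in for a full tree.

import sqlalchemy as sa

_SHADOW_TABLE_PREFIX = "shadow_"

metadata = sa.MetaData()
instances = sa.Table(
    "instances", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("uuid", sa.String),
)
shadow_instances = sa.Table(
    "shadow_instances", metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("uuid", sa.String),
)

engine = sa.create_engine("sqlite://")
metadata.create_all(engine)

with engine.connect() as conn:
    with conn.begin():
        conn.execute(sa.insert(instances), [{"id": 1, "uuid": "fake-uuid-1"}])

    # One parent "tree": copy the row into the shadow table, then delete
    # the original, grouped in a single transaction so an instance can
    # never end up archived in one table but not the other.
    statements_in_batch = [
        sa.insert(shadow_instances).from_select(
            ["id", "uuid"], sa.select(instances).where(instances.c.id == 1)),
        sa.delete(instances).where(instances.c.id == 1),
    ]
    rows_archived = {}
    with conn.begin():
        for statement in statements_in_batch:
            result = conn.execute(statement)
            result_tablename = statement.table.name
            # Count rows against the original table only; counting the
            # shadow-table inserts as well would double-count each row.
            if not result_tablename.startswith(_SHADOW_TABLE_PREFIX):
                rows_archived[result_tablename] = (
                    rows_archived.get(result_tablename, 0) + result.rowcount)
    print(rows_archived)  # {'instances': 1}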
