{".zuul.yaml":[{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"6b2adb8425655b2249e93e5baf20a98752d7ff7f","unresolved":true,"context_lines":[{"line_number":79,"context_line":""},{"line_number":80,"context_line":"- job:"},{"line_number":81,"context_line":"    name: devstack-plugin-ceph-cephfs-native"},{"line_number":82,"context_line":"    description: |"},{"line_number":83,"context_line":"      Runs manila tempest plugin tests with Native CephFS as a manila back"},{"line_number":84,"context_line":"      end (DHSS\u003dFalse)"},{"line_number":85,"context_line":"    parent: manila-tempest-plugin-cephfs-native"}],"source_content_type":"text/x-yaml","patch_set":11,"id":"6c953dfc_75a833e2","line":82,"updated":"2023-04-25 21:39:33.000000000","message":"The nodeset here is set to focal fossa in the parent (manila-tempest-plugin-cephfs-native); i wouldn\u0027t mind overriding that to jammy and seeing if that\u0027s working","commit_id":"985284153166fa82119a870fa89fce0879ade43e"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"1c0916cb5ceca76085d3d4dc0e854842d3230d3e","unresolved":false,"context_lines":[{"line_number":79,"context_line":""},{"line_number":80,"context_line":"- job:"},{"line_number":81,"context_line":"    name: devstack-plugin-ceph-cephfs-native"},{"line_number":82,"context_line":"    description: |"},{"line_number":83,"context_line":"      Runs manila tempest plugin tests with Native CephFS as a manila back"},{"line_number":84,"context_line":"      end (DHSS\u003dFalse)"},{"line_number":85,"context_line":"    parent: manila-tempest-plugin-cephfs-native"}],"source_content_type":"text/x-yaml","patch_set":11,"id":"a25e147b_c6223c05","line":82,"in_reply_to":"6c953dfc_75a833e2","updated":"2023-04-26 02:49:04.000000000","message":"will do this in a separate change: https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/881519/","commit_id":"985284153166fa82119a870fa89fce0879ade43e"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"6ce409582f029ab263285d9c1f913e5fb74acac9","unresolved":true,"context_lines":[{"line_number":193,"context_line":"            voting: false"},{"line_number":194,"context_line":"        # - devstack-plugin-ceph-tempest-fedora-latest"},{"line_number":195,"context_line":"        # - devstack-plugin-ceph-multinode-tempest-py3"},{"line_number":196,"context_line":"        # - devstack-plugin-ceph-multinode-tempest-cephadm:"},{"line_number":197,"context_line":"        #     voting: false"},{"line_number":198,"context_line":"        # - devstack-plugin-ceph-master-tempest:"},{"line_number":199,"context_line":"        #     voting: false"},{"line_number":200,"context_line":"    gate:"},{"line_number":201,"context_line":"      jobs:"},{"line_number":202,"context_line":"        - devstack-plugin-ceph-tempest-cephadm"}],"source_content_type":"text/x-yaml","patch_set":22,"id":"fb8b5f9f_77d15c5a","line":199,"range":{"start_line":196,"start_character":0,"end_line":199,"end_character":27},"updated":"2023-05-03 19:03:04.000000000","message":"I can clean these up in a future patch","commit_id":"8bec3f2abb30c0c0e8977fb209cfb34993c0dd0d"},{"author":{"_account_id":16643,"name":"Goutham Pacha 
Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"71ccb1f9db9eec8498421b8a0db1edcc6dae5f68","unresolved":true,"context_lines":[{"line_number":199,"context_line":"        #     voting: false"},{"line_number":200,"context_line":"    gate:"},{"line_number":201,"context_line":"      jobs:"},{"line_number":202,"context_line":"        - devstack-plugin-ceph-tempest-cephadm"},{"line_number":203,"context_line":""},{"line_number":204,"context_line":"- project:"},{"line_number":205,"context_line":"    templates:"}],"source_content_type":"text/x-yaml","patch_set":22,"id":"80ba7899_2e6072a2","line":202,"range":{"start_line":202,"start_character":10,"end_line":202,"end_character":46},"updated":"2023-05-03 19:01:23.000000000","message":"devstack-plugin-ceph-tempest-py3","commit_id":"8bec3f2abb30c0c0e8977fb209cfb34993c0dd0d"}],"/PATCHSET_LEVEL":[{"author":{"_account_id":4146,"name":"Clark Boylan","email":"cboylan@sapwetik.org","username":"cboylan"},"change_message_id":"de797cdc5fe1235791665ccf5a10a30344d51809","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"a6b54bd3_d397a2ff","updated":"2022-11-22 20:04:17.000000000","message":"recheck hold is in place so that we can debug on the node.","commit_id":"5941bb9df5b87b1d98da5424465c2f2b29fd34eb"},{"author":{"_account_id":22873,"name":"Martin Kopec","email":"mkopec@redhat.com","username":"mkopec"},"change_message_id":"41a7f737c0596a42dcb83f4fb82b61905904b84b","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":4,"id":"07e5d6e0_7cdccfbe","updated":"2022-11-29 14:33:47.000000000","message":"recheck is this failing even after we merged the depends-on in tempest?","commit_id":"30159f1d3c0fe3bb616cff35290e31d9b32dfdf9"},{"author":{"_account_id":13252,"name":"Dr. 
Jens Harbott","display_name":"Jens Harbott (frickler)","email":"frickler@offenerstapel.de","username":"jrosenboom"},"change_message_id":"b6d588a46574c2278e22a6b795e24727894a25f7","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":5,"id":"fa245f13_c0bdedc2","updated":"2022-12-27 10:06:45.000000000","message":"recheck get fresh logs","commit_id":"ef81ae9712f0797fdb0059e2a90271e879aa9688"},{"author":{"_account_id":22873,"name":"Martin Kopec","email":"mkopec@redhat.com","username":"mkopec"},"change_message_id":"9fb0f0b1006b955420fbf82420ce66e3e524beff","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":6,"id":"4ac58d7b_733ee9ef","updated":"2023-03-14 14:57:36.000000000","message":"recheck \u0027could not find executable python\u0027 is quite weird, let\u0027s see if it still happens, in the meantime i\u0027m checking what kind of tempest env we use in the job and where we inherit from ","commit_id":"b67b8f481b2d9f03a799ef03696b93657d2cadad"},{"author":{"_account_id":22873,"name":"Martin Kopec","email":"mkopec@redhat.com","username":"mkopec"},"change_message_id":"5efb5600b704980f575ecf8c7820b5a6f4db04ef","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":6,"id":"aca168b7_6eda0ab8","updated":"2023-02-21 15:08:54.000000000","message":"recheck to see fresh logs ","commit_id":"b67b8f481b2d9f03a799ef03696b93657d2cadad"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"5ddc56718955d2db5735d4989618a4a1483f5345","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":8,"id":"c45e1e14_e3d7634e","updated":"2023-04-24 22:48:55.000000000","message":"recheck not sure if this is supposed to work or not, but the logs have expired so I can\u0027t see","commit_id":"d228a52cfdc06522cdc81d66bb79d829726c648f"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"5492991cb9632e7d5dbe90ef4c6e3be50dedf454","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":9,"id":"eaf5a4b1_8e638f62","updated":"2023-04-25 16:41:38.000000000","message":"Lots of fails in the job, but it did \"work\" in terms of it\u0027s running on jammy with rbd enabled. The early ones seem like normal \"failed to detach\" cinder volume things. However, towards the end once we got stuck before the timeout, I see a lot of this:\n\n```\nApr 25 15:13:31.133655 np0033843288 nova-compute[90118]: WARNING nova.storage.rbd_utils [None req-053496f7-16e4-4580-b63e-87671e2db3cc tempest-VolumesExtendAttachedTest-709183219 tempest-VolumesExtendAttachedTest-709183219-project-member] rbd remove bf927bed-4c6d-42d3-ab5e-56e24b16b8a5_disk in pool vms failed: rbd.ImageBusy: [errno 16] RBD image is busy (error removing image)\n```\n\nwhich has me worried that something has changed and we\u0027re going to have deeper problems. I will squash the patch below into this one and we\u0027ll get another run to see what happens.","commit_id":"cf7d9acc1487f9b754a98fcaa527de452b06709d"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"7f546834d16ec0cc825a378e04f7372745f0bf11","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":10,"id":"6133dbd4_39c1fad0","updated":"2023-04-25 19:02:59.000000000","message":"So the latest run looks much better, although there are a lot of volume attach failures. 
Dan Smith (danms), patch set 10, 2023-04-25
recheck moar data

Dan Smith (danms), patch set 10, 2023-04-25
Yeah, looks pretty much the same; I think something is not happy in Ceph land.

Goutham Pacha Ravi (gouthamr), patch set 13, 2023-04-26
The last run of the cephadm job timed out; tests took more than 98 minutes.
There were several failures with detaching volumes, just like the
package-based Ceph job. I am attempting a higher test concurrency to see if
we can avoid the timeout and get the job to complete.

Sylvain Bauza (sbauza), patch set 14, 2023-04-26
Just increased the timeout on the cephadm job to see whether it eventually
tells us more.

Dan Smith (danms), patch set 14, 2023-04-26
Increasing the timeout isn't going to help. We're timing out because each
test that gets hung up on detach spends almost 10 minutes waiting for it to
happen before failing. With low concurrency and lots to do, we just time out
before we get finished. We've got plenty of runs that show the failures that
didn't happen to time out (running on faster nodes with faster setup,
probably).
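For reference, the two knobs discussed in the comments above (the overall job
timeout and the tempest worker count) are typically set on the job roughly as
below. The values, and the assumption that the job's devstack-tempest ancestry
honours a tempest_concurrency variable, are illustrative rather than what was
actually merged:

```
# Illustrative sketch, not the merged configuration: raise the overall Zuul
# job timeout and run more tempest workers in parallel. The values and the
# tempest_concurrency variable are assumptions for the purpose of the example.
- job:
    name: devstack-plugin-ceph-tempest-cephadm
    timeout: 10800            # job timeout in seconds (3 hours)
    vars:
      tempest_concurrency: 4  # number of parallel tempest workers
```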
Dan Smith (danms), patch set 16, 2023-04-26
The cephadm job looks really excellent. The failure in the regular job is
because the ceph-osd got OOM-killed:

https://zuul.opendev.org/t/openstack/build/a6ece980be6c4def9bd578ff501a99f2/log/controller/logs/syslog.txt#5027

```
Apr 26 17:33:09 np0033857608 kernel: Out of memory: Killed process 24771 (ceph-osd) total-vm:3731520kB, anon-rss:587444kB, file-rss:0kB, shmem-rss:0kB, UID:64045 pgtables:2328kB oom_score_adj:0
```

That was with the mysql memory optimization, so I'm not sure what we could do
other than perhaps reduce the concurrency to less than 4 but more than zero.
I suppose it could also be a memory leak that is improved in the later
releases that the cephadm job is using. We'll see what the next run(s) look
like.

Goutham Pacha Ravi (gouthamr), patch set 18, 2023-04-26
LGTM; thanks for the changes, Dan.

I think we need wider knowledge around how this repository will change, but
we can keep that out of this patch. Right now the single-node cephadm job
runs on Ubuntu 22.04 (Jammy Jellyfish), and that in itself is a good win.
This patch breaks the native-cephfs job that's defined elsewhere; a fix will
be committed to get that working again.

In terms of next steps, I was hoping we can drop all the jobs using the
package-based deployment, default CEPHADM_DEPLOY to True, and retire the
"ceph" script. Hoping we can get this all done in 2023.2.
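A rough sketch of what the "default to cephadm" direction mentioned above
could look like in job configuration, where CEPHADM_DEPLOY is the plugin
switch named in the comment. The parent job name and the exact wiring through
devstack_localrc are assumptions made for illustration:

```
# Illustrative sketch only: select the cephadm-based deployment by passing
# the plugin's CEPHADM_DEPLOY switch through devstack_localrc. The parent
# job name is assumed here for the sake of the example.
- job:
    name: devstack-plugin-ceph-tempest-cephadm
    parent: devstack-plugin-ceph-tempest-py3
    vars:
      devstack_localrc:
        CEPHADM_DEPLOY: true
```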
Goutham Pacha Ravi (gouthamr), patch set 20, 2023-04-27
Removed the dependency cycle; I think Francesco wanted to get the jobs
submitted in a different order.

Francesco Pantano (fmount), patch set 20, 2023-04-27 (reply)
Yeah, sorry for the noise; I was trying to experiment with some rbd (mostly
Cinder) related jobs.

Dan Smith (danms), patch set 20, 2023-04-27
Putting -W on here so it's clear we need to wait until nova works, which it
currently doesn't. I'm also happy to do the rename (or drop) of the legacy
distro job in this patch, or we can do it later. I just want to make sure we
can make the nova job (and glance runs it too) work on cephadm/jammy before
we merge this.

Goutham Pacha Ravi (gouthamr), patch set 20, 2023-04-27 (reply)
Thank you. Can you please link the jobs you're referring to? Perhaps with a
"Needed-By" or a comment here? I'm not sure I know them.

Dan Smith (danms), patch set 20, 2023-04-27 (reply)
Yep, I'll add a Needed-By on here if I update it again:

https://review.opendev.org/c/openstack/nova/+/881585?tab=change-view-tab-header-zuul-results-summary

I initially assumed it would "just work" on the nova job as well, but I'm
down a pretty bad rabbit hole, as this seems to tickle a volume detach bug we
normally see 1% of the time, but 100% of the time with the new Ceph :/

Goutham Pacha Ravi (gouthamr), patch set 20, 2023-04-27 (reply)
Ah: "nova-ceph-multistore"; and it logs the ImageBusy exception that I
noticed on the cephfs job I was trying to fix up:

Apr 27 00:42:53.761938 np0033860003 nova-compute[98001]: WARNING nova.storage.rbd_utils [-] rbd remove f56e5103-2b98-4656-b7c2-e0f3be41745c_disk.config in pool vms failed: rbd.ImageBusy: [errno 16] RBD image is busy (error removing image)

https://zuul.opendev.org/t/openstack/build/3b1b9e7dbfa848e48a67ab6932506ba7/log/controller/logs/screen-n-cpu.txt#37982

The job has a swap size override to 4096; I don't know whether your bump to
8192 helped the job run better here
(https://review.opendev.org/c/openstack/nova/+/881585/5/.zuul.yaml#603).

Dan Smith (danms), patch set 20, 2023-04-27 (reply)
I think it's likely related to us doing things with the instances before
they're fully booted, but we'll see.

The swap thing shouldn't be a huge deal; I mostly did it for the base job,
which kept OOMing. The cephadm one never did, as far as I saw. I also dropped
concurrency to 3 when I added that.
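The memory-pressure mitigations mentioned in the last two comments (a larger
swap file and fewer tempest workers) would look roughly like the following as
job variables. Treat configure_swap_size as an assumed name for the swap knob
exposed by the devstack base jobs, and the values as illustrative:

```
# Illustrative sketch only: give the node more swap and run fewer tempest
# workers to ease OOM pressure on ceph-osd. configure_swap_size is assumed
# to be the swap variable honoured by the devstack base jobs.
- job:
    name: devstack-plugin-ceph-tempest-py3
    vars:
      configure_swap_size: 8192   # MiB of swap, up from the 4096 override
      tempest_concurrency: 3      # fewer parallel tempest workers
```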
Ghanshyam Maan (ghanshyam), patch set 21, 2023-05-03
lgtm, thanks

Goutham Pacha Ravi (gouthamr), patch set 23, 2023-05-03
recheck

A volume backup test failed an assert. Doesn't seem related, since it passed
here before.

Ghanshyam Maan (ghanshyam), patch set 23, 2023-05-05
lgtm

Goutham Pacha Ravi (gouthamr), patch set 23, 2023-05-05
We'll watch the ctp change merge and then re-enqueue this change.

https://docs.opendev.org/opendev/infra-manual/latest/developers.html#gate-pipeline

Dan Smith (danms), patch set 23, 2023-05-05
dep merged
Inline comment on devstack/files/debs/devstack-plugin-ceph:

Goutham Pacha Ravi (gouthamr), patch set 21, lines 2-4, 2023-05-03 [unresolved]
(on the qemu-block-extra, catatonit, and podman entries)
To confirm, these are not necessary for rpm?
