)]}'
{"/PATCHSET_LEVEL":[{"author":{"_account_id":32926,"name":"Jiri Podivin","display_name":"jpodivin","email":"jpodivin@redhat.com","username":"jpodivin"},"change_message_id":"a2121f0c2ea0709595c52e3591a1fd4e54d99c06","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":1,"id":"ded407dd_7dd7746d","updated":"2023-04-13 12:24:45.000000000","message":"There is a small linter issue you need to resolve first. Otherwise you won\u0027t reach the beefier part of CI.","commit_id":"547dba2fd397d9ca8dafc602b1dee90b787ac5c8"},{"author":{"_account_id":34423,"name":"Fernando Díaz Bravo","email":"fdiazbra@redhat.com","username":"fdiazbra"},"change_message_id":"9bfb91da659d863ce9c0a557f7882f6e6cc5e07e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":1,"id":"4c1a31f3_04b5dd02","updated":"2023-04-13 11:57:21.000000000","message":"This change is only removing the connection to the ceph mon container and using cephadm to get the health status. This could break the upgrades to 17 workflow because we execute this validation before pointing the new validations repo. \nDo you think we need to maintain the connection to the ceph mon container?","commit_id":"547dba2fd397d9ca8dafc602b1dee90b787ac5c8"},{"author":{"_account_id":32926,"name":"Jiri Podivin","display_name":"jpodivin","email":"jpodivin@redhat.com","username":"jpodivin"},"change_message_id":"02b15684f7c66179a3f150334b48dbb99bdfab67","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"6041bbbb_976a40bc","updated":"2023-04-17 07:47:32.000000000","message":"Will this patch require any backports?","commit_id":"db7bb94968f86658c00ec339c2c4a278f0a29149"},{"author":{"_account_id":32926,"name":"Jiri Podivin","display_name":"jpodivin","email":"jpodivin@redhat.com","username":"jpodivin"},"change_message_id":"af34fa2a713fbc97bf8506303b9ecb9696c8f1f5","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"a3365ac0_decd0c6a","in_reply_to":"34f22f2b_3dd4357d","updated":"2023-04-17 09:26:35.000000000","message":"Thanks. One more question, does this have a bugzilla? \nIf so please add the id to the commit message.","commit_id":"db7bb94968f86658c00ec339c2c4a278f0a29149"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"8e87194f381f92adf335744bced6a30154db7873","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"34f22f2b_3dd4357d","in_reply_to":"6041bbbb_976a40bc","updated":"2023-04-17 09:15:13.000000000","message":"I think Wallaby should be the target here.","commit_id":"db7bb94968f86658c00ec339c2c4a278f0a29149"},{"author":{"_account_id":34423,"name":"Fernando Díaz Bravo","email":"fdiazbra@redhat.com","username":"fdiazbra"},"change_message_id":"b411e42fdd44d8ce44d1c9910515cd8a0734b11f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"fb02028b_775f7e1c","in_reply_to":"a3365ac0_decd0c6a","updated":"2023-04-17 09:33:15.000000000","message":"No BZ, should I create one for tracking?","commit_id":"db7bb94968f86658c00ec339c2c4a278f0a29149"},{"author":{"_account_id":32926,"name":"Jiri Podivin","display_name":"jpodivin","email":"jpodivin@redhat.com","username":"jpodivin"},"change_message_id":"b15d1ff660e89083f737cf646351ca876e24ae89","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":2,"id":"3299f58f_7d25dce0","in_reply_to":"fb02028b_775f7e1c","updated":"2023-04-17 13:28:12.000000000","message":"It is a good idea, if we want a downstream backport. \nOtherwise it\u0027s just a matter of preference.","commit_id":"db7bb94968f86658c00ec339c2c4a278f0a29149"}],"roles/ceph/tasks/ceph-health.yaml":[{"author":{"_account_id":6816,"name":"Jesse Pretorius","email":"jesse@odyssey4.me","username":"jesse-pretorius"},"change_message_id":"49a2b21399b472911aa3009e28b7f25323dfc454","unresolved":true,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"    - name: Get ceph health"},{"line_number":20,"context_line":"      become: true"},{"line_number":21,"context_line":"      shell: \"cephadm shell -- {{ ceph_cluster_name.stdout }} health\""},{"line_number":22,"context_line":"      register: ceph_health"},{"line_number":23,"context_line":""},{"line_number":24,"context_line":"    - name: Check ceph health"}],"source_content_type":"text/x-yaml","patch_set":1,"id":"e66f49d4_a82b49c9","line":21,"updated":"2023-04-13 12:02:32.000000000","message":"@Francesco, @Upgrades Please check my logic.\n\nIn the 16.2-\u003e17.1 we first update the undercloud\u0027s repositories and update validations, then do pre-upgrade validations.\n\nIf this particular validation is used in pre-upgrade validations then is cephadm already there, or does the transition to cephadm only happen later?\n\nIf the transition is later, and this validation is used in the pre-upgrade checks, then we would need to have a check for the presence of cephadm and fall back to the container use (podman only).","commit_id":"547dba2fd397d9ca8dafc602b1dee90b787ac5c8"},{"author":{"_account_id":6816,"name":"Jesse Pretorius","email":"jesse@odyssey4.me","username":"jesse-pretorius"},"change_message_id":"0dd5d6ff266573a18d713e3826c379567691e0da","unresolved":false,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"    - name: Get ceph health"},{"line_number":20,"context_line":"      become: true"},{"line_number":21,"context_line":"      shell: \"cephadm shell -- {{ ceph_cluster_name.stdout }} health\""},{"line_number":22,"context_line":"      register: ceph_health"},{"line_number":23,"context_line":""},{"line_number":24,"context_line":"    - name: Check ceph health"}],"source_content_type":"text/x-yaml","patch_set":1,"id":"eeceea5b_85d30d18","line":21,"in_reply_to":"9e0e05fa_1b242087","updated":"2023-04-14 11:28:53.000000000","message":"Done","commit_id":"547dba2fd397d9ca8dafc602b1dee90b787ac5c8"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"d91b8fbc91bc1bb5a2b9572dbeeefb0426de2cdf","unresolved":true,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"    - name: Get ceph health"},{"line_number":20,"context_line":"      become: true"},{"line_number":21,"context_line":"      shell: \"cephadm shell -- {{ ceph_cluster_name.stdout }} health\""},{"line_number":22,"context_line":"      register: ceph_health"},{"line_number":23,"context_line":""},{"line_number":24,"context_line":"    - name: Check ceph health"}],"source_content_type":"text/x-yaml","patch_set":1,"id":"f3802cfe_8f026118","line":21,"in_reply_to":"e66f49d4_a82b49c9","updated":"2023-04-13 21:43:07.000000000","message":"Right, I think you had a good point. I really depends when the pre-upgrade validation are run.\nIf you plan to: \n\n1. Upgrade the undercloud\n2. Upgrade \u0026\u0026 Adopt Ceph\n3. Run the pre-upgrade validations\n\nThen cephadm will be already there, and it\u0027s ok rely on that command to get the status of the cluster (`sudo cephadm shell -- ceph health` should be enough).\nHowever, if the pre-upgrade validations are used before step #2 (which would make sense as you want to check the status of the Ceph cluster before going through the upgrade procedure), then `cephadm` won\u0027t be there (the cluster is still Ceph 4 and we should rely on the old command).\nI\u0027m ok about removing the docker references and the `set_fact` on the container_client (it has been used for corner cases during 13 to 16), but moving from `podman exec ceph-mon-controller-x ceph --cluster health` to the cephadm based command really depends on the order of the steps mentioned above.","commit_id":"547dba2fd397d9ca8dafc602b1dee90b787ac5c8"},{"author":{"_account_id":6816,"name":"Jesse Pretorius","email":"jesse@odyssey4.me","username":"jesse-pretorius"},"change_message_id":"849b16e7277e518787f4b8aba4c5fdb3d97eafff","unresolved":true,"context_lines":[{"line_number":18,"context_line":""},{"line_number":19,"context_line":"    - name: Get ceph health"},{"line_number":20,"context_line":"      become: true"},{"line_number":21,"context_line":"      shell: \"cephadm shell -- {{ ceph_cluster_name.stdout }} health\""},{"line_number":22,"context_line":"      register: ceph_health"},{"line_number":23,"context_line":""},{"line_number":24,"context_line":"    - name: Check ceph health"}],"source_content_type":"text/x-yaml","patch_set":1,"id":"9e0e05fa_1b242087","line":21,"in_reply_to":"f3802cfe_8f026118","updated":"2023-04-14 11:28:34.000000000","message":"According to https://opendev.org/openstack/tripleo-validations/src/commit/447bd2f9f84408f5a3476e564994e4de7a16c7be/playbooks/ceph-health.yaml#L10-L12 this check isn\u0027t in the pre-upgrade group... so I guess we don\u0027t need to be concerned in this patch what the upgrade ordering is.\n\nIf this check is added to the pre-upgrade group, then that same patch would need to also cater for the situation mentioned above.\n\nSorry for the noise!","commit_id":"547dba2fd397d9ca8dafc602b1dee90b787ac5c8"}]}
