{".zuul.yaml":[{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"ccc593d1d4226c6b6cfdb4eeae0fa8bafbbf9055","unresolved":true,"context_lines":[{"line_number":134,"context_line":"        TEST_MASTER: true"},{"line_number":135,"context_line":""},{"line_number":136,"context_line":"- job:"},{"line_number":137,"context_line":"    name: devstack-plugin-ceph-tempest-py3-cephadm"},{"line_number":138,"context_line":"    parent: tempest-full-py3"},{"line_number":139,"context_line":"    description: |"},{"line_number":140,"context_line":"      Integration tests that runs with the ceph devstack plugin and py3."}],"source_content_type":"text/x-yaml","patch_set":72,"id":"78d14571_b77edab7","line":137,"updated":"2022-06-06 15:35:39.000000000","message":"can\u0027t this job inherit from devstack-plugin-ceph-tempest-py3-base, redefining just a minimal amount of variables, and just be called devstack-plugin-ceph-tempest-cephadm ?","commit_id":"e803150687bad08ddc968fc453d5e192f755a03e"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"1dcd9ab8d4ba79bb25a00022f8c6a312cfe2accc","unresolved":true,"context_lines":[{"line_number":134,"context_line":"        TEST_MASTER: true"},{"line_number":135,"context_line":""},{"line_number":136,"context_line":"- job:"},{"line_number":137,"context_line":"    name: devstack-plugin-ceph-tempest-py3-cephadm"},{"line_number":138,"context_line":"    parent: tempest-full-py3"},{"line_number":139,"context_line":"    description: |"},{"line_number":140,"context_line":"      Integration tests that runs with the ceph devstack plugin and py3."}],"source_content_type":"text/x-yaml","patch_set":72,"id":"cc83f5c0_9e607f42","line":137,"in_reply_to":"20f1df84_798fa120","updated":"2022-06-06 20:14:55.000000000","message":"I don\u0027t see the logs of build 67, do you remember the error?","commit_id":"e803150687bad08ddc968fc453d5e192f755a03e"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"7fe8628521da7ba09fb6573d09f96521de2e3f40","unresolved":true,"context_lines":[{"line_number":134,"context_line":"        TEST_MASTER: true"},{"line_number":135,"context_line":""},{"line_number":136,"context_line":"- job:"},{"line_number":137,"context_line":"    name: devstack-plugin-ceph-tempest-py3-cephadm"},{"line_number":138,"context_line":"    parent: tempest-full-py3"},{"line_number":139,"context_line":"    description: |"},{"line_number":140,"context_line":"      Integration tests that runs with the ceph devstack plugin and py3."}],"source_content_type":"text/x-yaml","patch_set":72,"id":"20f1df84_798fa120","line":137,"in_reply_to":"78d14571_b77edab7","updated":"2022-06-06 16:41:32.000000000","message":"I tried it in my first attempt [1] but it didn\u0027t work.\nDo you have suggestions to make 67 working?\n\n\n[1] https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/826484/67..72/.zuul.yaml","commit_id":"e803150687bad08ddc968fc453d5e192f755a03e"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"68fe4bf7eda224d5e66e055f8bcf1fb9c0ead703","unresolved":true,"context_lines":[{"line_number":134,"context_line":"        TEST_MASTER: true"},{"line_number":135,"context_line":""},{"line_number":136,"context_line":"- job:"},{"line_number":137,"context_line":"    name: 
devstack-plugin-ceph-tempest-py3-cephadm"},{"line_number":138,"context_line":"    parent: tempest-full-py3"},{"line_number":139,"context_line":"    description: |"},{"line_number":140,"context_line":"      Integration tests that runs with the ceph devstack plugin and py3."}],"source_content_type":"text/x-yaml","patch_set":72,"id":"e2a37428_38680fbb","line":137,"in_reply_to":"cc83f5c0_9e607f42","updated":"2022-06-07 06:12:45.000000000","message":"no, retrying to push the two jobs (basic + multinode) step by step.\nI\u0027m starting (PS73) with the base job.\nLet\u0027s see the CI in action.","commit_id":"e803150687bad08ddc968fc453d5e192f755a03e"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"8d2cc9d8abef19683bdfcd1510e515433392ee38","unresolved":false,"context_lines":[{"line_number":134,"context_line":"        TEST_MASTER: true"},{"line_number":135,"context_line":""},{"line_number":136,"context_line":"- job:"},{"line_number":137,"context_line":"    name: devstack-plugin-ceph-tempest-py3-cephadm"},{"line_number":138,"context_line":"    parent: tempest-full-py3"},{"line_number":139,"context_line":"    description: |"},{"line_number":140,"context_line":"      Integration tests that runs with the ceph devstack plugin and py3."}],"source_content_type":"text/x-yaml","patch_set":72,"id":"e01ef3be_5e85d4b4","line":137,"in_reply_to":"e2a37428_38680fbb","updated":"2022-06-17 08:06:26.000000000","message":"only one test for devstack-plugin-ceph-tempest-py3-base failed in PS73 with a timeout (test_snapshot_create_delete_with_volume_in_use) and devstack-plugin-ceph-tempest-py3-cephadm is passing. I\u0027ll submit a new patch set with a couple of env vars that doesn\u0027t require to be redefined and changing the job name to the one proposed by Luigi.","commit_id":"e803150687bad08ddc968fc453d5e192f755a03e"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"ccc593d1d4226c6b6cfdb4eeae0fa8bafbbf9055","unresolved":true,"context_lines":[{"line_number":167,"context_line":"      tempest_test_blacklist: \u0027{{ ansible_user_dir }}/{{ zuul.projects[\"opendev.org/openstack/devstack-plugin-ceph\"].src_dir }}/tempest_skiplist.txt\u0027"},{"line_number":168,"context_line":""},{"line_number":169,"context_line":"- job:"},{"line_number":170,"context_line":"    name: devstack-plugin-ceph-multinode-tempest-py3-cephadm"},{"line_number":171,"context_line":"    parent: tempest-multinode-full-py3"},{"line_number":172,"context_line":"    description: |"},{"line_number":173,"context_line":"      Integration tests that runs the ceph device plugin across multiple"}],"source_content_type":"text/x-yaml","patch_set":72,"id":"0fe853a4_915c02d9","line":170,"updated":"2022-06-06 15:35:39.000000000","message":"ditto here (inheriting from devstack-plugin-ceph-multinode-tempest-py3, naming it devstack-plugin-ceph-multinode-tempest-cephadm)","commit_id":"e803150687bad08ddc968fc453d5e192f755a03e"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"5ca41c56ff1d72247af6bd6100f0a33d73c9695a","unresolved":false,"context_lines":[{"line_number":167,"context_line":"      tempest_test_blacklist: \u0027{{ ansible_user_dir }}/{{ zuul.projects[\"opendev.org/openstack/devstack-plugin-ceph\"].src_dir 
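For reference, the inheritance Luigi is asking for would look roughly like this in .zuul.yaml (a sketch, not the patch itself; it assumes devstack-plugin-ceph-tempest-py3-base is defined earlier in the file and that CEPHADM_DEPLOY is the only variable the child job needs to flip):

    - job:
        name: devstack-plugin-ceph-tempest-cephadm
        parent: devstack-plugin-ceph-tempest-py3-base
        description: |
          Integration tests with the ceph devstack plugin, deploying the
          cluster with cephadm instead of the classic ceph path.
        vars:
          devstack_localrc:
            CEPHADM_DEPLOY: true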
Line 56, patch set 76 -- "devstack_localrc:" block of the cephadm job

  Luigi Toscano (2022-06-20):
    See below for some comments.

  Francesco Pantano (2022-06-20):
    Ack

Line 161, patch set 76 -- CEPH_RELEASE: "pacific"

  Luigi Toscano (2022-06-20):
    Why is this variable needed here but not for devstack-plugin-ceph-tempest-cephadm?

  Francesco Pantano (2022-06-20):
    IMO this can be removed; it doesn't make sense in a cephadm-driven deployment, where the
    container image/tag should be specified instead. However, let's leave making the ceph
    version a parameter to a follow-up review. I'm removing this var from the job, thanks
    for pointing that out.

Line 163, patch set 76 -- CEPHADM_DEV_OSD: true

  Luigi Toscano (2022-06-20):
    This is the default, please remove it.

  Francesco Pantano (2022-06-20):
    Done

Line 164, patch set 76 -- TARGET_DEV_OSD_DIR: /opt/stack

  Luigi Toscano (2022-06-20):
    Can't this value be the default?

  Francesco Pantano (2022-06-20):
    Yes, that makes sense in this context.

Line 165, patch set 76 -- CEPH_LOOPBACK_DISK_SIZE: 30G

  Luigi Toscano (2022-06-20):
    Can't this value be bumped to 30G by default if CEPHADM_DEPLOY is true?

  Francesco Pantano (2022-06-20):
    This is a job-specific value, needed to make sure tempest is happy. Let's see if this
    logic can be moved into plugin.sh, but I don't think we should change it in the cephadm
    code.

  Luigi Toscano (2022-06-20):
    If a full tempest run with devstack-plugin-ceph requires a bigger size for the loopback
    disk, I'd say let's have that size as the default. Any other job which uses this plugin
    would need it, and it can always be reduced if needed.

  Francesco Pantano (2022-06-21):
    Done

Line 166, patch set 76 -- CEPHADM_SAVE_RESOURCES: True

  Luigi Toscano (2022-06-20):
    Was it needed for debugging purposes? Can't it be enabled automatically when needed,
    using some other environmental condition?

  Francesco Pantano (2022-06-20):
    This parameter is not here for debugging purposes: when the ceph cluster is deployed, in
    order to save resources (mostly memory) consumed by cephadm in CI, we can disable it (we
    have a similar pattern in TripleO). That makes sense in a scenario where everything is
    co-located, but if you're deploying a devstack environment for development (hence outside
    of CI), it's useful to be able to interact with the ceph cluster through the orch
    interface/CLI. I'm going to set it to true by default; we can change the behavior later
    if we need to.

  Francesco Pantano (2022-06-20):
    Done
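Outside of CI, the same switch is what a developer would put in local.conf. A minimal sketch, assuming the PS76 cleanup leaves CEPHADM_DEPLOY as the single required variable and sensible defaults for the rest (the commented values below are the ones discussed in this review):

    [[local|localrc]]
    enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
    # Opt in to the cephadm-based deployment; plain ceph remains the default.
    CEPHADM_DEPLOY=True
    # Optional overrides discussed above, normally left at their defaults:
    #CEPH_LOOPBACK_DISK_SIZE=30G
    #CONTAINER_IMAGE=quay.io/ceph/ceph:v16.2.7
    #CEPHADM_SAVE_RESOURCES=False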
Cohen","email":"eliadcohen@gmail.com","username":"eliadcohen"},"change_message_id":"455dad5dba6c9c888d0723622aa8493afbb91c23","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":26,"id":"5cdc2096_43811e18","updated":"2022-03-10 18:50:16.000000000","message":"Hope this helps, hope I understood the need","commit_id":"3f4e059d704fb41a02907364a7d194b77f6ed74c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"2225f2b774c00f42e0b4d28f5eb1075781d920a8","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":36,"id":"39b034fe_4385887b","updated":"2022-03-21 10:01:08.000000000","message":"recheck","commit_id":"3c5f6707f24f7f7e0e7db3234d4a29317d813e07"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"af32cef0dec098f861c106e6666e54e5edc5b6df","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":37,"id":"2d4b7827_3132677a","updated":"2022-03-21 18:16:15.000000000","message":"I quickly went through the CephFS related code. Looking great! Minor comments","commit_id":"39de5141fdefe2a65ea1e0bf4e12de6398da3f32"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"e1ed9470d8238b98c063c2fd722c8519f4075b9f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":39,"id":"bbc00a94_63b6b768","updated":"2022-03-22 14:02:45.000000000","message":"Thanks, Francesco! The CephFS and ganesha deployments look good to me.","commit_id":"dbca415cb8962a353c6256ced010cfaf33db9778"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"e72da9cc54238709de6ee247b9af0ed3769c9ac9","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":40,"id":"81f60ac7_17b4d336","updated":"2022-03-28 07:41:20.000000000","message":"recheck","commit_id":"144a00c11870d934fba6c7960bdabd467cd17b34"},{"author":{"_account_id":9303,"name":"Abhishek Kekane","email":"akekane@redhat.com","username":"abhishekkekane"},"change_message_id":"b09a1f915cda2f1877296d5a986fc8c5dad1a1f6","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":60,"id":"ad5f93d1_7640743d","updated":"2022-04-19 07:44:27.000000000","message":"Glance multistore side changes looks good to me, just one question are we going to document new local.conf parameters somewhere?\n\nLike I have introduced $GLANCE_ENABLE_MULTIPLE_STORES which needs to be set True to configure multiple stores.","commit_id":"0edfd187a4492de8bf1448e688ca5643a0b64aa0"},{"author":{"_account_id":9303,"name":"Abhishek Kekane","email":"akekane@redhat.com","username":"abhishekkekane"},"change_message_id":"a63397adc63c99c280cd6214d75759a44781aa13","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":60,"id":"78881e99_94fa1773","in_reply_to":"06ad28cc_8f58a1c5","updated":"2022-04-19 14:34:21.000000000","message":"Ack, I will take care of documenting the change.","commit_id":"0edfd187a4492de8bf1448e688ca5643a0b64aa0"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"5b237654c0c7850750e8d56d12d61b3cfcc8a815","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":60,"id":"06ad28cc_8f58a1c5","in_reply_to":"ad5f93d1_7640743d","updated":"2022-04-19 08:01:14.000000000","message":"Yeah, as 
GLANCE_ENABLE_MULTIPLE_STORES is a new parameter I guess it\u0027s worth having some documentation explaining how it can be used. This change can go either on a separated (follow up) review, or you can add it within the context of the glance proposed changes [1] [2] [3] (and we can reference them here I guess).\nI\u0027m pretty sure [1][2][3] will land before this patch as we\u0027re still enabling testing and make it work w/ manila cephfs native [4] (we have a few tempest failures).\nwdyt? would you just add some doc on the glance changes [2]? It\u0027s the same repo so when we rebase this patch we\u0027re getting the doc changes as well.\n\n[1] https://review.opendev.org/c/openstack/devstack/+/741654\n[2] https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/741801/\n[3] https://review.opendev.org/c/openstack/devstack/+/741802\n[4] https://review.opendev.org/c/openstack/manila-tempest-plugin/+/834220","commit_id":"0edfd187a4492de8bf1448e688ca5643a0b64aa0"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"761f7754d6a014b29b4497bd5466bc8cf92d81fc","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":64,"id":"9741b216_83175e19","updated":"2022-05-11 08:31:43.000000000","message":"Last PS aligns this code w/ [1]\n\n[1] https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/782624","commit_id":"a1fb7180264e086e8d59dbd89d8f55e2773a88c5"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"b6c517ce7653889f8bb556aedc96a9cc5ad2e650","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":64,"id":"905489a9_40b0dbbd","updated":"2022-05-16 09:38:47.000000000","message":"recheck","commit_id":"a1fb7180264e086e8d59dbd89d8f55e2773a88c5"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"3bf5ec833d95b155ccfd03c8c638e37dbdd5d53c","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":65,"id":"53a70497_72304b68","updated":"2022-05-17 10:08:28.000000000","message":"recheck","commit_id":"38acb43f7297149c35d058acb4185ddade984f66"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"2084c16c91da828347b45290d06823db1f11deb0","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":66,"id":"2df2d1ef_d30d046b","updated":"2022-06-06 09:22:06.000000000","message":"This change has, at this point, all the elements to have a ceph cluster deployed by cephadm.\nSince this code is growing a lot, if there\u0027s enough quorum I would like to move forward with this review and merge it.\nNote that:\n\n1. cephadm isn\u0027t the default way used by the devstack CI, but we need CEPHADM_DEPLOY\u003dTrue to enable this way of deploying Ceph\n\n2. As a follow up of this patch, we can propose a new set of jobs (DNM jobs) to test this scenario in CI.\n\n3. When the CI jobs are green, stable, and we know that everything is covered, we can propose a default change and put the cephadm jobs as \"Voting\".\n\nAlso, having this code merged helps the manila team to move forward with the development of the ceph-nfs driver change. 
\nThis driver change is required to think about making the switch to cephadm in the future.","commit_id":"28f9afd4814e8a176fc1a18fa53e59c366eeaa9d"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"1b5f05e3feae30e0d17bc99ab23f3292e036a1ca","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":66,"id":"fd033801_5dc671fe","in_reply_to":"2df2d1ef_d30d046b","updated":"2022-06-06 09:27:31.000000000","message":"Any reason for not adding at least a few non-voting and/or experimental jobs to this review? It would make sense to make sure we are not breaking anything between the change itself and a future follow-up change with the jobs. We would know the results were the expected ones in this same review.","commit_id":"28f9afd4814e8a176fc1a18fa53e59c366eeaa9d"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"aac04f8ee794247dc9c34841f70b273d48f5465c","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":66,"id":"023442be_308b1ef6","in_reply_to":"fd033801_5dc671fe","updated":"2022-06-06 09:35:49.000000000","message":"+1, I\u0027ll propose a follow up patch based on [1], while I guess manila has to do the same (non-voting jobs based on cephadm) on manila-tempest-plugin (like [2])\n\n[1] https://review.opendev.org/c/openstack/devstack-plugin-ceph/+/834223\n[2] https://review.opendev.org/c/openstack/manila-tempest-plugin/+/834220","commit_id":"28f9afd4814e8a176fc1a18fa53e59c366eeaa9d"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"665576c632046b920e6df4f0598321fb199aeace","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":72,"id":"b4f86281_8ad95bf1","updated":"2022-06-06 12:20:13.000000000","message":"recheck","commit_id":"e803150687bad08ddc968fc453d5e192f755a03e"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"8c24e9b5c614279dc856d4c32a9dea093d31a4f0","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":73,"id":"04734c17_a437b49c","updated":"2022-06-07 12:57:46.000000000","message":"recheck","commit_id":"3e307a7f54039e2ec9112aec218abff0be3e94ad"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"73152ecf5847a429363afe6bf08f3aa4365b60c8","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":73,"id":"6b0236d8_0c700c8d","updated":"2022-06-13 11:36:45.000000000","message":"recheck","commit_id":"3e307a7f54039e2ec9112aec218abff0be3e94ad"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"e69e1924b9e443909e57ca8cbbf682426af8133d","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":73,"id":"c4a82658_a8b4b7a5","updated":"2022-06-13 16:46:04.000000000","message":"recheck","commit_id":"3e307a7f54039e2ec9112aec218abff0be3e94ad"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"6d439657d7ec5d3c33396d3d74b21c57eb579b3e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":76,"id":"2b5d6bd7_8b5b9d7f","updated":"2022-06-20 16:24:33.000000000","message":"Before merging this, let\u0027s try to reduce the amount of 
parameters that a user needs to specify to enable the new deployment model. Ideally you should just need to set one variable. But for sure a few of them can be removed.","commit_id":"286021b89465034034f5f81bb952710e2b4244b2"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":78,"id":"41b63df7_14a9dbdb","updated":"2022-06-21 19:27:49.000000000","message":"Thanks Francesco and Victoria; some comments inline.. ","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"e506581beb7b5735608dee3f62c22d682d3eaf3c","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":81,"id":"404a978a_10116cc2","updated":"2022-06-22 11:00:06.000000000","message":"We\u0027re losing connection to the database; weirdly in jobs which shouldn\u0027t be affected by this change: \n\nhttps://zuul.opendev.org/t/openstack/build/25821326f6d440e9b3b0a3aedbbb642b/log/controller/logs/screen-c-api.txt?severity\u003d4 ","commit_id":"db626be7063f12d720fdd0ff9cca60eaf0fd656e"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"d6164c472ccd977646d8f08127a009271f37445f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":81,"id":"824e34ac_7a730d08","updated":"2022-06-22 07:37:44.000000000","message":"recheck","commit_id":"db626be7063f12d720fdd0ff9cca60eaf0fd656e"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"319465dfa37e0d586d306e461c174129845053bc","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":81,"id":"b5f19c9c_c46c09a8","updated":"2022-06-22 10:58:46.000000000","message":"recheck","commit_id":"db626be7063f12d720fdd0ff9cca60eaf0fd656e"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"54b429781c392e02bbd498c5286395cae9e753ec","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"2fa88a42_eb99c701","updated":"2022-06-22 16:01:22.000000000","message":"A note on the CephFS job failures:\n\n1) the ceph native job\u0027s failed a flaky test\n2) the ceph nfs job has failed IPv6 tests - there\u0027s an open bug: https://bugs.launchpad.net/manila/+bug/1940324 ","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"cfd4771c8685e73261257748f4e96ee1f35de718","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"c2c6e678_7b19117e","updated":"2022-06-22 15:58:19.000000000","message":"This LGTM; my only recommendation would be to move the new \"*-cephadm\" jobs to an experimental pipeline until they\u0027re stable. Right now, they\u0027re timing out - I think one strategy could be to split the job and run a fewer number of tests if it helps. 
","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"2e2d8a62aad0323cca3ddd42382bf6f6e1a4a37f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"90debb65_e3d64a69","updated":"2022-06-23 08:03:21.000000000","message":"recheck","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"0cb0955a1a335e4455963bdada4a6eac3469786d","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"46f39cf9_4fb5982a","in_reply_to":"44ce7318_230e10dc","updated":"2022-06-27 15:00:40.000000000","message":"I\u0027ve explicitly asked to include the job definitions in this patch, and I still think it\u0027s the correct way.","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"e6338dbaa62cc350c04c11515975f617101b7664","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"c9d2d9f2_e4e846e4","in_reply_to":"588a930d_97036a76","updated":"2022-06-23 15:36:39.000000000","message":"Hey Goutham, we didn\u0027t have a brainstorming session on the CI but this is a good point. As long as we have 1 ceph cluster deployed, it can be shared as long as keyrings and config are properly distributed to the other nodes.\nThis shouldn\u0027t be too much effort but we have to investigate how to do that.","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"d9b864a72e62d2cbadfe6ad61f986a53b5e297b7","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"44ce7318_230e10dc","in_reply_to":"b1a62513_9adbe016","updated":"2022-06-27 14:48:24.000000000","message":"Ack; yeah \"experimental\" jobs end up getting ignored \n\ni think we need help here to debug the jobs and explore alternatives to get them to pass. I don\u0027t see value in keeping the jobs at all in this patch if no one is going to fix them in the short term. \n\nWe could place the job definitions into another patch to get someone started on this if you think it\u0027s a good idea....","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"75ea8465e3ab407c2e2e7609c2ce5d995beeae7e","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"588a930d_97036a76","in_reply_to":"c2c6e678_7b19117e","updated":"2022-06-23 13:07:01.000000000","message":"I was also wondering if we considered running all the jobs multi-node where we have a dedicated single node ceph cluster serving a parallel devstack... 
dunno how much of an effort it is, i feel like it\u0027s worth doing as the resource consumption rises","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"fff3df687833d636249985cf391c9e6dcc49f49a","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":82,"id":"b1a62513_9adbe016","in_reply_to":"c9d2d9f2_e4e846e4","updated":"2022-06-27 13:40:39.000000000","message":"Disagree on the experimental pipeline. You can also remove them, they are going to be forgotten. The point here is moving towards making the new jobs the default, which means they need to stay visible.","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"3bc06b8f76c776eabb8539862932346290b417bd","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":83,"id":"b75a9c1f_ba0b3ff7","updated":"2022-06-27 17:29:13.000000000","message":"LGTM; thanks Francesco. ","commit_id":"bba01bdd2f7c50635bb7656e8c043064b422e085"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"dfba5c790e40b04d82643db394997ffa977f9e9f","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":83,"id":"d2570872_59fa760a","updated":"2022-06-29 16:43:27.000000000","message":"Lets get this in; and keep working on it! Thanks Francesco, Victoria and everyone!","commit_id":"bba01bdd2f7c50635bb7656e8c043064b422e085"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"b1b6f15cf6a2abcfb6dbb0a2ec132704a231dd87","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":83,"id":"07efb368_6604efbb","updated":"2022-06-30 08:07:22.000000000","message":"recheck","commit_id":"bba01bdd2f7c50635bb7656e8c043064b422e085"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"9d5205a7dc7e724c1a399b3f3d198a1df6ab7981","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":83,"id":"0b9e44fb_fede7022","updated":"2022-06-29 19:04:02.000000000","message":"recheck\n\ndevstack patch that this change depends on has merged","commit_id":"bba01bdd2f7c50635bb7656e8c043064b422e085"}],"devstack/lib/cephadm":[{"author":{"_account_id":4523,"name":"Eric Harney","email":"eharney@redhat.com","username":"eharney"},"change_message_id":"eaa70d3bd3af9eb8c2f811969ca048ecc1bcbbfa","unresolved":true,"context_lines":[{"line_number":187,"context_line":"# Install ceph: create a loopback device to be used as osd"},{"line_number":188,"context_line":"function dd_ceph (){"},{"line_number":189,"context_line":" sudo dd if\u003d/dev/zero of\u003d/var/lib/ceph-osd.img bs\u003d1 count\u003d0 seek\u003d7G"},{"line_number":190,"context_line":" sudo losetup /dev/loop2 /var/lib/ceph-osd.img"},{"line_number":191,"context_line":" sudo pvcreate  /dev/loop2"},{"line_number":192,"context_line":" sudo vgcreate ceph_vg /dev/loop2"},{"line_number":193,"context_line":" sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg"}],"source_content_type":"application/x-shellscript","patch_set":1,"id":"7f388a29_3fa967af","line":190,"range":{"start_line":190,"start_character":14,"end_line":190,"end_character":24},"updated":"2022-01-26 
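For context, the two options being weighed above map onto the project configuration roughly as follows (a sketch using standard Zuul pipeline syntax, not taken from the patch; only the job names come from this review):

    - project:
        check:
          jobs:
            # Luigi's preference: keep the job visible in check, but non-voting.
            - devstack-plugin-ceph-tempest-cephadm:
                voting: false
        experimental:
          jobs:
            # Goutham's suggestion: park it in experimental until it is stable.
            - devstack-plugin-ceph-multinode-tempest-cephadm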
File: devstack/lib/cephadm

Line 190, patch set 1 -- function dd_ceph, "sudo losetup /dev/loop2 /var/lib/ceph-osd.img"

  Eric Harney (2022-01-26):
    You will want to use "losetup -f" and consume the device name it prints out, rather than
    hard-coding /dev/loop2.

  Victoria Martinez de la Cruz (2022-01-27):
    Ack

Line 195, patch set 5 -- function dd_ceph, "free_device=$(losetup -f)"

  Eric Harney (2022-01-27):
    Sorry, this still races between lines 195 and 197 if someone else is manipulating
    loopback devices -- you want something more like:
        DEV=`sudo losetup -f --show /var/lib/ceph-osd.img`
        sudo pvcreate $DEV
    etc.

  Victoria Martinez de la Cruz (2022-01-27):
    Like this?
        osd_dev=$(sudo losetup -f --show /var/lib/ceph-osd.img)
        sudo pvcreate $osd_dev
        sudo vgcreate ceph_vg $osd_dev
        sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg
        DEVICES+=("ceph_lv_data")

  Eric Harney (2022-01-27):
    Yes, looks great.
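Putting the thread together, the function ends up roughly as below (a sketch assembled from the snippets quoted above; the image path and 7G size come from the earlier patch sets, while the local declaration and comments are added here):

    # Install ceph: create a loopback device to be used as an OSD.
    # "losetup -f --show" atomically attaches the image to the first free loop
    # device and prints its name, avoiding the race of picking a device first.
    function dd_ceph {
        sudo dd if=/dev/zero of=/var/lib/ceph-osd.img bs=1 count=0 seek=7G
        local osd_dev
        osd_dev=$(sudo losetup -f --show /var/lib/ceph-osd.img)
        sudo pvcreate $osd_dev
        sudo vgcreate ceph_vg $osd_dev
        sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg
        DEVICES+=("ceph_lv_data")
    }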
required dependencies"},{"line_number":98,"context_line":"function install_deps {"},{"line_number":99,"context_line":"    install_package jq"},{"line_number":100,"context_line":"    # install_package podman"},{"line_number":101,"context_line":"}"},{"line_number":102,"context_line":""},{"line_number":103,"context_line":"# Pre-install ceph: show cluster status"}],"source_content_type":"application/x-shellscript","patch_set":7,"id":"4990689f_dc21b043","line":100,"in_reply_to":"b5cc8bf8_ffa7c84a","updated":"2022-03-14 22:24:44.000000000","message":"Resolving this comment now","commit_id":"cc8dce1c52955bf4b58e985f6a9171af07a27fab"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"6a072f2db02a9cae49f65b1033ef7fe4334b76e2","unresolved":true,"context_lines":[{"line_number":97,"context_line":"# Pre-install ceph: install required dependencies"},{"line_number":98,"context_line":"function install_deps {"},{"line_number":99,"context_line":"    install_package jq"},{"line_number":100,"context_line":"    # install_package podman"},{"line_number":101,"context_line":"}"},{"line_number":102,"context_line":""},{"line_number":103,"context_line":"# Pre-install ceph: show cluster status"}],"source_content_type":"application/x-shellscript","patch_set":7,"id":"b5cc8bf8_ffa7c84a","line":100,"in_reply_to":"c45c4181_f11f5269","updated":"2022-03-11 12:04:33.000000000","message":"Thanks Eliad! Yes, exactly what I needed. Still, will try to modify things to use podman since it ships natively with most distros now.","commit_id":"cc8dce1c52955bf4b58e985f6a9171af07a27fab"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"61fe7ccce16657bfc6bab427d178ea33aea657a3","unresolved":true,"context_lines":[{"line_number":370,"context_line":"    cluster\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":371,"context_line":"    if [ -n \"$cluster\" ]; then"},{"line_number":372,"context_line":"        sudo cephadm rm-cluster --zap-osds --fsid \"$FSID\" --force"},{"line_number":373,"context_line":"        delete_osd_dev"},{"line_number":374,"context_line":"        echo \"[CEPHADM] Cluster deleted\""},{"line_number":375,"context_line":"    fi"},{"line_number":376,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":10,"id":"c3e4f938_b3be26ae","line":373,"range":{"start_line":373,"start_character":8,"end_line":373,"end_character":22},"updated":"2022-02-10 12:38:14.000000000","message":"I don\u0027t need this line here, as you can use devices !\u003d *_dev, fixing in the next PS","commit_id":"17a109b7d6e7a979c0b6e64fcf2c364a9cdadb80"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"11c9a5dd4643799f7492036a601f0dfbb3cc2779","unresolved":false,"context_lines":[{"line_number":370,"context_line":"    cluster\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":371,"context_line":"    if [ -n \"$cluster\" ]; then"},{"line_number":372,"context_line":"        sudo cephadm rm-cluster --zap-osds --fsid \"$FSID\" --force"},{"line_number":373,"context_line":"        delete_osd_dev"},{"line_number":374,"context_line":"        echo \"[CEPHADM] Cluster deleted\""},{"line_number":375,"context_line":"    
fi"},{"line_number":376,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":10,"id":"f0fa10be_cddfa62f","line":373,"range":{"start_line":373,"start_character":8,"end_line":373,"end_character":22},"in_reply_to":"c3e4f938_b3be26ae","updated":"2022-02-24 09:48:13.000000000","message":"Done","commit_id":"17a109b7d6e7a979c0b6e64fcf2c364a9cdadb80"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"32342033b48b7a416357bca8bb5d0fde921f7b1d","unresolved":true,"context_lines":[{"line_number":299,"context_line":"    # Two pools are generated by this action"},{"line_number":300,"context_line":"    # - $FSNAME.FSNAME.data"},{"line_number":301,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":302,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":303,"context_line":"        --keyring $KEYRING -- ceph orch apply mds \"$FSNAME\" \\"},{"line_number":304,"context_line":"        --placement\u003d\"$HOSTNAME\""},{"line_number":305,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":306,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\" \\"},{"line_number":307,"context_line":"        --placement\u003d\"$HOSTNAME\""}],"source_content_type":"application/x-shellscript","patch_set":12,"id":"7a09a996_df65cd8d","line":304,"range":{"start_line":302,"start_character":0,"end_line":304,"end_character":31},"updated":"2022-02-23 17:19:47.000000000","message":"check if this is necessary, the line below is supposed to create mds daemon/s as well..","commit_id":"ae18d31b8f27204340e5531e12f094ae33acaef3"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"515b724cc01b993ee5daf80a7ffcec6d47c79dc2","unresolved":true,"context_lines":[{"line_number":299,"context_line":"    # Two pools are generated by this action"},{"line_number":300,"context_line":"    # - $FSNAME.FSNAME.data"},{"line_number":301,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":302,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":303,"context_line":"        --keyring $KEYRING -- ceph orch apply mds \"$FSNAME\" \\"},{"line_number":304,"context_line":"        --placement\u003d\"$HOSTNAME\""},{"line_number":305,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":306,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\" \\"},{"line_number":307,"context_line":"        --placement\u003d\"$HOSTNAME\""}],"source_content_type":"application/x-shellscript","patch_set":12,"id":"ce99858f_682d9166","line":304,"range":{"start_line":302,"start_character":0,"end_line":304,"end_character":31},"in_reply_to":"7a09a996_df65cd8d","updated":"2022-02-24 08:18:03.000000000","message":"+1, when `ceph fs volume create ` is called, 2 mds daemons are created. 
We can problably remove that line and save 1 command.","commit_id":"ae18d31b8f27204340e5531e12f094ae33acaef3"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"9b854d9a210c846ff1ef68ffc9dbf02b5f243fc1","unresolved":false,"context_lines":[{"line_number":299,"context_line":"    # Two pools are generated by this action"},{"line_number":300,"context_line":"    # - $FSNAME.FSNAME.data"},{"line_number":301,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":302,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":303,"context_line":"        --keyring $KEYRING -- ceph orch apply mds \"$FSNAME\" \\"},{"line_number":304,"context_line":"        --placement\u003d\"$HOSTNAME\""},{"line_number":305,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":306,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\" \\"},{"line_number":307,"context_line":"        --placement\u003d\"$HOSTNAME\""}],"source_content_type":"application/x-shellscript","patch_set":12,"id":"59ee1c0c_d593a595","line":304,"range":{"start_line":302,"start_character":0,"end_line":304,"end_character":31},"in_reply_to":"ce99858f_682d9166","updated":"2022-02-24 08:54:36.000000000","message":"Done","commit_id":"ae18d31b8f27204340e5531e12f094ae33acaef3"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"d7ecb5b86a0483cb31e306340fddb453016c2824","unresolved":true,"context_lines":[{"line_number":304,"context_line":"        --placement\u003d\"$HOSTNAME\""},{"line_number":305,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":306,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\" \\"},{"line_number":307,"context_line":"        --placement\u003d\"$HOSTNAME\""},{"line_number":308,"context_line":"}"},{"line_number":309,"context_line":""},{"line_number":310,"context_line":"# Install ceph: add NFS"}],"source_content_type":"application/x-shellscript","patch_set":12,"id":"4098bfde_86a7cacc","line":307,"range":{"start_line":307,"start_character":8,"end_line":307,"end_character":31},"updated":"2022-02-23 17:21:03.000000000","message":"this doesn\u0027t seem to need \"--placement\": https://docs.ceph.com/en/latest/cephfs/fs-volumes/#fs-volumes","commit_id":"ae18d31b8f27204340e5531e12f094ae33acaef3"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"5f12a58c106ddb28c9db5e91199708da1279fdd5","unresolved":false,"context_lines":[{"line_number":304,"context_line":"        --placement\u003d\"$HOSTNAME\""},{"line_number":305,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":306,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\" \\"},{"line_number":307,"context_line":"        --placement\u003d\"$HOSTNAME\""},{"line_number":308,"context_line":"}"},{"line_number":309,"context_line":""},{"line_number":310,"context_line":"# Install ceph: add NFS"}],"source_content_type":"application/x-shellscript","patch_set":12,"id":"c7d31f1a_d7624e68","line":307,"range":{"start_line":307,"start_character":8,"end_line":307,"end_character":31},"in_reply_to":"4098bfde_86a7cacc","updated":"2022-02-24 
08:57:23.000000000","message":"Done","commit_id":"ae18d31b8f27204340e5531e12f094ae33acaef3"},{"author":{"_account_id":4523,"name":"Eric Harney","email":"eharney@redhat.com","username":"eharney"},"change_message_id":"a11a91a8de308f10d00423528a9fdb2e288f1542","unresolved":true,"context_lines":[{"line_number":29,"context_line":"ATTEMPTS\u003d30"},{"line_number":30,"context_line":"CONTAINER_IMAGE\u003d${CONTAINER_IMAGE:-\u0027quay.io/ceph/ceph:v16.2.7\u0027}"},{"line_number":31,"context_line":"DEVICES\u003d()"},{"line_number":32,"context_line":"FSID\u003d\"4b5c8c0a-ff60-454b-a1b4-9747aa737d19\""},{"line_number":33,"context_line":"KEY_EXPORT_DIR\u003d\"/etc/ceph\""},{"line_number":34,"context_line":"KEYS\u003d(\"client.openstack\") # at least the client.openstack default key should be created"},{"line_number":35,"context_line":"MIN_OSDS\u003d1"}],"source_content_type":"application/x-shellscript","patch_set":30,"id":"8e473236_f453b3d8","line":32,"range":{"start_line":32,"start_character":0,"end_line":32,"end_character":5},"updated":"2022-03-14 21:48:54.000000000","message":"It looks like it would be better to call \"uuidgen\" for this during deployment rather than always using the same fsid?\n\nThis seems likely to lead to hard to debug issues on repeated runs etc. in developer environments.","commit_id":"d7231c5eedc95dae79f90b510120c38d65933ae7"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"bd79a997410e783010f9b2286d63c9364151223a","unresolved":false,"context_lines":[{"line_number":29,"context_line":"ATTEMPTS\u003d30"},{"line_number":30,"context_line":"CONTAINER_IMAGE\u003d${CONTAINER_IMAGE:-\u0027quay.io/ceph/ceph:v16.2.7\u0027}"},{"line_number":31,"context_line":"DEVICES\u003d()"},{"line_number":32,"context_line":"FSID\u003d\"4b5c8c0a-ff60-454b-a1b4-9747aa737d19\""},{"line_number":33,"context_line":"KEY_EXPORT_DIR\u003d\"/etc/ceph\""},{"line_number":34,"context_line":"KEYS\u003d(\"client.openstack\") # at least the client.openstack default key should be created"},{"line_number":35,"context_line":"MIN_OSDS\u003d1"}],"source_content_type":"application/x-shellscript","patch_set":30,"id":"9be836e4_7432eaf2","line":32,"range":{"start_line":32,"start_character":0,"end_line":32,"end_character":5},"in_reply_to":"8e473236_f453b3d8","updated":"2022-03-14 22:12:30.000000000","message":"Agree, this should be generated in every run","commit_id":"d7231c5eedc95dae79f90b510120c38d65933ae7"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"af32cef0dec098f861c106e6666e54e5edc5b6df","unresolved":true,"context_lines":[{"line_number":317,"context_line":"function cephfs_config {"},{"line_number":318,"context_line":"    # Two pools are generated by this action"},{"line_number":319,"context_line":"    # - $FSNAME.FSNAME.data"},{"line_number":320,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":321,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":322,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":323,"context_line":"    if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} \u003d\u003d \u0027True\u0027 ]]; then"}],"source_content_type":"application/x-shellscript","patch_set":37,"id":"6697270e_3b197344","line":320,"updated":"2022-03-21 18:16:15.000000000","message":"and deploys MDS 
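
A minimal sketch of the per-run fsid generation agreed on in the thread above, assuming uuidgen is available on the host and keeping the surrounding defaults from the reviewed script; the exact final patch may differ:

    # DEFAULT OPTIONS
    ATTEMPTS=30
    CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v16.2.7'}
    DEVICES=()
    # Generate a fresh cluster fsid for every deployment instead of
    # hard-coding one, so repeated stack/unstack runs on a developer
    # machine never reuse a stale cluster id.
    FSID=${FSID:-$(uuidgen)}
    KEY_EXPORT_DIR="/etc/ceph"
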
daemons","commit_id":"39de5141fdefe2a65ea1e0bf4e12de6398da3f32"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"b670b1356f70e401f9fc4d1b9706ddca093667fa","unresolved":false,"context_lines":[{"line_number":317,"context_line":"function cephfs_config {"},{"line_number":318,"context_line":"    # Two pools are generated by this action"},{"line_number":319,"context_line":"    # - $FSNAME.FSNAME.data"},{"line_number":320,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":321,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":322,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":323,"context_line":"    if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} \u003d\u003d \u0027True\u0027 ]]; then"}],"source_content_type":"application/x-shellscript","patch_set":37,"id":"7d8d6ec8_4d8ecbdb","line":320,"in_reply_to":"6697270e_3b197344","updated":"2022-03-21 20:06:35.000000000","message":"Done","commit_id":"39de5141fdefe2a65ea1e0bf4e12de6398da3f32"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"af32cef0dec098f861c106e6666e54e5edc5b6df","unresolved":true,"context_lines":[{"line_number":321,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":322,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":323,"context_line":"    if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} \u003d\u003d \u0027True\u0027 ]]; then"},{"line_number":324,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":325,"context_line":"            --keyring $KEYRING -- ceph fs flag set enable_multiple true \\"},{"line_number":326,"context_line":"            --yes-i-really-mean-it"},{"line_number":327,"context_line":"        # Enable snapshots in CephFS."}],"source_content_type":"application/x-shellscript","patch_set":37,"id":"661d7cdb_5d3c109b","line":324,"updated":"2022-03-21 18:16:15.000000000","message":"This is automatically enabled in new pacific(16.2.x) Ceph clusters\nhttps://docs.ceph.com/en/latest/releases/pacific/#cephfs-distributed-file-system\nSo I think you can remove this if you\u0027re testing only new pacific Ceph clusters.","commit_id":"39de5141fdefe2a65ea1e0bf4e12de6398da3f32"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"b670b1356f70e401f9fc4d1b9706ddca093667fa","unresolved":false,"context_lines":[{"line_number":321,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":322,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":323,"context_line":"    if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} \u003d\u003d \u0027True\u0027 ]]; then"},{"line_number":324,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":325,"context_line":"            --keyring $KEYRING -- ceph fs flag set enable_multiple true \\"},{"line_number":326,"context_line":"            --yes-i-really-mean-it"},{"line_number":327,"context_line":"        # Enable snapshots in CephFS."}],"source_content_type":"application/x-shellscript","patch_set":37,"id":"d616309a_49ec20cc","line":324,"in_reply_to":"661d7cdb_5d3c109b","updated":"2022-03-21 20:06:35.000000000","message":"oh nice! 
Thanks for looking into it: given we have multiple_fs enabled by default, I\u0027m going to remove this part in the next PS.","commit_id":"39de5141fdefe2a65ea1e0bf4e12de6398da3f32"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"af32cef0dec098f861c106e6666e54e5edc5b6df","unresolved":true,"context_lines":[{"line_number":324,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":325,"context_line":"            --keyring $KEYRING -- ceph fs flag set enable_multiple true \\"},{"line_number":326,"context_line":"            --yes-i-really-mean-it"},{"line_number":327,"context_line":"        # Enable snapshots in CephFS."},{"line_number":328,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":329,"context_line":"            --keyring $KEYRING -- ceph fs set $FSNAME allow_new_snaps true \\"},{"line_number":330,"context_line":"            --yes-i-really-mean-it"}],"source_content_type":"application/x-shellscript","patch_set":37,"id":"f95c9581_9906b49a","line":327,"updated":"2022-03-21 18:16:15.000000000","message":"I think snapshots have been enabled by default for file systems since nautilus. Snapshots for multiple file systems are expected to work since pacific. Maybe you can remove the following too?","commit_id":"39de5141fdefe2a65ea1e0bf4e12de6398da3f32"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"b670b1356f70e401f9fc4d1b9706ddca093667fa","unresolved":false,"context_lines":[{"line_number":324,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":325,"context_line":"            --keyring $KEYRING -- ceph fs flag set enable_multiple true \\"},{"line_number":326,"context_line":"            --yes-i-really-mean-it"},{"line_number":327,"context_line":"        # Enable snapshots in CephFS."},{"line_number":328,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":329,"context_line":"            --keyring $KEYRING -- ceph fs set $FSNAME allow_new_snaps true \\"},{"line_number":330,"context_line":"            --yes-i-really-mean-it"}],"source_content_type":"application/x-shellscript","patch_set":37,"id":"b166153a_8d55a649","line":327,"in_reply_to":"f95c9581_9906b49a","updated":"2022-03-21 20:06:35.000000000","message":"Done","commit_id":"39de5141fdefe2a65ea1e0bf4e12de6398da3f32"},{"author":{"_account_id":4523,"name":"Eric Harney","email":"eharney@redhat.com","username":"eharney"},"change_message_id":"4cdafcaf8d4737703e6a0475fed89cf67d28708a","unresolved":true,"context_lines":[{"line_number":49,"context_line":"FSNAME\u003d${FSNAME:-\u0027cephfs\u0027}"},{"line_number":50,"context_line":"NFS_PORT\u003d12345"},{"line_number":51,"context_line":"CEPHFS_CLIENT\u003d0"},{"line_number":52,"context_line":"CEPHFS_CLIENT_LOG\u003d\"/var/log/ceph-$CEPHFS_CLIENT_NAME.log\""},{"line_number":53,"context_line":"CEPHFS_CLIENT_NAME\u003d\"client.manila\""},{"line_number":54,"context_line":"CEPHFS_MULTIPLE_FILESYSTEMS\u003d${CEPHFS_MULTIPLE_FILESYSTEMS:-False}"},{"line_number":55,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":49,"id":"24b344ac_7a8f8d7c","line":52,"range":{"start_line":52,"start_character":33,"end_line":52,"end_character":52},"updated":"2022-04-05 22:27:55.000000000","message":"Doesn\u0027t line 53 need to come first 
here?","commit_id":"ad3c9d4b846fb9e947d0ce024fd9fd96751c1f77"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"ca09090529aa8ceb5073b365bd0bc9820e12cd03","unresolved":false,"context_lines":[{"line_number":49,"context_line":"FSNAME\u003d${FSNAME:-\u0027cephfs\u0027}"},{"line_number":50,"context_line":"NFS_PORT\u003d12345"},{"line_number":51,"context_line":"CEPHFS_CLIENT\u003d0"},{"line_number":52,"context_line":"CEPHFS_CLIENT_LOG\u003d\"/var/log/ceph-$CEPHFS_CLIENT_NAME.log\""},{"line_number":53,"context_line":"CEPHFS_CLIENT_NAME\u003d\"client.manila\""},{"line_number":54,"context_line":"CEPHFS_MULTIPLE_FILESYSTEMS\u003d${CEPHFS_MULTIPLE_FILESYSTEMS:-False}"},{"line_number":55,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":49,"id":"f61a553e_de5c6b6e","line":52,"range":{"start_line":52,"start_character":33,"end_line":52,"end_character":52},"in_reply_to":"24b344ac_7a8f8d7c","updated":"2022-04-06 06:08:04.000000000","message":"Done","commit_id":"ad3c9d4b846fb9e947d0ce024fd9fd96751c1f77"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"dd0ee139abe09a58de33ae13595b3271de6386c1","unresolved":true,"context_lines":[{"line_number":495,"context_line":"    fi"},{"line_number":496,"context_line":"    "},{"line_number":497,"context_line":"    CLUSTER_DELETED \u003d 1"},{"line_number":498,"context_line":"    while : ; do"},{"line_number":499,"context_line":"        CLUSTER_FSID\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":500,"context_line":"        if [ -n $CLUSTER_FSID ]; then"},{"line_number":501,"context_line":"            sudo cephadm rm-cluster --zap-osds --fsid $CLUSTER_FSID --force"}],"source_content_type":"application/x-shellscript","patch_set":53,"id":"1b87e0f1_cf4f5c7b","line":498,"updated":"2022-04-13 10:01:38.000000000","message":"I guess this is WIP, but I think there may be some kind of timeout - or maybe not important, as this is not needed when you unstack, so not a big problem if it\u0027s stuck?","commit_id":"6bae095c0c066ee7b7abe894a4dc8fafbd8021cf"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"4f9044cf123be862845c4fef9b4238296d6ddf41","unresolved":true,"context_lines":[{"line_number":495,"context_line":"    fi"},{"line_number":496,"context_line":"    "},{"line_number":497,"context_line":"    CLUSTER_DELETED \u003d 1"},{"line_number":498,"context_line":"    while : ; do"},{"line_number":499,"context_line":"        CLUSTER_FSID\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":500,"context_line":"        if [ -n $CLUSTER_FSID ]; then"},{"line_number":501,"context_line":"            sudo cephadm rm-cluster --zap-osds --fsid $CLUSTER_FSID --force"}],"source_content_type":"application/x-shellscript","patch_set":53,"id":"5218a4b7_a45a024e","line":498,"in_reply_to":"1b87e0f1_cf4f5c7b","updated":"2022-04-13 10:08:16.000000000","message":"We can fail if the cluster is not deleted on the first try, or we can implement a timeout","commit_id":"6bae095c0c066ee7b7abe894a4dc8fafbd8021cf"},{"author":{"_account_id":25402,"name":"Francesco 
Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"74254e515e7b13c4ae777f447f9f690f1a068f98","unresolved":true,"context_lines":[{"line_number":495,"context_line":"    fi"},{"line_number":496,"context_line":"    "},{"line_number":497,"context_line":"    CLUSTER_DELETED \u003d 1"},{"line_number":498,"context_line":"    while : ; do"},{"line_number":499,"context_line":"        CLUSTER_FSID\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":500,"context_line":"        if [ -n $CLUSTER_FSID ]; then"},{"line_number":501,"context_line":"            sudo cephadm rm-cluster --zap-osds --fsid $CLUSTER_FSID --force"}],"source_content_type":"application/x-shellscript","patch_set":53,"id":"52f0c069_22ef5e68","line":498,"in_reply_to":"5218a4b7_a45a024e","updated":"2022-04-13 10:26:20.000000000","message":"Right, and I agree we should improve this function to make sure we\u0027re properly covering unstack/clean.\nI\u0027m wondering if this can be part of a follow up patch where we can focus on small changes as this code is growing a lot :D \nAnyway this is a good point, thanks Luigi for your input!","commit_id":"6bae095c0c066ee7b7abe894a4dc8fafbd8021cf"},{"author":{"_account_id":10459,"name":"Luigi Toscano","email":"ltoscano@redhat.com","username":"ltoscano"},"change_message_id":"6d439657d7ec5d3c33396d3d74b21c57eb579b3e","unresolved":true,"context_lines":[{"line_number":495,"context_line":"    fi"},{"line_number":496,"context_line":"    "},{"line_number":497,"context_line":"    CLUSTER_DELETED \u003d 1"},{"line_number":498,"context_line":"    while : ; do"},{"line_number":499,"context_line":"        CLUSTER_FSID\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":500,"context_line":"        if [ -n $CLUSTER_FSID ]; then"},{"line_number":501,"context_line":"            sudo cephadm rm-cluster --zap-osds --fsid $CLUSTER_FSID --force"}],"source_content_type":"application/x-shellscript","patch_set":53,"id":"96318f2c_bda0a78f","line":498,"in_reply_to":"52f0c069_22ef5e68","updated":"2022-06-20 16:24:33.000000000","message":"Can you please add at least a FIXME here so that it\u0027s not forgotten?","commit_id":"6bae095c0c066ee7b7abe894a4dc8fafbd8021cf"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"8c70868e59575dbb1ba3011722dd15f6b53cf942","unresolved":true,"context_lines":[{"line_number":495,"context_line":"    fi"},{"line_number":496,"context_line":"    "},{"line_number":497,"context_line":"    CLUSTER_DELETED \u003d 1"},{"line_number":498,"context_line":"    while : ; do"},{"line_number":499,"context_line":"        CLUSTER_FSID\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":500,"context_line":"        if [ -n $CLUSTER_FSID ]; then"},{"line_number":501,"context_line":"            sudo cephadm rm-cluster --zap-osds --fsid $CLUSTER_FSID --force"}],"source_content_type":"application/x-shellscript","patch_set":53,"id":"db1874b8_f677bcdb","line":498,"in_reply_to":"96318f2c_bda0a78f","updated":"2022-06-20 21:01:07.000000000","message":"I see there\u0027s a counter here to address a scenario where the cluster is not removed.\nI thought Vic already fixed it, and I don\u0027t have any issue with unstack.\n@Vic do we need a FIXME here? 
I remember a PS where you fixed the fsid w/ tr -d \\","commit_id":"6bae095c0c066ee7b7abe894a4dc8fafbd8021cf"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"6cc5a018628d1e42d41f9aefdf286c5cb55f6f8d","unresolved":true,"context_lines":[{"line_number":495,"context_line":"    fi"},{"line_number":496,"context_line":"    "},{"line_number":497,"context_line":"    CLUSTER_DELETED \u003d 1"},{"line_number":498,"context_line":"    while : ; do"},{"line_number":499,"context_line":"        CLUSTER_FSID\u003d$(sudo cephadm ls | jq \u0027.[]\u0027 | jq \u0027select(.name | test(\"^mon*\")).fsid\u0027)"},{"line_number":500,"context_line":"        if [ -n $CLUSTER_FSID ]; then"},{"line_number":501,"context_line":"            sudo cephadm rm-cluster --zap-osds --fsid $CLUSTER_FSID --force"}],"source_content_type":"application/x-shellscript","patch_set":53,"id":"7df52bb5_1ca700a2","line":498,"in_reply_to":"db1874b8_f677bcdb","updated":"2022-06-27 13:28:56.000000000","message":"This is fixed now, added a timeout (L508 here), so no need to add a FIXME","commit_id":"6bae095c0c066ee7b7abe894a4dc8fafbd8021cf"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"c4415092e913503be0210bf05eb658be6b5b01ed","unresolved":true,"context_lines":[{"line_number":513,"context_line":"            cluster_deleted\u003d1"},{"line_number":514,"context_line":"            echo \"[CEPHADM] Cluster deleted\""},{"line_number":515,"context_line":"        fi"},{"line_number":516,"context_line":"        timeout--"},{"line_number":517,"context_line":""},{"line_number":518,"context_line":"        [[ \"$cluster_deleted\" -eq 0 \u0026\u0026 \"$timeout\" -eq 0 ]] \u0026\u0026 \\"},{"line_number":519,"context_line":"            echo \"[CEPHADM] Error deleting the cluster\" \u0026\u0026 exit 255"}],"source_content_type":"application/x-shellscript","patch_set":62,"id":"2b93995c_4b7a9299","line":516,"updated":"2022-05-06 08:28:33.000000000","message":"(( timeout-- ))","commit_id":"d3373220597820ea305b685109e653453c3c2393"},{"author":{"_account_id":6413,"name":"Victoria Martinez de la Cruz","email":"victoria@redhat.com","username":"vkmc"},"change_message_id":"8d2cc9d8abef19683bdfcd1510e515433392ee38","unresolved":false,"context_lines":[{"line_number":513,"context_line":"            cluster_deleted\u003d1"},{"line_number":514,"context_line":"            echo \"[CEPHADM] Cluster deleted\""},{"line_number":515,"context_line":"        fi"},{"line_number":516,"context_line":"        timeout--"},{"line_number":517,"context_line":""},{"line_number":518,"context_line":"        [[ \"$cluster_deleted\" -eq 0 \u0026\u0026 \"$timeout\" -eq 0 ]] \u0026\u0026 \\"},{"line_number":519,"context_line":"            echo \"[CEPHADM] Error deleting the cluster\" \u0026\u0026 exit 255"}],"source_content_type":"application/x-shellscript","patch_set":62,"id":"6289023d_47752b78","line":516,"in_reply_to":"2b93995c_4b7a9299","updated":"2022-06-17 08:06:26.000000000","message":"Done","commit_id":"d3373220597820ea305b685109e653453c3c2393"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":19,"context_line":""},{"line_number":20,"context_line":"# GENERIC CEPHADM INTERNAL OPTIONS, DO NOT 
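
A rough sketch of the bounded teardown loop discussed above (detect the running cluster with cephadm ls plus jq, remove it with rm-cluster, and give up after a fixed number of attempts); the counter name and retry budget are illustrative rather than the final patch, and jq -r is used here so the fsid is emitted without the surrounding JSON quotes:

    # Cleanup: delete the cephadm cluster, bounded by a retry budget so
    # unstack/clean cannot loop forever if removal keeps failing.
    function delete_cluster {
        local cluster_deleted=0
        local timeout=10    # illustrative number of attempts
        while [ "$timeout" -gt 0 ]; do
            # -r strips the JSON quotes from the fsid value
            CLUSTER_FSID=$(sudo cephadm ls | jq -r '.[] | select(.name | test("^mon")).fsid')
            if [ -z "$CLUSTER_FSID" ]; then
                cluster_deleted=1
                echo "[CEPHADM] Cluster deleted"
                break
            fi
            sudo cephadm rm-cluster --zap-osds --fsid "$CLUSTER_FSID" --force
            (( timeout-- ))
        done
        [[ "$cluster_deleted" -eq 0 ]] && \
            echo "[CEPHADM] Error deleting the cluster" && exit 255
    }
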
EDIT"},{"line_number":21,"context_line":"CEPH_PUB_KEY\u003d\"/etc/ceph/ceph.pub\""},{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"928616e6_9284b52a","line":22,"range":{"start_line":22,"start_character":0,"end_line":22,"end_character":6},"updated":"2022-06-21 19:27:49.000000000","message":"I would suggest renaming to a more specific variable name here.. CEPH_CONFIG perhaps?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":19,"context_line":""},{"line_number":20,"context_line":"# GENERIC CEPHADM INTERNAL OPTIONS, DO NOT EDIT"},{"line_number":21,"context_line":"CEPH_PUB_KEY\u003d\"/etc/ceph/ceph.pub\""},{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"264dc309_a3cde453","line":22,"range":{"start_line":22,"start_character":0,"end_line":22,"end_character":6},"in_reply_to":"928616e6_9284b52a","updated":"2022-06-21 21:32:12.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":21,"context_line":"CEPH_PUB_KEY\u003d\"/etc/ceph/ceph.pub\""},{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI TO SAVE RESOURCES"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"81fc32d6_fd165ffc","line":24,"range":{"start_line":24,"start_character":0,"end_line":24,"end_character":7},"updated":"2022-06-21 19:27:49.000000000","message":"nit: CEPH_KEYRING","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":21,"context_line":"CEPH_PUB_KEY\u003d\"/etc/ceph/ceph.pub\""},{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" 
\"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI TO SAVE RESOURCES"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"1cd65dd7_aa274a93","line":24,"range":{"start_line":24,"start_character":0,"end_line":24,"end_character":7},"in_reply_to":"81fc32d6_fd165ffc","updated":"2022-06-21 21:32:12.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI TO SAVE RESOURCES"},{"line_number":28,"context_line":"CEPHADM_SAVE_RESOURCES\u003d${CEPHADM_SAVE_RESOURCES:-True}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"edc4492d_f1085331","line":25,"range":{"start_line":25,"start_character":20,"end_line":25,"end_character":23},"updated":"2022-06-21 19:27:49.000000000","message":"\"lvm2\" can be specified as a system package: https://docs.openstack.org/devstack/latest/plugins.html#system-packages","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI TO SAVE RESOURCES"},{"line_number":28,"context_line":"CEPHADM_SAVE_RESOURCES\u003d${CEPHADM_SAVE_RESOURCES:-True}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"84075746_a2b27c3a","line":25,"range":{"start_line":25,"start_character":26,"end_line":25,"end_character":33},"updated":"2022-06-21 19:27:49.000000000","message":"python3 is installed earlier in the stacking process: https://github.com/openstack/devstack/blob/44d07f300150f7297773a215031ea85cb1f5e205/inc/python#L456-L459\n\nWe can drop this","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"570c14c9c8b6a44dcb29d0e15f6dba8f65b60186","unresolved":false,"context_lines":[{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED 
IN THE CI TO SAVE RESOURCES"},{"line_number":28,"context_line":"CEPHADM_SAVE_RESOURCES\u003d${CEPHADM_SAVE_RESOURCES:-True}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"76b45e09_d7301668","line":25,"range":{"start_line":25,"start_character":20,"end_line":25,"end_character":23},"in_reply_to":"6a4c66fc_205a071b","updated":"2022-06-22 07:39:50.000000000","message":"yeah, the previous run in CI worked, the package is [1], we don\u0027t need this dependency here. Resolving the comment!\n\n[1] https://storage.gra.cloud.ovh.net/v1/AUTH_dcaab5e32b234d56b626f72581e3644c/zuul_opendev_logs_35e/826484/81/check/devstack-plugin-ceph-tempest-cephadm/35e1734/controller/logs/dpkg-l.txt","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI TO SAVE RESOURCES"},{"line_number":28,"context_line":"CEPHADM_SAVE_RESOURCES\u003d${CEPHADM_SAVE_RESOURCES:-True}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"cc32122a_338d83b3","line":25,"range":{"start_line":25,"start_character":26,"end_line":25,"end_character":33},"in_reply_to":"84075746_a2b27c3a","updated":"2022-06-21 21:32:12.000000000","message":"Nice, thanks, removing it!","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":true,"context_lines":[{"line_number":22,"context_line":"CONFIG\u003d\"/etc/ceph/ceph.conf\""},{"line_number":23,"context_line":"BOOTSTRAP_CONFIG\u003d\"$HOME/bootstrap_ceph.conf\""},{"line_number":24,"context_line":"KEYRING\u003d\"/etc/ceph/ceph.client.admin.keyring\""},{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI TO SAVE RESOURCES"},{"line_number":28,"context_line":"CEPHADM_SAVE_RESOURCES\u003d${CEPHADM_SAVE_RESOURCES:-True}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"6a4c66fc_205a071b","line":25,"range":{"start_line":25,"start_character":20,"end_line":25,"end_character":23},"in_reply_to":"edc4492d_f1085331","updated":"2022-06-21 21:32:12.000000000","message":"Removed: in theory this package is already present in the system, but if not we can add it to devstack (with a diff review, using the aforementioned approach).","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI 
TO SAVE RESOURCES"},{"line_number":28,"context_line":"CEPHADM_SAVE_RESOURCES\u003d${CEPHADM_SAVE_RESOURCES:-True}"},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"# DEFAULT OPTIONS"},{"line_number":31,"context_line":"ATTEMPTS\u003d30"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"610294f4_6b8def4a","line":28,"range":{"start_line":28,"start_character":0,"end_line":28,"end_character":54},"updated":"2022-06-21 19:27:49.000000000","message":"this may be too promising; could we instead call this:\n\nDISABLE_CEPHADM_POST_DEPLOY \n\nand default this to FALSE so that we can override only in the CI context?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"7332385da82805d0e0904454c453a70fcdba6754","unresolved":false,"context_lines":[{"line_number":25,"context_line":"REQUIREMENTS\u003d(\"jq\" \"lvm\" \"python3\")"},{"line_number":26,"context_line":"TARGET_BIN\u003d/usr/bin"},{"line_number":27,"context_line":"# TOGGLED IN THE CI TO SAVE RESOURCES"},{"line_number":28,"context_line":"CEPHADM_SAVE_RESOURCES\u003d${CEPHADM_SAVE_RESOURCES:-True}"},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"# DEFAULT OPTIONS"},{"line_number":31,"context_line":"ATTEMPTS\u003d30"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"1e12c5a9_2770c834","line":28,"range":{"start_line":28,"start_character":0,"end_line":28,"end_character":54},"in_reply_to":"610294f4_6b8def4a","updated":"2022-06-21 21:40:39.000000000","message":"Changing the name here as you suggested to reflect what we\u0027re doing with this flag (disabling cephadm) and I\u0027m ok with the variable name (I don\u0027t have a strong opinion on that).\nHowever, as discussed w/ Luigi, in order to have one less parameter in CI, the default value has been changed from False to True.\nI know we need it for nfs to interact w/ the orch, so it makes sense defaulting it to False.\nLet me try using False as default and let\u0027s see how it goes in CI.","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":49,"context_line":"FSNAME\u003d${FSNAME:-\u0027cephfs\u0027}"},{"line_number":50,"context_line":"NFS_PORT\u003d12345"},{"line_number":51,"context_line":"CEPHFS_CLIENT\u003d0"},{"line_number":52,"context_line":"CEPHFS_CLIENT_NAME\u003d\"client.manila\""},{"line_number":53,"context_line":"CEPHFS_CLIENT_LOG\u003d\"/var/log/ceph-$CEPHFS_CLIENT_NAME.log\""},{"line_number":54,"context_line":"CEPHFS_MULTIPLE_FILESYSTEMS\u003d${CEPHFS_MULTIPLE_FILESYSTEMS:-False}"},{"line_number":55,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"13fabac1_77c638f7","line":52,"range":{"start_line":52,"start_character":27,"end_line":52,"end_character":33},"updated":"2022-06-21 19:27:49.000000000","message":"Think you could just use \"client.$MANILA_CEPH_USER\" and drop this variable","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco 
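
For reference, a short sketch of the renamed toggle discussed above, defaulting to False so the post-deploy steps (for example the nfs/orchestrator interaction) run everywhere and only the CI jobs opt out; this assumes the final patch keeps that default:

    # TOGGLED IN THE CI TO SAVE RESOURCES
    # Default False: run the cephadm post-deploy steps; CI jobs that do not
    # need them can export DISABLE_CEPHADM_POST_DEPLOY=True.
    DISABLE_CEPHADM_POST_DEPLOY=${DISABLE_CEPHADM_POST_DEPLOY:-False}
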
Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":49,"context_line":"FSNAME\u003d${FSNAME:-\u0027cephfs\u0027}"},{"line_number":50,"context_line":"NFS_PORT\u003d12345"},{"line_number":51,"context_line":"CEPHFS_CLIENT\u003d0"},{"line_number":52,"context_line":"CEPHFS_CLIENT_NAME\u003d\"client.manila\""},{"line_number":53,"context_line":"CEPHFS_CLIENT_LOG\u003d\"/var/log/ceph-$CEPHFS_CLIENT_NAME.log\""},{"line_number":54,"context_line":"CEPHFS_MULTIPLE_FILESYSTEMS\u003d${CEPHFS_MULTIPLE_FILESYSTEMS:-False}"},{"line_number":55,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"37eec025_a9fd3ddc","line":52,"range":{"start_line":52,"start_character":27,"end_line":52,"end_character":33},"in_reply_to":"13fabac1_77c638f7","updated":"2022-06-21 21:32:12.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":67,"context_line":"RBD_CLIENT_LOG\u003d/var/log/ceph/qemu-guest-\\$pid.log"},{"line_number":68,"context_line":""},{"line_number":69,"context_line":"# MANILA DEFAULTS"},{"line_number":70,"context_line":"MANILA_CEPH_USER\u003d${MANILA_CEPH_USER:-manila}"},{"line_number":71,"context_line":""},{"line_number":72,"context_line":"# GLANCE DEFAULTS"},{"line_number":73,"context_line":"GLANCE_CEPH_USER\u003d${GLANCE_CEPH_USER:-glance}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"ecb6b7dd_b52e10ce","line":70,"updated":"2022-06-21 17:29:40.000000000","message":"what\u0027s MANILA_CEPH_USER for? 
and what\u0027s CEPHFS_CLIENT_NAME for?\ndo we need both?\n\nIs one used by the manila driver\u0027s CephFS client, and another for cephfs client used in scenario testing?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":67,"context_line":"RBD_CLIENT_LOG\u003d/var/log/ceph/qemu-guest-\\$pid.log"},{"line_number":68,"context_line":""},{"line_number":69,"context_line":"# MANILA DEFAULTS"},{"line_number":70,"context_line":"MANILA_CEPH_USER\u003d${MANILA_CEPH_USER:-manila}"},{"line_number":71,"context_line":""},{"line_number":72,"context_line":"# GLANCE DEFAULTS"},{"line_number":73,"context_line":"GLANCE_CEPH_USER\u003d${GLANCE_CEPH_USER:-glance}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"c489b9de_0dec664e","line":70,"in_reply_to":"97d8d27a_6068f330","updated":"2022-06-21 21:32:12.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":67,"context_line":"RBD_CLIENT_LOG\u003d/var/log/ceph/qemu-guest-\\$pid.log"},{"line_number":68,"context_line":""},{"line_number":69,"context_line":"# MANILA DEFAULTS"},{"line_number":70,"context_line":"MANILA_CEPH_USER\u003d${MANILA_CEPH_USER:-manila}"},{"line_number":71,"context_line":""},{"line_number":72,"context_line":"# GLANCE DEFAULTS"},{"line_number":73,"context_line":"GLANCE_CEPH_USER\u003d${GLANCE_CEPH_USER:-glance}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"a26a85b9_e96900cf","line":70,"in_reply_to":"ecb6b7dd_b52e10ce","updated":"2022-06-21 19:27:49.000000000","message":"+1 don\u0027t see why we need both variables, fwiu, they refer to the same ceph client user","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":true,"context_lines":[{"line_number":67,"context_line":"RBD_CLIENT_LOG\u003d/var/log/ceph/qemu-guest-\\$pid.log"},{"line_number":68,"context_line":""},{"line_number":69,"context_line":"# MANILA DEFAULTS"},{"line_number":70,"context_line":"MANILA_CEPH_USER\u003d${MANILA_CEPH_USER:-manila}"},{"line_number":71,"context_line":""},{"line_number":72,"context_line":"# GLANCE DEFAULTS"},{"line_number":73,"context_line":"GLANCE_CEPH_USER\u003d${GLANCE_CEPH_USER:-glance}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"97d8d27a_6068f330","line":70,"in_reply_to":"ecb6b7dd_b52e10ce","updated":"2022-06-21 20:17:44.000000000","message":"MANILA_CEPH_USER is used here [1], and looks like we don\u0027t use MANILA_CEPH_USER anymore. 
\nHowever, it can be used more within this code, starting from L52 where we need to set CEPHFS_CLIENT_NAME.\nI\u0027ll improve this variable usage in the next PS.\n\n[1] https://github.com/openstack/devstack-plugin-ceph/blob/master/devstack/lib/ceph#L101","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":119,"context_line":"## Pre-install"},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"# Pre-install ceph: check depdendencies are available"},{"line_number":122,"context_line":"function prereq {"},{"line_number":123,"context_line":"    for cmd in \"${REQUIREMENTS[@]}\"; do"},{"line_number":124,"context_line":"        if ! command -v \"$cmd\" \u0026\u003e /dev/null; then"},{"line_number":125,"context_line":"            echo \"Command $cmd not found\""},{"line_number":126,"context_line":"            exit 1;"},{"line_number":127,"context_line":"        fi"},{"line_number":128,"context_line":"    done"},{"line_number":129,"context_line":"}"},{"line_number":130,"context_line":""},{"line_number":131,"context_line":"# Pre-install ceph: install podman"},{"line_number":132,"context_line":"function _install_podman {"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"89b3e888_a40d9125","line":129,"range":{"start_line":122,"start_character":0,"end_line":129,"end_character":1},"updated":"2022-06-21 19:27:49.000000000","message":"redundant; if a package installation fails, devstack will bail out","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":119,"context_line":"## Pre-install"},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"# Pre-install ceph: check depdendencies are available"},{"line_number":122,"context_line":"function prereq {"},{"line_number":123,"context_line":"    for cmd in \"${REQUIREMENTS[@]}\"; do"},{"line_number":124,"context_line":"        if ! command -v \"$cmd\" \u0026\u003e /dev/null; then"},{"line_number":125,"context_line":"            echo \"Command $cmd not found\""},{"line_number":126,"context_line":"            exit 1;"},{"line_number":127,"context_line":"        fi"},{"line_number":128,"context_line":"    done"},{"line_number":129,"context_line":"}"},{"line_number":130,"context_line":""},{"line_number":131,"context_line":"# Pre-install ceph: install podman"},{"line_number":132,"context_line":"function _install_podman {"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"b9718064_c359ffc8","line":129,"range":{"start_line":122,"start_character":0,"end_line":129,"end_character":1},"in_reply_to":"89b3e888_a40d9125","updated":"2022-06-21 21:32:12.000000000","message":"I see, we fail early in the process, so this part is not required anymore in the current status. 
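
A small sketch of the variable consolidation suggested in the MANILA_CEPH_USER / CEPHFS_CLIENT_NAME thread above, deriving the CephFS client identity from the manila user so only one knob remains, and defining it before the log path that expands it:

    # MANILA DEFAULTS
    MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila}
    # Derive the client name from the manila user rather than keeping a
    # second, independent variable; it must be set before the log path below.
    CEPHFS_CLIENT_NAME="client.$MANILA_CEPH_USER"
    CEPHFS_CLIENT_LOG="/var/log/ceph-$CEPHFS_CLIENT_NAME.log"
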
Thanks, good catch, and nice hint to make some cleanup.","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":130,"context_line":""},{"line_number":131,"context_line":"# Pre-install ceph: install podman"},{"line_number":132,"context_line":"function _install_podman {"},{"line_number":133,"context_line":"    # FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)"},{"line_number":134,"context_line":"    # Remove when our CI is pushed to the next LTS version"},{"line_number":135,"context_line":"    if [[ $os_CODENAME \u003d~ (focal) ]]; then"},{"line_number":136,"context_line":"        echo \"deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /\" \\"},{"line_number":137,"context_line":"            | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"},{"line_number":138,"context_line":"        curl -L \"https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key\" \\"},{"line_number":139,"context_line":"            | sudo apt-key add -"},{"line_number":140,"context_line":"        sudo apt-get update"},{"line_number":141,"context_line":"        sudo apt-get -y upgrade"},{"line_number":142,"context_line":"    fi"},{"line_number":143,"context_line":"    install_package podman"},{"line_number":144,"context_line":"}"},{"line_number":145,"context_line":""},{"line_number":146,"context_line":"# Pre-install ceph: install required dependencies"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"b7f857f1_8109e3a3","line":143,"range":{"start_line":133,"start_character":0,"end_line":143,"end_character":26},"updated":"2022-06-21 19:27:49.000000000","message":"could we check if podman is installed so we can avoid this step?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":130,"context_line":""},{"line_number":131,"context_line":"# Pre-install ceph: install podman"},{"line_number":132,"context_line":"function _install_podman {"},{"line_number":133,"context_line":"    # FIXME(vkmc) Check required for Ubuntu 20.04 LTS (current CI node)"},{"line_number":134,"context_line":"    # Remove when our CI is pushed to the next LTS version"},{"line_number":135,"context_line":"    if [[ $os_CODENAME \u003d~ (focal) ]]; then"},{"line_number":136,"context_line":"        echo \"deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/ /\" \\"},{"line_number":137,"context_line":"            | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"},{"line_number":138,"context_line":"        curl -L \"https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_20.04/Release.key\" \\"},{"line_number":139,"context_line":"            | sudo apt-key add -"},{"line_number":140,"context_line":"        sudo apt-get update"},{"line_number":141,"context_line":"        sudo apt-get -y upgrade"},{"line_number":142,"context_line":"    fi"},{"line_number":143,"context_line":"    install_package 
podman"},{"line_number":144,"context_line":"}"},{"line_number":145,"context_line":""},{"line_number":146,"context_line":"# Pre-install ceph: install required dependencies"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"59b70585_383d07b0","line":143,"range":{"start_line":133,"start_character":0,"end_line":143,"end_character":26},"in_reply_to":"b7f857f1_8109e3a3","updated":"2022-06-21 21:32:12.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":147,"context_line":"function install_deps {"},{"line_number":148,"context_line":"    install_package jq ceph-common"},{"line_number":149,"context_line":"    _install_podman"},{"line_number":150,"context_line":"    if python3_enabled; then"},{"line_number":151,"context_line":"        install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests"},{"line_number":152,"context_line":"    fi"},{"line_number":153,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"afab74dd_ec3c5f17","line":150,"range":{"start_line":150,"start_character":0,"end_line":150,"end_character":28},"updated":"2022-06-21 19:27:49.000000000","message":"there isn\u0027t any other option anymore","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":147,"context_line":"function install_deps {"},{"line_number":148,"context_line":"    install_package jq ceph-common"},{"line_number":149,"context_line":"    _install_podman"},{"line_number":150,"context_line":"    if python3_enabled; then"},{"line_number":151,"context_line":"        install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests"},{"line_number":152,"context_line":"    fi"},{"line_number":153,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"5967b5d6_4d1aa1da","line":150,"range":{"start_line":150,"start_character":0,"end_line":150,"end_character":28},"in_reply_to":"afab74dd_ec3c5f17","updated":"2022-06-21 21:32:12.000000000","message":"removing the if statement.","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":154,"context_line":""},{"line_number":155,"context_line":"# Pre-install ceph: get cephadm binary"},{"line_number":156,"context_line":"function get_cephadm {"},{"line_number":157,"context_line":"    curl -O https://raw.githubusercontent.com/ceph/ceph/pacific/src/cephadm/cephadm"},{"line_number":158,"context_line":"    $SUDO mv cephadm $TARGET_BIN"},{"line_number":159,"context_line":"    $SUDO chmod +x $TARGET_BIN/cephadm"},{"line_number":160,"context_line":"    echo \"[GET CEPHADM] cephadm is now available\""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"49826016_0c999eb0","line":157,"range":{"start_line":157,"start_character":56,"end_line":157,"end_character":63},"updated":"2022-06-21 
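
A minimal sketch of the "skip if already installed" check asked for above, assuming the devstack install_package helper and leaving out the focal/kubic repository handling quoted in the context lines:

    # Pre-install ceph: install podman only when the binary is missing
    function _install_podman {
        if ! command -v podman &> /dev/null; then
            install_package podman
        fi
    }
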
19:27:49.000000000","message":"CEPH_RELEASE\u003d${CEPH_RELEASE:-pacific} earlier so we can use a variable here","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":154,"context_line":""},{"line_number":155,"context_line":"# Pre-install ceph: get cephadm binary"},{"line_number":156,"context_line":"function get_cephadm {"},{"line_number":157,"context_line":"    curl -O https://raw.githubusercontent.com/ceph/ceph/pacific/src/cephadm/cephadm"},{"line_number":158,"context_line":"    $SUDO mv cephadm $TARGET_BIN"},{"line_number":159,"context_line":"    $SUDO chmod +x $TARGET_BIN/cephadm"},{"line_number":160,"context_line":"    echo \"[GET CEPHADM] cephadm is now available\""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"edab5701_20e3547c","line":157,"range":{"start_line":157,"start_character":56,"end_line":157,"end_character":63},"in_reply_to":"49826016_0c999eb0","updated":"2022-06-21 21:32:12.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":170,"context_line":"[global]"},{"line_number":171,"context_line":"log to file \u003d true"},{"line_number":172,"context_line":"osd crush chooseleaf type \u003d 0"},{"line_number":173,"context_line":"osd_pool_default_pg_num \u003d 8"},{"line_number":174,"context_line":"osd_pool_default_pgp_num \u003d 8"},{"line_number":175,"context_line":"osd_pool_default_size \u003d 1"},{"line_number":176,"context_line":"[mon]"},{"line_number":177,"context_line":"mon_warn_on_pool_no_redundancy \u003d False"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"24c55637_9943f36c","line":174,"range":{"start_line":173,"start_character":0,"end_line":174,"end_character":28},"updated":"2022-06-21 19:27:49.000000000","message":"we\u0027re not supporting the existing \"\u003cSERVICE\u003e_CEPH_POOL_PG\" and \"\u003cSERVICE\u003e_CEPH_POOL_PGP\" intentionally?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":170,"context_line":"[global]"},{"line_number":171,"context_line":"log to file \u003d true"},{"line_number":172,"context_line":"osd crush chooseleaf type \u003d 0"},{"line_number":173,"context_line":"osd_pool_default_pg_num \u003d 8"},{"line_number":174,"context_line":"osd_pool_default_pgp_num \u003d 8"},{"line_number":175,"context_line":"osd_pool_default_size \u003d 1"},{"line_number":176,"context_line":"[mon]"},{"line_number":177,"context_line":"mon_warn_on_pool_no_redundancy \u003d False"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"1d29cff6_02e9f155","line":174,"range":{"start_line":173,"start_character":0,"end_line":174,"end_character":28},"in_reply_to":"24c55637_9943f36c","updated":"2022-06-21 21:32:12.000000000","message":"yes, we should probably drop this part as well but the goal here is to make the CI happy, so leaving these defaults for 
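
A sketch of get_cephadm with the release parameterised as suggested above, reusing the TARGET_BIN and $SUDO values from the reviewed script; only the CEPH_RELEASE default is new here:

    # Pick the Ceph branch used to fetch the standalone cephadm script;
    # override CEPH_RELEASE to test something newer than pacific.
    CEPH_RELEASE=${CEPH_RELEASE:-pacific}

    # Pre-install ceph: get cephadm binary
    function get_cephadm {
        curl -O https://raw.githubusercontent.com/ceph/ceph/$CEPH_RELEASE/src/cephadm/cephadm
        $SUDO mv cephadm $TARGET_BIN
        $SUDO chmod +x $TARGET_BIN/cephadm
        echo "[GET CEPHADM] cephadm is now available"
    }
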
now.","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":211,"context_line":"    if [ \"$CEPHADM_DEV_OSD\" \u003d\u003d \u0027True\u0027 ]; then"},{"line_number":212,"context_line":"        create_osd_dev"},{"line_number":213,"context_line":"    fi"},{"line_number":214,"context_line":"    # Wait cephadm backend to be operational"},{"line_number":215,"context_line":"    # and add osds via drivegroups"},{"line_number":216,"context_line":"    sleep \"$SLEEP\""},{"line_number":217,"context_line":"    add_osds"},{"line_number":218,"context_line":"    fi"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"b59ab5ee_7c4cb8fb","line":215,"range":{"start_line":214,"start_character":0,"end_line":215,"end_character":34},"updated":"2022-06-21 19:27:49.000000000","message":"is there any status to check, or is the wait always deterministic?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":211,"context_line":"    if [ \"$CEPHADM_DEV_OSD\" \u003d\u003d \u0027True\u0027 ]; then"},{"line_number":212,"context_line":"        create_osd_dev"},{"line_number":213,"context_line":"    fi"},{"line_number":214,"context_line":"    # Wait cephadm backend to be operational"},{"line_number":215,"context_line":"    # and add osds via drivegroups"},{"line_number":216,"context_line":"    sleep \"$SLEEP\""},{"line_number":217,"context_line":"    add_osds"},{"line_number":218,"context_line":"    fi"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"2d43ef88_25fe1fa0","line":215,"range":{"start_line":214,"start_character":0,"end_line":215,"end_character":34},"in_reply_to":"b59ab5ee_7c4cb8fb","updated":"2022-06-21 21:32:12.000000000","message":"nothing to check here: the sleep is a safe measure to make sure that we have time between these two operations that are not atomic:\n\n- create_osd_dev: setup the pv/vg/lv\n- add_osds: the \u0027ceph orch daemon add osd\u0027 command run against the \"DEVICES\" array.\n\nNote: if something fails in \"create_osd_dev\", everything will exit -1","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":282,"context_line":""},{"line_number":283,"context_line":"        # set the application to the pool (which also means rbd init the pool)"},{"line_number":284,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":285,"context_line":"            --keyring $KEYRING -- ceph osd pool application enable \"$pool\" rbd"},{"line_number":286,"context_line":"    done"},{"line_number":287,"context_line":"}"},{"line_number":288,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"1cde66cc_d29106fc","line":285,"updated":"2022-06-21 17:29:40.000000000","message":"I see that non-RBD pools also get created by \"add_pools\" function. 
Do we need to set the \"rbd\" pool application label for them as well?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":true,"context_lines":[{"line_number":282,"context_line":""},{"line_number":283,"context_line":"        # set the application to the pool (which also means rbd init the pool)"},{"line_number":284,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":285,"context_line":"            --keyring $KEYRING -- ceph osd pool application enable \"$pool\" rbd"},{"line_number":286,"context_line":"    done"},{"line_number":287,"context_line":"}"},{"line_number":288,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"a19e68f3_e6bae2f2","line":285,"in_reply_to":"1cde66cc_d29106fc","updated":"2022-06-21 20:17:44.000000000","message":"This code creates:\n\n- glance pool (see L651)\n- cinder pool (see L663)\n- cinder bak pool (see L663)\n- nova pool (see L668)\n\nWe set rbd for all of them because are explicitly created (and needs to be \u0027rbd\u0027 as we do in TripleO and ceph-ansible), and the application is not a parameter here (there\u0027s basically no need for extra logic to associate a particular application to a particular pool). \nThe other pools (like cephfs_{data \u0026\u0026 metadata} are created by orchestrator, so we\u0027re good in terms of application as we don\u0027t have to set it.\n\nExtra logic will be added in future changes only if needed, otherwise the goal can still be feature parity with the old ceph plugin.\nDoes it make sense?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"68da9365c00c81715f40fbfab99b1e824b3321b1","unresolved":false,"context_lines":[{"line_number":282,"context_line":""},{"line_number":283,"context_line":"        # set the application to the pool (which also means rbd init the pool)"},{"line_number":284,"context_line":"        $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":285,"context_line":"            --keyring $KEYRING -- ceph osd pool application enable \"$pool\" rbd"},{"line_number":286,"context_line":"    done"},{"line_number":287,"context_line":"}"},{"line_number":288,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"7b66272b_74b321ac","line":285,"in_reply_to":"a19e68f3_e6bae2f2","updated":"2022-06-22 12:45:45.000000000","message":"I guess you need RBD pools for not just cinder/cinder back, but also for nova and glance testing. 
So makes sense.","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":332,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":333,"context_line":"    # and the mds daemon is deployed"},{"line_number":334,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":335,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":336,"context_line":"}"},{"line_number":337,"context_line":""},{"line_number":338,"context_line":"# Install ceph: add NFS"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"752e2977_ca36c5d4","line":335,"updated":"2022-06-21 17:29:40.000000000","message":"I can\u0027t find the respective `ceph fs volume rm \u003cfsname\u003e --yes-i-really-mean-it` to clean up the MDS daemons, the FS metada and data pools , https://docs.ceph.com/en/quincy/cephfs/fs-volumes/#fs-volumes","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"13818b0ef71379521098e76e117031b145126d52","unresolved":true,"context_lines":[{"line_number":332,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":333,"context_line":"    # and the mds daemon is deployed"},{"line_number":334,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":335,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":336,"context_line":"}"},{"line_number":337,"context_line":""},{"line_number":338,"context_line":"# Install ceph: add NFS"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"a8f78945_6ea6820b","line":335,"in_reply_to":"3491d911_7438779d","updated":"2022-06-22 13:22:20.000000000","message":"I checked devstack/lib/ceph. In its stop_ceph, we stop the MDSes and NFS-ganesha service explicitly. I think we should do the same. 
In the stop_ceph:\n\nif manila_is_enabled:\n   ceph fs volume rm --yes-i-really-mean-it\n\nif manila_driver is cephfsnfs:\n   ceph nfs cluster rm  \u003cganesha-cluster\u003e","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":332,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":333,"context_line":"    # and the mds daemon is deployed"},{"line_number":334,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":335,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":336,"context_line":"}"},{"line_number":337,"context_line":""},{"line_number":338,"context_line":"# Install ceph: add NFS"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"19f2eeb4_de8b6665","line":335,"in_reply_to":"752e2977_ca36c5d4","updated":"2022-06-21 19:27:49.000000000","message":"+1","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":332,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":333,"context_line":"    # and the mds daemon is deployed"},{"line_number":334,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":335,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":336,"context_line":"}"},{"line_number":337,"context_line":""},{"line_number":338,"context_line":"# Install ceph: add NFS"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"3491d911_7438779d","line":335,"in_reply_to":"752e2977_ca36c5d4","updated":"2022-06-21 20:17:44.000000000","message":"unstack || clean, in the scope of this plugin, are used to delete the cluster entirely, cleaning up the osds as well. 
the MDS cleanup is out of scope here.","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"f10ea41237de76be57a027d231e7a4871a473004","unresolved":true,"context_lines":[{"line_number":332,"context_line":"    # - $FSNAME.FSNAME.meta"},{"line_number":333,"context_line":"    # and the mds daemon is deployed"},{"line_number":334,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":335,"context_line":"        --keyring $KEYRING -- ceph fs volume create \"$FSNAME\""},{"line_number":336,"context_line":"}"},{"line_number":337,"context_line":""},{"line_number":338,"context_line":"# Install ceph: add NFS"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"80877e96_a84e2d40","line":335,"in_reply_to":"a8f78945_6ea6820b","updated":"2022-06-22 14:18:01.000000000","message":"We do that in devstack/lib/ceph because they\u0027re container || processes running on the host but w/o any kind of orchestration on top of them.\n\n```\ncephadm rm-cluster --fsid \u003cfsid\u003e --zap-osds --force\n```\n\nshould take care about stopping them (I\u0027ve done it many times in my dev environment), and [1] is generic enough to stop/kill/remove any kind of daemon.\nAlso, we\u0027re cleaning the osd and releasing the loopback device after the cluster is removed: it leaves no traces in the system.\n\nNo need to run these two commands and then remove the cluster.\n\n[1] https://github.com/ceph/ceph/blob/master/src/cephadm/cephadm#L6965-L7061","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":340,"context_line":"    # (fpantano) TODO: Build an ingress daemon on top of this"},{"line_number":341,"context_line":"    echo \"[CEPHADM] Deploy nfs.$FSNAME backend\""},{"line_number":342,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":343,"context_line":"        --keyring $KEYRING -- ceph orch apply nfs \\"},{"line_number":344,"context_line":"        \"$FSNAME\" --placement\u003d\"$HOSTNAME\" --port $NFS_PORT"},{"line_number":345,"context_line":"}"},{"line_number":346,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"c83392d2_ba6d3d9f","line":343,"updated":"2022-06-21 17:29:40.000000000","message":"I can\u0027t find the respective nfs cluster removal, https://docs.ceph.com/en/latest/mgr/nfs/#delete-nfs-ganesha-cluster","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":340,"context_line":"    # (fpantano) TODO: Build an ingress daemon on top of this"},{"line_number":341,"context_line":"    echo \"[CEPHADM] Deploy nfs.$FSNAME backend\""},{"line_number":342,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":343,"context_line":"        --keyring $KEYRING -- ceph orch apply nfs \\"},{"line_number":344,"context_line":"        \"$FSNAME\" --placement\u003d\"$HOSTNAME\" --port 
$NFS_PORT"},{"line_number":345,"context_line":"}"},{"line_number":346,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"4e985ac2_55cfdbb1","line":343,"in_reply_to":"c83392d2_ba6d3d9f","updated":"2022-06-21 19:27:49.000000000","message":"+1","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":340,"context_line":"    # (fpantano) TODO: Build an ingress daemon on top of this"},{"line_number":341,"context_line":"    echo \"[CEPHADM] Deploy nfs.$FSNAME backend\""},{"line_number":342,"context_line":"    $SUDO \"$CEPHADM\" shell --fsid $FSID --config $CONFIG \\"},{"line_number":343,"context_line":"        --keyring $KEYRING -- ceph orch apply nfs \\"},{"line_number":344,"context_line":"        \"$FSNAME\" --placement\u003d\"$HOSTNAME\" --port $NFS_PORT"},{"line_number":345,"context_line":"}"},{"line_number":346,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"5c1197fe_5d031e23","line":343,"in_reply_to":"c83392d2_ba6d3d9f","updated":"2022-06-21 20:17:44.000000000","message":"see my comment on the MDS cleanup on L335, same thing here.","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":376,"context_line":"    fi"},{"line_number":377,"context_line":"}"},{"line_number":378,"context_line":""},{"line_number":379,"context_line":"# General Ceph utility to set config keys within the mgr"},{"line_number":380,"context_line":"function set_config_key {"},{"line_number":381,"context_line":"    local section\u003d$1"},{"line_number":382,"context_line":"    local key\u003d$2"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"9d5a4fc1_4612ecf2","line":379,"updated":"2022-06-21 17:29:40.000000000","message":"\"`ceph config set` will set a config option in Ceph monitor\u0027s config database\". 
See, https://docs.ceph.com/en/latest/rados/configuration/ceph-conf/#commands\n\nI don\u0027t get why the comment mentions that the config keys are set within the mgr?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":376,"context_line":"    fi"},{"line_number":377,"context_line":"}"},{"line_number":378,"context_line":""},{"line_number":379,"context_line":"# General Ceph utility to set config keys within the mgr"},{"line_number":380,"context_line":"function set_config_key {"},{"line_number":381,"context_line":"    local section\u003d$1"},{"line_number":382,"context_line":"    local key\u003d$2"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"007c8b2b_77330458","line":379,"in_reply_to":"9d5a4fc1_4612ecf2","updated":"2022-06-21 20:17:44.000000000","message":"Comment fixed","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"014441801730e5c9a4afcf11e2925c18f65713e7","unresolved":true,"context_lines":[{"line_number":440,"context_line":"    # Deploy and configure ganesha"},{"line_number":441,"context_line":"    [ $MANILA_CEPH_DRIVER \u003d\u003d \u0027cephfsnfs\u0027 ] \u0026\u0026 ceph_nfs_config"},{"line_number":442,"context_line":"    # Add manila keys to the list"},{"line_number":443,"context_line":"    KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":444,"context_line":"}"},{"line_number":445,"context_line":""},{"line_number":446,"context_line":"# Install ceph: services deployment"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"8516b5f4_d3d128b8","line":443,"range":{"start_line":443,"start_character":19,"end_line":443,"end_character":25},"updated":"2022-06-21 19:27:49.000000000","message":"overridable with a variable","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"0a91a2f194c7b7d573a62a61a227b29ef70ab3c4","unresolved":false,"context_lines":[{"line_number":440,"context_line":"    # Deploy and configure ganesha"},{"line_number":441,"context_line":"    [ $MANILA_CEPH_DRIVER \u003d\u003d \u0027cephfsnfs\u0027 ] \u0026\u0026 ceph_nfs_config"},{"line_number":442,"context_line":"    # Add manila keys to the list"},{"line_number":443,"context_line":"    KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":444,"context_line":"}"},{"line_number":445,"context_line":""},{"line_number":446,"context_line":"# Install ceph: services deployment"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"d6c7aaa4_572d637e","line":443,"range":{"start_line":443,"start_character":19,"end_line":443,"end_character":25},"in_reply_to":"8516b5f4_d3d128b8","updated":"2022-06-21 21:32:12.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":642,"context_line":""},{"line_number":643,"context_line":"    if is_ceph_enabled_for_service manila; then"},{"line_number":644,"context_line":"        
SERVICES+\u003d(\u0027cephfs\u0027)"},{"line_number":645,"context_line":"        KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":646,"context_line":"    fi"},{"line_number":647,"context_line":""},{"line_number":648,"context_line":"    [ \"$MANILA_CEPH_DRIVER\" \u003d\u003d \"cephfsnfs\" ] \u0026\u0026 SERVICES+\u003d(\u0027nfs\u0027)"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"21938e09_a909b963","line":645,"updated":"2022-06-21 17:29:40.000000000","message":"use client.$MANILA_CEPH_USER instead?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":642,"context_line":""},{"line_number":643,"context_line":"    if is_ceph_enabled_for_service manila; then"},{"line_number":644,"context_line":"        SERVICES+\u003d(\u0027cephfs\u0027)"},{"line_number":645,"context_line":"        KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":646,"context_line":"    fi"},{"line_number":647,"context_line":""},{"line_number":648,"context_line":"    [ \"$MANILA_CEPH_DRIVER\" \u003d\u003d \"cephfsnfs\" ] \u0026\u0026 SERVICES+\u003d(\u0027nfs\u0027)"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"00067f90_961fa63f","line":645,"in_reply_to":"21938e09_a909b963","updated":"2022-06-21 20:17:44.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":645,"context_line":"        KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":646,"context_line":"    fi"},{"line_number":647,"context_line":""},{"line_number":648,"context_line":"    [ \"$MANILA_CEPH_DRIVER\" \u003d\u003d \"cephfsnfs\" ] \u0026\u0026 SERVICES+\u003d(\u0027nfs\u0027)"},{"line_number":649,"context_line":""},{"line_number":650,"context_line":"    if is_ceph_enabled_for_service glance; then"},{"line_number":651,"context_line":"        POOLS+\u003d($GLANCE_CEPH_POOL)"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"90709cf9_ae912417","line":648,"updated":"2022-06-21 17:29:40.000000000","message":"move this within \"if is_ceph_enabled_for_service manila\" statement?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":645,"context_line":"        KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":646,"context_line":"    fi"},{"line_number":647,"context_line":""},{"line_number":648,"context_line":"    [ \"$MANILA_CEPH_DRIVER\" \u003d\u003d \"cephfsnfs\" ] \u0026\u0026 SERVICES+\u003d(\u0027nfs\u0027)"},{"line_number":649,"context_line":""},{"line_number":650,"context_line":"    if is_ceph_enabled_for_service glance; then"},{"line_number":651,"context_line":"        POOLS+\u003d($GLANCE_CEPH_POOL)"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"8f41711f_45bc151e","line":648,"in_reply_to":"90709cf9_ae912417","updated":"2022-06-21 
20:17:44.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":649,"context_line":""},{"line_number":650,"context_line":"    if is_ceph_enabled_for_service glance; then"},{"line_number":651,"context_line":"        POOLS+\u003d($GLANCE_CEPH_POOL)"},{"line_number":652,"context_line":"        KEYS+\u003d(\u0027client.glance\u0027)"},{"line_number":653,"context_line":"        config_glance"},{"line_number":654,"context_line":"    fi"},{"line_number":655,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"a441eccb_0fd81173","line":652,"updated":"2022-06-21 17:29:40.000000000","message":"use client.$GLANCE_CEPH_USER instead?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":649,"context_line":""},{"line_number":650,"context_line":"    if is_ceph_enabled_for_service glance; then"},{"line_number":651,"context_line":"        POOLS+\u003d($GLANCE_CEPH_POOL)"},{"line_number":652,"context_line":"        KEYS+\u003d(\u0027client.glance\u0027)"},{"line_number":653,"context_line":"        config_glance"},{"line_number":654,"context_line":"    fi"},{"line_number":655,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"ed2edca3_583837f2","line":652,"in_reply_to":"a441eccb_0fd81173","updated":"2022-06-21 20:17:44.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":655,"context_line":""},{"line_number":656,"context_line":"    if is_ceph_enabled_for_service cinder; then"},{"line_number":657,"context_line":"        POOLS+\u003d($CINDER_CEPH_POOL)"},{"line_number":658,"context_line":"        KEYS+\u003d(\u0027client.cinder\u0027)"},{"line_number":659,"context_line":"        set_min_client_version"},{"line_number":660,"context_line":"    fi"},{"line_number":661,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"67b48e49_29494643","line":658,"updated":"2022-06-21 17:29:40.000000000","message":"use client.$CINDER_CEPH_USER instead?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":655,"context_line":""},{"line_number":656,"context_line":"    if is_ceph_enabled_for_service cinder; then"},{"line_number":657,"context_line":"        POOLS+\u003d($CINDER_CEPH_POOL)"},{"line_number":658,"context_line":"        KEYS+\u003d(\u0027client.cinder\u0027)"},{"line_number":659,"context_line":"        set_min_client_version"},{"line_number":660,"context_line":"    fi"},{"line_number":661,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"81fd4eb8_157bd5cb","line":658,"in_reply_to":"67b48e49_29494643","updated":"2022-06-21 
20:17:44.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":661,"context_line":""},{"line_number":662,"context_line":"    if is_ceph_enabled_for_service c-bak; then"},{"line_number":663,"context_line":"        POOLS+\u003d($CINDER_BAK_CEPH_POOL)"},{"line_number":664,"context_line":"        KEYS+\u003d(\u0027client.cinder-bak\u0027)"},{"line_number":665,"context_line":"    fi"},{"line_number":666,"context_line":""},{"line_number":667,"context_line":"    if is_ceph_enabled_for_service nova; then"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"44e8aa11_19a4692e","line":664,"updated":"2022-06-21 17:29:40.000000000","message":"use client.$CINDER_BAK_CEPH_USER instead?","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":661,"context_line":""},{"line_number":662,"context_line":"    if is_ceph_enabled_for_service c-bak; then"},{"line_number":663,"context_line":"        POOLS+\u003d($CINDER_BAK_CEPH_POOL)"},{"line_number":664,"context_line":"        KEYS+\u003d(\u0027client.cinder-bak\u0027)"},{"line_number":665,"context_line":"    fi"},{"line_number":666,"context_line":""},{"line_number":667,"context_line":"    if is_ceph_enabled_for_service nova; then"}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"a1363dfc_95821b39","line":664,"in_reply_to":"44e8aa11_19a4692e","updated":"2022-06-21 20:17:44.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":8056,"name":"Ramana Raja","email":"rraja@redhat.com","username":"Ram_Raja"},"change_message_id":"bdb39c57e8a745526ebf829cdd17073dc847028a","unresolved":true,"context_lines":[{"line_number":666,"context_line":""},{"line_number":667,"context_line":"    if is_ceph_enabled_for_service nova; then"},{"line_number":668,"context_line":"        POOLS+\u003d($NOVA_CEPH_POOL)"},{"line_number":669,"context_line":"        KEYS+\u003d(\u0027client.cinder\u0027)"},{"line_number":670,"context_line":"        config_nova"},{"line_number":671,"context_line":"    fi"},{"line_number":672,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"669a8074_acd14db6","line":669,"updated":"2022-06-21 17:29:40.000000000","message":"use client.$CINDER_CEPH_USER instead","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"86eceb9fffbe6967d89f50b9e1338003d7f89569","unresolved":false,"context_lines":[{"line_number":666,"context_line":""},{"line_number":667,"context_line":"    if is_ceph_enabled_for_service nova; then"},{"line_number":668,"context_line":"        POOLS+\u003d($NOVA_CEPH_POOL)"},{"line_number":669,"context_line":"        KEYS+\u003d(\u0027client.cinder\u0027)"},{"line_number":670,"context_line":"        config_nova"},{"line_number":671,"context_line":"    
fi"},{"line_number":672,"context_line":""}],"source_content_type":"application/x-shellscript","patch_set":78,"id":"c5dff77e_84d5a9c1","line":669,"in_reply_to":"669a8074_acd14db6","updated":"2022-06-21 20:17:44.000000000","message":"Done","commit_id":"81a9e8d5681aa7a32e0315cd7ae5329ae598942c"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"e506581beb7b5735608dee3f62c22d682d3eaf3c","unresolved":true,"context_lines":[{"line_number":429,"context_line":"    # Deploy and configure ganesha"},{"line_number":430,"context_line":"    [ $MANILA_CEPH_DRIVER \u003d\u003d \u0027cephfsnfs\u0027 ] \u0026\u0026 ceph_nfs_config"},{"line_number":431,"context_line":"    # Add manila keys to the list"},{"line_number":432,"context_line":"    KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":433,"context_line":"}"},{"line_number":434,"context_line":""},{"line_number":435,"context_line":"# Install ceph: services deployment"}],"source_content_type":"application/x-shellscript","patch_set":81,"id":"b462dfd2_6c8d9a06","line":432,"range":{"start_line":432,"start_character":12,"end_line":432,"end_character":25},"updated":"2022-06-22 11:00:06.000000000","message":"\"client.$MANILA_CEPH_USER\"","commit_id":"db626be7063f12d720fdd0ff9cca60eaf0fd656e"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"05876c6c7dd64a8c0e0e76d864abe771788fa1d5","unresolved":false,"context_lines":[{"line_number":429,"context_line":"    # Deploy and configure ganesha"},{"line_number":430,"context_line":"    [ $MANILA_CEPH_DRIVER \u003d\u003d \u0027cephfsnfs\u0027 ] \u0026\u0026 ceph_nfs_config"},{"line_number":431,"context_line":"    # Add manila keys to the list"},{"line_number":432,"context_line":"    KEYS+\u003d(\u0027client.manila\u0027)"},{"line_number":433,"context_line":"}"},{"line_number":434,"context_line":""},{"line_number":435,"context_line":"# Install ceph: services deployment"}],"source_content_type":"application/x-shellscript","patch_set":81,"id":"51f43ee9_92f88faa","line":432,"range":{"start_line":432,"start_character":12,"end_line":432,"end_character":25},"in_reply_to":"b462dfd2_6c8d9a06","updated":"2022-06-22 11:02:48.000000000","message":"Thanks :D","commit_id":"db626be7063f12d720fdd0ff9cca60eaf0fd656e"},{"author":{"_account_id":4523,"name":"Eric Harney","email":"eharney@redhat.com","username":"eharney"},"change_message_id":"439830521e9004a495b806a0a69bd2fca7e93240","unresolved":true,"context_lines":[{"line_number":621,"context_line":"# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt"},{"line_number":622,"context_line":"function _undefine_virsh_secret {"},{"line_number":623,"context_line":"    local virsh_uuid"},{"line_number":624,"context_line":"    virsh_uuid\u003d$($SUDO virsh secret-list | awk \u0027/^ ?[0-9a-z]/ { print $1 }\u0027)"},{"line_number":625,"context_line":"    echo $virsh_uuid"},{"line_number":626,"context_line":"    $SUDO virsh secret-undefine ${virsh_uuid} \u0026\u003e/dev/null"},{"line_number":627,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":82,"id":"350d2797_c882a967","line":624,"range":{"start_line":624,"start_character":0,"end_line":624,"end_character":23},"updated":"2022-06-27 13:52:03.000000000","message":"This probably fails on deployments w/o Nova/virsh installed, since there is no \"is Nova enabled\" check around 
it?","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":4523,"name":"Eric Harney","email":"eharney@redhat.com","username":"eharney"},"change_message_id":"cf578418f053b849aed7851ad03ec9dfe9719854","unresolved":true,"context_lines":[{"line_number":621,"context_line":"# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt"},{"line_number":622,"context_line":"function _undefine_virsh_secret {"},{"line_number":623,"context_line":"    local virsh_uuid"},{"line_number":624,"context_line":"    virsh_uuid\u003d$($SUDO virsh secret-list | awk \u0027/^ ?[0-9a-z]/ { print $1 }\u0027)"},{"line_number":625,"context_line":"    echo $virsh_uuid"},{"line_number":626,"context_line":"    $SUDO virsh secret-undefine ${virsh_uuid} \u0026\u003e/dev/null"},{"line_number":627,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":82,"id":"f67ae82c_438a3fb7","line":624,"range":{"start_line":624,"start_character":0,"end_line":624,"end_character":23},"in_reply_to":"350d2797_c882a967","updated":"2022-06-27 15:51:21.000000000","message":"^","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"b886ef445a2e7bcca4e68d374cadef8371d034f8","unresolved":false,"context_lines":[{"line_number":621,"context_line":"# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt"},{"line_number":622,"context_line":"function _undefine_virsh_secret {"},{"line_number":623,"context_line":"    local virsh_uuid"},{"line_number":624,"context_line":"    virsh_uuid\u003d$($SUDO virsh secret-list | awk \u0027/^ ?[0-9a-z]/ { print $1 }\u0027)"},{"line_number":625,"context_line":"    echo $virsh_uuid"},{"line_number":626,"context_line":"    $SUDO virsh secret-undefine ${virsh_uuid} \u0026\u003e/dev/null"},{"line_number":627,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":82,"id":"7303cb1b_bb7d09fa","line":624,"range":{"start_line":624,"start_character":0,"end_line":624,"end_character":23},"in_reply_to":"a53454b6_9f23179f","updated":"2022-06-27 16:54:34.000000000","message":"Done","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":16643,"name":"Goutham Pacha Ravi","email":"gouthampravi@gmail.com","username":"gouthamr"},"change_message_id":"5823011303502ddaaa038dc8d2a0ba5fe8f3365f","unresolved":true,"context_lines":[{"line_number":621,"context_line":"# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt"},{"line_number":622,"context_line":"function _undefine_virsh_secret {"},{"line_number":623,"context_line":"    local virsh_uuid"},{"line_number":624,"context_line":"    virsh_uuid\u003d$($SUDO virsh secret-list | awk \u0027/^ ?[0-9a-z]/ { print $1 }\u0027)"},{"line_number":625,"context_line":"    echo $virsh_uuid"},{"line_number":626,"context_line":"    $SUDO virsh secret-undefine ${virsh_uuid} \u0026\u003e/dev/null"},{"line_number":627,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":82,"id":"a53454b6_9f23179f","line":624,"range":{"start_line":624,"start_character":0,"end_line":624,"end_character":23},"in_reply_to":"d405b20b_eb9f4758","updated":"2022-06-27 16:31:42.000000000","message":"The use in the old script is executed if ceph is enabled for nova:\n\nhttps://opendev.org/openstack/devstack-plugin-ceph/src/commit/e222cc976918a331bacff150e84069fda8f4960a/devstack/plugin.sh#L57-L61\n\n\nSimilarly, the ceph secret isn\u0027t 
generated if ceph isn\u0027t enabled for nova even in the cephadm code:\n\nhttps://review.opendev.org/c/openstack/devstack-plugin-ceph/+/826484/82/devstack/lib/cephadm#603\n\n\nSo I guess it makes sense to use the same condition and skip this step during cleanup:\n\n   [ \"$ENABLE_CEPH_NOVA\" \u003d\u003d \"False\" ] \u0026\u0026 return;","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"},{"author":{"_account_id":25402,"name":"Francesco Pantano","email":"fpantano@redhat.com","username":"fmount"},"change_message_id":"3648b2e2d12267c2adc21e0e445f346ab75609f4","unresolved":true,"context_lines":[{"line_number":621,"context_line":"# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt"},{"line_number":622,"context_line":"function _undefine_virsh_secret {"},{"line_number":623,"context_line":"    local virsh_uuid"},{"line_number":624,"context_line":"    virsh_uuid\u003d$($SUDO virsh secret-list | awk \u0027/^ ?[0-9a-z]/ { print $1 }\u0027)"},{"line_number":625,"context_line":"    echo $virsh_uuid"},{"line_number":626,"context_line":"    $SUDO virsh secret-undefine ${virsh_uuid} \u0026\u003e/dev/null"},{"line_number":627,"context_line":"}"}],"source_content_type":"application/x-shellscript","patch_set":82,"id":"d405b20b_eb9f4758","line":624,"range":{"start_line":624,"start_character":0,"end_line":624,"end_character":23},"in_reply_to":"f67ae82c_438a3fb7","updated":"2022-06-27 16:06:00.000000000","message":"virsh exists and it\u0027s used for two reasons:\n\n1. it was used in the corresponding \u0027ceph\u0027 plugin (e.g. grep something here -\u003e [1])\n\n2. if I\u0027m not wrong, libvirt is always installed when nova is used [2]\n\n\n[1] https://github.com/openstack/devstack-plugin-ceph/blob/master/devstack/lib/ceph#L256\n[2] https://github.com/openstack/devstack/blob/master/files/debs/nova","commit_id":"c48bbd0b451de21ce7c65a273448c9e4ba18fb0a"}]}
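
For reference, the get_cephadm thread above asks for the Ceph release to be a variable (CEPH_RELEASE=${CEPH_RELEASE:-pacific}) instead of hard-coding "pacific" in the download URL. A minimal sketch under that assumption; $SUDO and TARGET_BIN mirror the snippet quoted in the comments, and the /usr/local/bin default for TARGET_BIN is only a guess, not something the review confirms:

```bash
# Pre-install ceph: get the cephadm bootstrap script for a configurable release
SUDO=${SUDO:-sudo}
CEPH_RELEASE=${CEPH_RELEASE:-pacific}
TARGET_BIN=${TARGET_BIN:-/usr/local/bin}   # assumed default, not taken from the review

function get_cephadm {
    curl -O "https://raw.githubusercontent.com/ceph/ceph/${CEPH_RELEASE}/src/cephadm/cephadm"
    $SUDO mv cephadm "$TARGET_BIN"
    $SUDO chmod +x "$TARGET_BIN/cephadm"
    echo "[GET CEPHADM] cephadm is now available"
}
```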
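The pool and cephx-key threads converge on two points: only the explicitly created pools (glance, cinder, cinder-backup, nova) get the rbd application enabled, since the CephFS data/metadata pools are handled by the orchestrator, and the key names should come from the per-service *_CEPH_USER variables rather than hard-coded client names, with nova reusing the cinder key. A sketch of that shape; is_ceph_enabled_for_service, CEPHADM, FSID, CONFIG, KEYRING and the *_CEPH_POOL/*_CEPH_USER variables are the plugin's own settings and are only assumed here:

```bash
# Build the pool and key lists for the enabled services, as discussed in the review
SUDO=${SUDO:-sudo}
POOLS=()
KEYS=()

if is_ceph_enabled_for_service glance; then
    POOLS+=("$GLANCE_CEPH_POOL")
    KEYS+=("client.$GLANCE_CEPH_USER")
fi
if is_ceph_enabled_for_service cinder; then
    POOLS+=("$CINDER_CEPH_POOL")
    KEYS+=("client.$CINDER_CEPH_USER")
fi
if is_ceph_enabled_for_service c-bak; then
    POOLS+=("$CINDER_BAK_CEPH_POOL")
    KEYS+=("client.$CINDER_BAK_CEPH_USER")
fi
if is_ceph_enabled_for_service nova; then
    POOLS+=("$NOVA_CEPH_POOL")
    # nova reuses the cinder key, per the review discussion
    KEYS+=("client.$CINDER_CEPH_USER")
fi

# Enable the rbd application on every explicitly created pool
# (cephfs data/metadata pools are created by the orchestrator and need no label here)
for pool in "${POOLS[@]}"; do
    $SUDO "$CEPHADM" shell --fsid "$FSID" --config "$CONFIG" \
        --keyring "$KEYRING" -- ceph osd pool application enable "$pool" rbd
done
```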
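On teardown, the threads above settle on letting cephadm remove the whole cluster (which stops the MDS, NFS and OSD daemons and zaps the OSDs) instead of deleting the CephFS volume and NFS cluster one by one, and on guarding the libvirt secret removal with the same nova condition used when the secret is created. A sketch combining the commands quoted in those comments; the function names and SUDO default are assumptions, while the awk pattern and the ENABLE_CEPH_NOVA check are taken from the review:

```bash
SUDO=${SUDO:-sudo}

# Remove the whole cluster: cephadm stops every daemon and zaps the OSDs,
# so no separate "ceph fs volume rm" / "ceph nfs cluster rm" is needed
function cleanup_ceph {
    $SUDO "$CEPHADM" rm-cluster --fsid "$FSID" --zap-osds --force
}

# Undefine the Cinder libvirt secret only when ceph is enabled for nova,
# mirroring the condition under which the secret was defined
function _undefine_virsh_secret {
    [ "$ENABLE_CEPH_NOVA" == "False" ] && return
    local virsh_uuid
    virsh_uuid=$($SUDO virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
    $SUDO virsh secret-undefine "${virsh_uuid}" &>/dev/null
}
```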
