{"/COMMIT_MSG":[{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"47d69545b36f5d201c94fc5f191cc4048466071e","unresolved":false,"context_lines":[{"line_number":7,"context_line":"BaR backup /var/lib/ceph ceph mon and mgr"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"The idea is to backup the directory of the control-plane"},{"line_number":10,"context_line":"sequencially. So it wont have production disruption."},{"line_number":11,"context_line":" - Stop ceph services (mon \u0026 mgr)"},{"line_number":12,"context_line":" - Do the backup of the /var/lib/ceph"},{"line_number":13,"context_line":" - Restart ceph services"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":11,"id":"df33271e_4610a900","line":10,"updated":"2020-04-03 10:59:50.000000000","message":"I understand and support the purpose of this change but also wanted to bring up one potential problem with it which I think we need to test carefully\n\nWe\u0027ll end up with the backups of the mons having each a different epoch, because every time a mon is stopped/started the other two will bump up the cluster epoch as a consequence of the cluster reconfiguration\n\nThe mon last stopped will be the one having in its backup the highest epoch\n\nOn restore, the mons have to be started all three at the same time though which should work but was curious if we tested this?","commit_id":"64d7d68021801a9c2b31ec203298ed84eac45db5"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"b9dc56905a684bcdf05fa8a82abdccd3aaaa91f5","unresolved":false,"context_lines":[{"line_number":7,"context_line":"BaR backup /var/lib/ceph ceph mon and mgr"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"The idea is to backup the directory of the control-plane"},{"line_number":10,"context_line":"sequencially. So it wont have production disruption."},{"line_number":11,"context_line":" - Stop ceph services (mon \u0026 mgr)"},{"line_number":12,"context_line":" - Do the backup of the /var/lib/ceph"},{"line_number":13,"context_line":" - Restart ceph services"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":11,"id":"df33271e_b4540c56","line":10,"in_reply_to":"df33271e_2679fb2e","updated":"2020-04-06 17:02:29.000000000","message":"ack, election_epoch was the same on the three nodes","commit_id":"64d7d68021801a9c2b31ec203298ed84eac45db5"},{"author":{"_account_id":22954,"name":"Juan Badia Payno","email":"jbadiapa@redhat.com","username":"jbadiapa"},"change_message_id":"7373161bec3d1c12373c863086392d66914e5c62","unresolved":false,"context_lines":[{"line_number":7,"context_line":"BaR backup /var/lib/ceph ceph mon and mgr"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"The idea is to backup the directory of the control-plane"},{"line_number":10,"context_line":"sequencially. 
So it wont have production disruption."},{"line_number":11,"context_line":" - Stop ceph services (mon \u0026 mgr)"},{"line_number":12,"context_line":" - Do the backup of the /var/lib/ceph"},{"line_number":13,"context_line":" - Restart ceph services"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":11,"id":"df33271e_2679fb2e","line":10,"in_reply_to":"df33271e_4610a900","updated":"2020-04-06 08:41:35.000000000","message":"Yes, this was tested.\nWe havent checked the epoch, but the content of ceph was tested with a python script to ensure that the content was there.\n\nBasically, we used the overcloud pingtest to create a stack.\nTo make things more complicated, we created a different stack on every node backup of the control-plane.\n- We created an stack\n- Backup the /var/lib/ceph on controller-0\n- We created different stack\n- Backup the /var/lib/ceph on controller-1\n- Created different stack \n- Backup the /var/lib/ceph on controller-2\n\nAfter the restoration we check the content of ceph.","commit_id":"64d7d68021801a9c2b31ec203298ed84eac45db5"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"09c3cbf93682bff15f5e75d232b8334bca3701dd","unresolved":false,"context_lines":[{"line_number":7,"context_line":"BaR backup /var/lib/ceph ceph mon and mgr"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"The idea is to backup the directory of the control-plane"},{"line_number":10,"context_line":"sequencially. So it wont have production disruption."},{"line_number":11,"context_line":" - Stop ceph services (mon \u0026 mgr)"},{"line_number":12,"context_line":" - Do the backup of the /var/lib/ceph"},{"line_number":13,"context_line":" - Restart ceph services"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":11,"id":"df33271e_7fc1bd80","line":10,"in_reply_to":"df33271e_b4540c56","updated":"2020-04-06 17:20:35.000000000","message":"it might still be not sufficient though, investigating further, sorry","commit_id":"64d7d68021801a9c2b31ec203298ed84eac45db5"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"612bfd2855e7bbe74758f001ec5dc86db604c875","unresolved":false,"context_lines":[{"line_number":10,"context_line":"sequencially. 
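As an illustration of the election_epoch check Giulio mentions (this code
is not part of the change; the container name pattern and the
tripleo_container_cli variable follow the role tasks quoted later in this
review):

    - name: Compare mon election epochs across controllers
      hosts: Controller
      become: true
      tasks:
        - name: Read quorum status from the local mon container
          command: >
            {{ tripleo_container_cli | default('podman') }} exec
            ceph-mon-{{ ansible_hostname }} ceph quorum_status --format json
          register: quorum_status
          changed_when: false

        # After a restore, all three mons should print the same value.
        - name: Show this mon's election_epoch
          debug:
            msg: "{{ (quorum_status.stdout | from_json).election_epoch }}"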
Thread on line 13:

Giulio Fidente (2020-04-06):
    The problem in this scenario is that the cluster never loses quorum.
    Hence, while the last of the mons is being "snapshotted" (say, node3),
    the other two (of which a backup was already taken) might be acking new
    epoch change requests which the OSDs *will remember*.

    On restore, the most up-to-date copy of the OSD map will be node3's.
    The other two mons will eventually sync up, but none of the three will
    have the latest epoch, which was instead acked to the OSDs by
    node1/node2 while they were still online.

    The only safe way to take a "working" backup of the mons is to do so
    when the cluster *is not quorate*, because that is the only scenario in
    which it will refuse to incorporate changes that might make the backup
    useless.

File: doc/source/roles/role-backup-and-restore.rst

On line 88 ("Another plpaybook to do ceph backup without production
disruption."), patch set 3:

Carlos Camacho (2020-03-18):
    "plpaybook" -> "playbook"

On line 92 (trailing whitespace after "cat <<'EOF' >
~/bar_rear_ceph_backup.yaml"), patch set 3:

Carlos Camacho (2020-03-18):
    remove
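Following Giulio's reasoning, a backup taken while the cluster is not
quorate would stop every mon before archiving anything, so no surviving mon
can ack epoch changes while the snapshots are taken. A hedged sketch only,
not the role's implementation (with Ansible's default linear strategy, each
task completes on all hosts before the next one starts):

    - name: Backup ceph mons while the cluster is not quorate
      hosts: Controller
      become: true
      tasks:
        - name: Stop the ceph monitor on every controller first
          systemd:
            state: stopped
            name: "ceph-mon@{{ ansible_hostname }}"

        - name: Archive /var/lib/ceph while no mon can ack epoch changes
          command: tar -zc --xattrs --acls -f /var/lib/ceph.tar.gz /var/lib/ceph

        - name: Start the ceph monitor again
          systemd:
            state: started
            name: "ceph-mon@{{ ansible_hostname }}"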
On line 107 (trailing whitespace), patch set 3:

Carlos Camacho (2020-03-18):
    remove

On lines 110-112 (the end of the playbook heredoc), patch set 3:

Carlos Camacho (2020-03-18):
    Missing EOF?

On lines 114-115 ("The last step is to run the previously create playbooks
filtering by the corresponding tag."), patch set 3:

Carlos Camacho (2020-03-18):
    Missing how to run the playbook.

On lines 93-112 (the documented playbook, quoted verbatim), patch set 10:

    cat <<'EOF' > ~/bar_rear_ceph_backup.yaml
    # Playbook
    # We install and configure ReaR in the control plane nodes
    # As they are the only nodes we will like to backup now.
    - name: Backup ceph on the controller without production disruption
      become: true
      hosts: Controller
      serial: 1
      tasks:
      - name: Backup ceph first
        include_role:
          name: backup-and-restore
          tasks_from: ceph_backup
        tags:
        -  bar_create_recover_image

    - become: true
      hosts: Controller
      name: Do a full recover image
      roles:
        - role: backup-and-restore
    EOF

Kevin Carter (2020-04-01):
    Maybe this should be added to the playbooks provided by tripleo-ansible
    and then referenced in the docs?

Juan Badia Payno (2020-04-02):
    We are working on an openstack CLI for both the undercloud and the
    overcloud, where the playbooks will be located. Not sure whether that
    is enough?

Kevin Carter (2020-04-02):
    Sounds good to me. Thanks for the info.
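To address "Missing how to run the playbook": judging from the invocation
pattern quoted around line 124 of the later docs ("--tags bar_setup_rear \
~/bar_rear_setup.yaml"), the ceph backup playbook would be run along these
lines (the inventory path is an assumption for illustration):

    ansible-playbook \
        -i ~/tripleo-inventory.yaml \
        --tags bar_create_recover_image \
        ~/bar_rear_ceph_backup.yaml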
File: doc/source/roles/role-backup_and_restore.rst

On line 127 ("Lastly, we execute the actual backup step. With or without
ceph."), patch set 14:

Juan Badia Payno (2020-04-29):
    "With or without ceph." needs to be deleted.

File: tripleo_ansible/playbooks/ceph-backup.yaml

File comment, patch set 26:

Kevin Carter (2020-08-04):
    Boilerplate header should be here.

On line 29 ("hosts: overcloud_ceph_gr"), patch set 27:

Telles Mota Vidal Nóbrega (2020-08-27):
    I think the host here should be overcloud_ceph_mgr.
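With Telles's fix applied, the play header would read as follows (the tasks
that follow it are unchanged from the quoted context):

    - name: Ceph MGR
      become: true
      hosts: overcloud_ceph_mgr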
File: tripleo_ansible/playbooks/ceph_deactivate_mds.yaml

File comment, patch set 26:

Kevin Carter (2020-08-04):
    Boilerplate header should be here.

On lines 2-3 ("- name: gather facts / hosts: all"), patch set 26:

Kevin Carter (2020-08-04):
    Is this needed?

Juan Badia Payno (2020-08-19):
    Yes, this is needed; otherwise it fails.

On lines 12-15 (the straight ceph-defaults and ceph-facts role imports
inside nested blocks), patch set 26:

Kevin Carter (2020-08-04):
    Given these are straight role imports, we can probably use the `roles`
    directive within the playbook to better organize the code. Something
    like so:

        - name: upgrade ceph mdss cluster, deactivate all rank > 0
          hosts: "{{ groups[mon_group_name|default('mons')][0] }}"
          become: true
          roles:
            - role: ceph-defaults
              when:
                - groups.get(mds_group_name, []) | length > 0
            - role: ceph-facts
              when:
                - groups.get(mds_group_name, []) | length > 0
          post_tasks:
            - name: deactivate all mds rank > 0 if any
              when: groups.get(mds_group_name, []) | length > 1
              block:
                ...

    This provides some nice organization and flow control, while also
    avoiding the nested blocks.

Juan Badia Payno (2020-08-19):
    This is a copy of
    https://github.com/ceph/ceph-ansible/blob/v4.0.14/infrastructure-playbooks/rolling_update.yml#L550-L623
    as suggested by Randy on a previous review. I'd prefer to leave it as
    it is, TBH.

On lines 17-18 ("- name: gather facts / hosts: all"), patch set 28:

Alex Schultz (2020-08-31):
    I'm still concerned this is a thing, though I think we could address it
    in a follow-up. It looks like this is needed for the filesystem usage
    down in the fact setting.

    What happens if a node is unavailable in the cluster? Will backup not
    work?

Juan Badia Payno (2020-08-31):
    This patch does not look into that, as nothing on the ceph cluster is
    backed up. The idea is to restore the control plane in the same state
    it was in when the backup was taken. Ceph is the exception: the ceph
    cluster needs to keep the state it has at the moment of the
    restoration.

Alex Schultz (2020-08-31):
    Right, but I think that by default, if one of the nodes in the
    inventory is not available, this will fail the playbook here, even if
    that node is not targeted by the rest of the playbook. Just something
    to look into, as end users typically end up with a server or two dead
    at some point.
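One way to address Alex's concern, sketched as an assumption rather than
anything proposed in the change: let the fact-gathering play tolerate
unreachable nodes with ignore_unreachable (an Ansible 2.7+ keyword), so a
dead server does not abort the whole playbook:

    - name: gather facts
      hosts: all
      ignore_unreachable: true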
10","commit_id":"6a2fe44b28688d0fb01c70cd9bc6266aaf488626"}],"tripleo_ansible/roles/backup_and_restore/tasks/ceph_authentication.yml":[{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"0bb53d093f1c5283c26311d969fb75a1c2237f3c","unresolved":false,"context_lines":[{"line_number":35,"context_line":"- name: Export ceph authentication"},{"line_number":36,"context_line":"  shell: |"},{"line_number":37,"context_line":"    set -o pipefail"},{"line_number":38,"context_line":"    {{ tripleo_container_cli }} exec ceph-mon-{{ ansible_hostname }} bash -c \"ceph auth export\" \u003e {{ tripleo_backup_and_restore_ceph_auth_file }}"},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_c9e24dd5","line":38,"updated":"2020-04-29 09:53:25.000000000","message":"I think the container is mounting /var/lib/ceph/ but this will save the file within the container fs?","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"ebe0222fcde7abaf0c56edde1906676e27180274","unresolved":false,"context_lines":[{"line_number":35,"context_line":"- name: Export ceph authentication"},{"line_number":36,"context_line":"  shell: |"},{"line_number":37,"context_line":"    set -o pipefail"},{"line_number":38,"context_line":"    {{ tripleo_container_cli }} exec ceph-mon-{{ ansible_hostname }} bash -c \"ceph auth export\" \u003e {{ tripleo_backup_and_restore_ceph_auth_file }}"},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_c67a1322","line":38,"in_reply_to":"1f493fa4_6cdd07df","updated":"2020-04-30 18:24:09.000000000","message":"ack it\u0027s capturing output of command launched into the container but saving it on ansible node\n\nthanks","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"},{"author":{"_account_id":22954,"name":"Juan Badia Payno","email":"jbadiapa@redhat.com","username":"jbadiapa"},"change_message_id":"a7ae9c01756422bb51060d327e9585bbe9426ed9","unresolved":false,"context_lines":[{"line_number":35,"context_line":"- name: Export ceph authentication"},{"line_number":36,"context_line":"  shell: |"},{"line_number":37,"context_line":"    set -o pipefail"},{"line_number":38,"context_line":"    {{ tripleo_container_cli }} exec ceph-mon-{{ ansible_hostname }} bash -c \"ceph auth export\" \u003e {{ tripleo_backup_and_restore_ceph_auth_file }}"},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_6cdd07df","line":38,"in_reply_to":"1f493fa4_c9e24dd5","updated":"2020-04-29 10:45:22.000000000","message":"Nope, this will save the file /home/heat-admin/ceph_auth_export.bak outside the container","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"}],"tripleo_ansible/roles/backup_and_restore/tasks/ceph_backup.yml":[{"author":{"_account_id":7353,"name":"Kevin 
Carter","email":"kevin@cloudnull.com","username":"cloudnull"},"change_message_id":"bfcb63117a2235b540395224099d4a774751cc50","unresolved":false,"context_lines":[{"line_number":35,"context_line":"- name: Stop ceph monitor"},{"line_number":36,"context_line":"  systemd:"},{"line_number":37,"context_line":"    state: stopped"},{"line_number":38,"context_line":"    name: ceph-mon@{{ ansible_hostname }}"},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":5,"id":"1fa4df85_bde1e6fe","line":38,"range":{"start_line":38,"start_character":10,"end_line":38,"end_character":41},"updated":"2020-03-18 13:15:26.000000000","message":"this should be quoted.","commit_id":"394fe56b7ce014dc3a149f1fbb5f76f118e3ba60"},{"author":{"_account_id":7353,"name":"Kevin Carter","email":"kevin@cloudnull.com","username":"cloudnull"},"change_message_id":"bfcb63117a2235b540395224099d4a774751cc50","unresolved":false,"context_lines":[{"line_number":45,"context_line":"- name: Stop ceph management"},{"line_number":46,"context_line":"  systemd:"},{"line_number":47,"context_line":"    state: stopped"},{"line_number":48,"context_line":"    name: ceph-mgr@{{ ansible_hostname }}"},{"line_number":49,"context_line":"  become: true"},{"line_number":50,"context_line":"  tags:"},{"line_number":51,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":5,"id":"1fa4df85_5df0f2c9","line":48,"range":{"start_line":48,"start_character":10,"end_line":48,"end_character":41},"updated":"2020-03-18 13:15:26.000000000","message":"this should be quoted.","commit_id":"394fe56b7ce014dc3a149f1fbb5f76f118e3ba60"},{"author":{"_account_id":7353,"name":"Kevin Carter","email":"kevin@cloudnull.com","username":"cloudnull"},"change_message_id":"bfcb63117a2235b540395224099d4a774751cc50","unresolved":false,"context_lines":[{"line_number":68,"context_line":"- name: Start ceph monitor"},{"line_number":69,"context_line":"  systemd:"},{"line_number":70,"context_line":"    state: started"},{"line_number":71,"context_line":"    name: ceph-mon@{{ ansible_hostname }}"},{"line_number":72,"context_line":"  become: true"},{"line_number":73,"context_line":"  tags:"},{"line_number":74,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":5,"id":"1fa4df85_7deb6edb","line":71,"range":{"start_line":71,"start_character":10,"end_line":71,"end_character":41},"updated":"2020-03-18 13:15:26.000000000","message":"this should be quoted.","commit_id":"394fe56b7ce014dc3a149f1fbb5f76f118e3ba60"},{"author":{"_account_id":7353,"name":"Kevin Carter","email":"kevin@cloudnull.com","username":"cloudnull"},"change_message_id":"bfcb63117a2235b540395224099d4a774751cc50","unresolved":false,"context_lines":[{"line_number":76,"context_line":"- name: Start ceph management"},{"line_number":77,"context_line":"  systemd:"},{"line_number":78,"context_line":"    state: started"},{"line_number":79,"context_line":"    name: ceph-mgr@{{ ansible_hostname }}"},{"line_number":80,"context_line":"  become: true"},{"line_number":81,"context_line":"  tags:"},{"line_number":82,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":5,"id":"1fa4df85_3dbf36d1","line":79,"range":{"start_line":79,"start_character":10,"end_line":79,"end_character":41},"updated":"2020-03-18 13:15:26.000000000","message":"this should be 
quoted.","commit_id":"394fe56b7ce014dc3a149f1fbb5f76f118e3ba60"},{"author":{"_account_id":25877,"name":"Luke Short","email":"ekultails@gmail.com","username":"ekultails"},"change_message_id":"ba26d8c478315a55bf803969ecca8a0b59fed0f5","unresolved":false,"context_lines":[{"line_number":35,"context_line":"- name: Stop ceph monitor"},{"line_number":36,"context_line":"  systemd:"},{"line_number":37,"context_line":"    state: stopped"},{"line_number":38,"context_line":"    name: \"ceph-mon@{{ ansible_hostname }}\""},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":6,"id":"1fa4df85_1266cdc2","line":38,"updated":"2020-03-19 17:03:40.000000000","message":"Does this work with our containerized Ceph services?","commit_id":"ff8225f09a2d9f603ee58b4641752a0e26f13726"},{"author":{"_account_id":22954,"name":"Juan Badia Payno","email":"jbadiapa@redhat.com","username":"jbadiapa"},"change_message_id":"e5174499d8a05ee1e13e3581dfc4b80cabd90bd4","unresolved":false,"context_lines":[{"line_number":35,"context_line":"- name: Stop ceph monitor"},{"line_number":36,"context_line":"  systemd:"},{"line_number":37,"context_line":"    state: stopped"},{"line_number":38,"context_line":"    name: \"ceph-mon@{{ ansible_hostname }}\""},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"}],"source_content_type":"text/x-yaml","patch_set":6,"id":"1fa4df85_52f7655a","line":38,"in_reply_to":"1fa4df85_1266cdc2","updated":"2020-03-19 18:25:08.000000000","message":"Yes, it was tested.","commit_id":"ff8225f09a2d9f603ee58b4641752a0e26f13726"},{"author":{"_account_id":25877,"name":"Luke Short","email":"ekultails@gmail.com","username":"ekultails"},"change_message_id":"ba26d8c478315a55bf803969ecca8a0b59fed0f5","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"},{"line_number":42,"context_line":""},{"line_number":43,"context_line":"- pause: seconds\u003d8"},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"- name: Stop ceph management"},{"line_number":46,"context_line":"  systemd:"}],"source_content_type":"text/x-yaml","patch_set":6,"id":"1fa4df85_72ddc16e","line":43,"updated":"2020-03-19 17:03:40.000000000","message":"All tasks should be named. 
On line 43 ("- pause: seconds=8"), patch set 6:

Luke Short (2020-03-19):
    All tasks should be named. In this case, we need to explain why we need
    to wait before the next task.

Juan Badia Payno (2020-03-19):
    We need to ensure that the service is stopped.

On line 55 (the "Tar and Compress the /var/lib/ceph directory" task), patch
set 6:

Luke Short (2020-03-19):
    We may want to wrap this in a "block". If the tarball fails for
    whatever reason, the service is never started again. Using "block", you
    can add an "always" section to start the services back up again.
    https://docs.ansible.com/ansible/latest/user_guide/playbooks_blocks.html

Juan Badia Payno (2020-03-19):
    My idea was to wrap this whole set of tasks into a block so we are able
    to do one node at a time.
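A sketch of Luke's block/always suggestion, reusing the task names and
variables quoted in this review (the exact task list is illustrative): stop
and archive inside "block", restart inside "always", so a failed tar cannot
leave the service down:

    - name: Backup /var/lib/ceph, restarting services even on failure
      become: true
      block:
        - name: Stop ceph monitor
          systemd:
            state: stopped
            name: "ceph-mon@{{ ansible_hostname }}"

        # The archive module cannot pass the xattr/ACL flags, hence shell.
        - name: Tar and Compress the /var/lib/ceph directory
          shell: |-
            tar -zcv --xattrs-include=*.* \
              --xattrs \
              --xattrs-include=security.capability \
              --xattrs-include=security.selinux \
              --acls \
              -f {{ tripleo_backup_and_restore_ceph_backup_file }} \
              {{ tripleo_backup_and_restore_ceph_path }}
      always:
        - name: Start ceph monitor even if the backup failed
          systemd:
            state: started
            name: "ceph-mon@{{ ansible_hostname }}"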
On line 57 (the tar command), patch set 6:

Luke Short (2020-03-19):
    The Ansible "archive" module does not support custom extended
    attributes. It might be worth leaving a comment above the task to note
    that this is why we are using the "shell" module instead.
    https://docs.ansible.com/ansible/latest/modules/archive_module.html

Juan Badia Payno (2020-03-19):
    Sure, I will add a comment.

On line 44 ("- name: Wait for ceph monitor service to stop / pause:
seconds=8"), patch set 8:

Luke Short (2020-03-23):
    The `systemd` task that runs before this should only complete and move
    on to the next task once the service is stopped. Have you tried testing
    it without the pause? If it fails without one, that would be a bug in
    Ansible that should be raised with them and noted here.

On line 55 ("Wait for ceph management service to stop"), patch set 8:

Luke Short (2020-03-23):
    Same.

On line 57 ("# The shell command is used cos the archive ansible module can
not use / # extra flags needed."), patch set 8:

Luke Short (2020-03-23):
    "cos" should be "because".

On line 61 (the same comment), patch set 9:

Juan Badia Payno (2020-03-26):
    I forgot to change this :(
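If the fixed eight-second pauses ever prove either insufficient or
unnecessary (per Luke's question), a more self-documenting alternative is
to poll the unit until systemd reports it inactive. A sketch only, not code
from the change:

    - name: Wait until the ceph monitor unit is really stopped
      command: systemctl is-active "ceph-mon@{{ ansible_hostname }}"
      register: mon_state
      failed_when: false
      changed_when: false
      retries: 8
      delay: 1
      until: mon_state.stdout == "inactive"
      become: true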
yes"},{"line_number":86,"context_line":""},{"line_number":87,"context_line":"- name: Umount nfs shared directory"}],"source_content_type":"text/x-yaml","patch_set":12,"id":"1f493fa4_612c04ac","line":84,"updated":"2020-04-28 17:04:47.000000000","message":"linters: [error] syntax error: expected \u003cblock end\u003e, but found \u0027\u003cscalar\u003e\u0027","commit_id":"fdad23f807172fad55be7166fc9dd07ee254f0e6"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"1e9f4b0c3570733bcba047564829507155c16b19","unresolved":false,"context_lines":[{"line_number":82,"context_line":"  copy:"},{"line_number":83,"context_line":"    src: \"{{ tripleo_backup_and_restore_ceph_backup_file }}\""},{"line_number":84,"context_line":"    dest: \"{{ tripleo_backup_and_restore_temporary_dir.path }}/{{ ansible_hostname }}\"/"},{"line_number":85,"context_line":"    remote_src: yes"},{"line_number":86,"context_line":""},{"line_number":87,"context_line":"- name: Umount nfs shared directory"},{"line_number":88,"context_line":"  mount:"}],"source_content_type":"text/x-yaml","patch_set":12,"id":"1f493fa4_012380be","line":85,"updated":"2020-04-28 17:04:47.000000000","message":"linters: [warning] truthy value should be true or false (truthy)","commit_id":"fdad23f807172fad55be7166fc9dd07ee254f0e6"},{"author":{"_account_id":22348,"name":"Zuul","username":"zuul","tags":["SERVICE_USER"]},"tag":"autogenerated:zuul:check","change_message_id":"3d1bdfd78f3a5a3d721d9c8b6b987777aa10aa07","unresolved":false,"context_lines":[{"line_number":82,"context_line":"  copy:"},{"line_number":83,"context_line":"    src: \"{{ tripleo_backup_and_restore_ceph_backup_file }}\""},{"line_number":84,"context_line":"    dest: \"{{ tripleo_backup_and_restore_temporary_dir.path }}/{{ ansible_hostname }}\""},{"line_number":85,"context_line":"    remote_src: yes"},{"line_number":86,"context_line":""},{"line_number":87,"context_line":"- name: Umount nfs shared directory"},{"line_number":88,"context_line":"  mount:"}],"source_content_type":"text/x-yaml","patch_set":13,"id":"1f493fa4_aa06e1cc","line":85,"updated":"2020-04-28 18:24:55.000000000","message":"linters: [warning] truthy value should be true or false (truthy)","commit_id":"724fe01c7930c8bbba6203d80dbbb3fc37c5b705"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"0bb53d093f1c5283c26311d969fb75a1c2237f3c","unresolved":false,"context_lines":[{"line_number":1,"context_line":"---"},{"line_number":2,"context_line":"# Copyright 2019 Red Hat, Inc."},{"line_number":3,"context_line":"# All Rights Reserved."},{"line_number":4,"context_line":"#"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_c967ed4c","line":1,"updated":"2020-04-29 09:53:25.000000000","message":"do I understand correctly this will run on one controller at a time and take a backup of the ceph mons even though we don\u0027t consider it sufficient in case of DR ... 
because we\u0027ll have in the docs a section explaining how to take the a backup of all nodes at the same time?","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"ebe0222fcde7abaf0c56edde1906676e27180274","unresolved":false,"context_lines":[{"line_number":1,"context_line":"---"},{"line_number":2,"context_line":"# Copyright 2019 Red Hat, Inc."},{"line_number":3,"context_line":"# All Rights Reserved."},{"line_number":4,"context_line":"#"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_86364b50","line":1,"in_reply_to":"1f493fa4_6c06a754","updated":"2020-04-30 18:24:09.000000000","message":"ah ok thanks","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"},{"author":{"_account_id":22954,"name":"Juan Badia Payno","email":"jbadiapa@redhat.com","username":"jbadiapa"},"change_message_id":"a7ae9c01756422bb51060d327e9585bbe9426ed9","unresolved":false,"context_lines":[{"line_number":1,"context_line":"---"},{"line_number":2,"context_line":"# Copyright 2019 Red Hat, Inc."},{"line_number":3,"context_line":"# All Rights Reserved."},{"line_number":4,"context_line":"#"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_6c06a754","line":1,"in_reply_to":"1f493fa4_c967ed4c","updated":"2020-04-29 10:45:22.000000000","message":"This task is the pre-restoration task to do a backup of the /var/lib/ceph directory.","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"0bb53d093f1c5283c26311d969fb75a1c2237f3c","unresolved":false,"context_lines":[{"line_number":57,"context_line":"      --xattrs-include\u003dsecurity.capability \\"},{"line_number":58,"context_line":"      --xattrs-include\u003dsecurity.selinux \\"},{"line_number":59,"context_line":"      --acls \\"},{"line_number":60,"context_line":"      -f {{ tripleo_backup_and_restore_ceph_backup_file }} \\"},{"line_number":61,"context_line":"      {{ tripleo_backup_and_restore_ceph_path }}"},{"line_number":62,"context_line":"  become: true"},{"line_number":63,"context_line":"  tags:"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_29ff7167","line":60,"updated":"2020-04-29 09:53:25.000000000","message":"this seems to be saved within the container fs as well","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"},{"author":{"_account_id":22954,"name":"Juan Badia Payno","email":"jbadiapa@redhat.com","username":"jbadiapa"},"change_message_id":"a7ae9c01756422bb51060d327e9585bbe9426ed9","unresolved":false,"context_lines":[{"line_number":57,"context_line":"      --xattrs-include\u003dsecurity.capability \\"},{"line_number":58,"context_line":"      --xattrs-include\u003dsecurity.selinux \\"},{"line_number":59,"context_line":"      --acls \\"},{"line_number":60,"context_line":"      -f {{ tripleo_backup_and_restore_ceph_backup_file }} \\"},{"line_number":61,"context_line":"      {{ tripleo_backup_and_restore_ceph_path }}"},{"line_number":62,"context_line":"  become: true"},{"line_number":63,"context_line":"  tags:"}],"source_content_type":"text/x-yaml","patch_set":14,"id":"1f493fa4_cc51bb38","line":60,"in_reply_to":"1f493fa4_29ff7167","updated":"2020-04-29 10:45:22.000000000","message":"The file is outside the container.\nFurthermore the file is copied to an external nfs directory as it can be seen below in this 
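The copy task with both linter findings addressed: the stray "/" dropped,
per the patch set 13 context, and the truthy value spelled out:

    - name: Copy the backup to the shared directory
      copy:
        src: "{{ tripleo_backup_and_restore_ceph_backup_file }}"
        dest: "{{ tripleo_backup_and_restore_temporary_dir.path }}/{{ ansible_hostname }}"
        remote_src: true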
task.","commit_id":"45f3e9c2bbb8564f9abd842ff68b3b6e4b6be8da"},{"author":{"_account_id":6796,"name":"Giulio Fidente","email":"gfidente@redhat.com","username":"gfidente"},"change_message_id":"9055ca0a6514987c09e7373a875cba45d67bc2fe","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  tags:"},{"line_number":41,"context_line":"    - bar_create_recover_image"},{"line_number":42,"context_line":""},{"line_number":43,"context_line":"- name: Stop ceph management"},{"line_number":44,"context_line":"  systemd:"},{"line_number":45,"context_line":"    state: stopped"},{"line_number":46,"context_line":"    name: \"ceph-mgr@{{ ansible_hostname }}\""}],"source_content_type":"text/x-yaml","patch_set":15,"id":"ff570b3c_f9e23e9f","line":43,"updated":"2020-05-21 16:15:04.000000000","message":"I think it would be best to stop the ceph-mds instances as well here\n\nceph-mds@{{ ansible_hostname }}\n\nbut it might not exist on all clusters so we\u0027d need to pass if it isn\u0027t found","commit_id":"2d6241179bd0cd77db5bb0fb711e229677157a07"},{"author":{"_account_id":32018,"name":"Randy Martinez","email":"ramartin@redhat.com"},"change_message_id":"7722762fe88b58e1b31be623d899199b9cede73a","unresolved":false,"context_lines":[{"line_number":34,"context_line":""},{"line_number":35,"context_line":"- name: Stop ceph monitor"},{"line_number":36,"context_line":"  systemd:"},{"line_number":37,"context_line":"    state: stopped"},{"line_number":38,"context_line":"    name: \"ceph-mon@{{ ansible_hostname }}\""},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"}],"source_content_type":"text/x-yaml","patch_set":16,"id":"ff570b3c_a1a7ab4f","line":37,"updated":"2020-05-26 16:09:35.000000000","message":"+ enabled: no","commit_id":"a7d2e603c36559d1ede9a0faf7bfbdf2e74b36a3"},{"author":{"_account_id":32018,"name":"Randy Martinez","email":"ramartin@redhat.com"},"change_message_id":"2fc1a606cf49abff8757d2e57929751928ef328d","unresolved":false,"context_lines":[{"line_number":34,"context_line":""},{"line_number":35,"context_line":"- name: Stop ceph monitor"},{"line_number":36,"context_line":"  systemd:"},{"line_number":37,"context_line":"    state: stopped"},{"line_number":38,"context_line":"    name: \"ceph-mon@{{ ansible_hostname }}\""},{"line_number":39,"context_line":"  become: true"},{"line_number":40,"context_line":"  tags:"}],"source_content_type":"text/x-yaml","patch_set":16,"id":"ff570b3c_fb192488","line":37,"in_reply_to":"ff570b3c_1feb52b0","updated":"2020-05-28 16:05:03.000000000","message":"Actually, you\u0027re right. I made an assumption here as this is how it\u0027s done in ceph-ansible. 
Giulio Fidente (in reply, 2020-05-26):
  systemd won't start a unit which was manually stopped though; is this to be safe in case of, for example, a systemctl reload, or is there more to it?

Randy Martinez (in reply, 2020-05-28):
  Actually, you're right. I made an assumption here, as this is how it's done in ceph-ansible. I don't think stop+disable will hurt (followed with start+enable during restore), but after validating in my lab it's not required.

Alex Schultz (in reply, 2020-05-28):
  enabled is only needed to prevent startup on reboot. I don't think it's actually what you want here if you are only looking to temporarily stop a service.
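The two variants being weighed, side by side; which one is right depends on whether a node may reboot between the stop and the eventual restore:

    # Temporary stop only: the unit still comes back on reboot
    - name: Stop ceph monitor
      systemd:
        state: stopped
        name: "ceph-mon@{{ ansible_hostname }}"
      become: true

    # Stop and disable: an accidental reboot cannot bring the mon
    # back before the backup has been restored
    - name: Stop and disable ceph monitor
      systemd:
        state: stopped
        enabled: false
        name: "ceph-mon@{{ ansible_hostname }}"
      become: true

Giulio's later comment on patch set 18 argues for the second form, for exactly the reboot-before-restore case.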
Randy Martinez, patch set 16, line 45 (2020-05-26):
    43 | - name: Stop ceph management
    44 |   systemd:
    45 |     state: stopped
    46 |     name: "ceph-mgr@{{ ansible_hostname }}"
  + enabled: no

Randy Martinez, patch set 16, line 59 (2020-05-26):
    59 | - name: Stop ceph mds
    60 |   systemd:
    61 |     state: stopped
    62 |     name: "ceph-mds@{{ ansible_hostname }}"
  There are quite a few steps necessary to drain MDSs before they can safely be stopped. Namely: https://github.com/ceph/ceph-ansible/blob/v4.0.14/infrastructure-playbooks/rolling_update.yml#L550-L623
Giulio Fidente (in reply, 2020-05-26):
  thanks for pointing this out but, curious what happens if we use only the units to stop the daemons? is there a particular problem with the scenario in which all instances are down at the same time?

  shall we switch off ganesha instances as well?

Randy Martinez (in reply, 2020-05-28):
  All clients on ctrls should be stopped, yes (ganesha, rgws, mdss, etc.) before stopping the mons. Also, the minute the mons go down, your VMs' io will be at risk IF an OSD goes down/stops during the same timeframe. I just performed a benchmark, stopped all mons, and io resumed as expected; however, I took it a step further and stopped an OSD, at which point all io halted.

Randy Martinez, patch set 16, line 61 (2020-05-26):
    61 |     state: stopped
    62 |     name: "ceph-mds@{{ ansible_hostname }}"
  + enabled: no
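From patch set 16 on, the mds stop is guarded by an existence check, since not every cluster deploys MDSs (see the when: condition quoted in the next comment). The task that registers bar_ceph_mds_exits is not visible in this thread; a hypothetical sketch of the pattern:

    # Hypothetical check; the real task behind bar_ceph_mds_exits
    # is outside the quoted context
    - name: Check whether a ceph-mds unit runs on this node
      shell: systemctl list-units 'ceph-mds@*' --no-legend | wc -l
      register: bar_ceph_mds_exits
      changed_when: false

    - name: Stop ceph mds
      systemd:
        state: stopped
        name: "ceph-mds@{{ ansible_hostname }}"
      become: true
      when: bar_ceph_mds_exits.stdout != "0"

Note this only guards against the unit being absent; it does not perform the MDS draining Randy links to above.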
}}\""},{"line_number":77,"context_line":"  become: true"},{"line_number":78,"context_line":"  when: bar_ceph_mds_exits.stdout!\u003d\"0\""}],"source_content_type":"text/x-yaml","patch_set":18,"id":"ff570b3c_ae3ec887","line":75,"in_reply_to":"ff570b3c_16971b3c","updated":"2020-06-05 09:48:41.000000000","message":"more than to run manual checks, this is to make sure on reboot they don\u0027t start again *before* the actual backup has been *restored*","commit_id":"87678f5368abe75315e71bea8c985fc688c39590"},{"author":{"_account_id":11085,"name":"Toure Dunnon","email":"toure@redhat.com","username":"Toure"},"change_message_id":"f863d9366dd334f4ee64dcc50cb33277314016e6","unresolved":false,"context_lines":[{"line_number":72,"context_line":""},{"line_number":73,"context_line":"- name: Stop ceph mds"},{"line_number":74,"context_line":"  systemd:"},{"line_number":75,"context_line":"    state: stopped"},{"line_number":76,"context_line":"    name: \"ceph-mds@{{ ansible_hostname }}\""},{"line_number":77,"context_line":"  become: true"},{"line_number":78,"context_line":"  when: bar_ceph_mds_exits.stdout!\u003d\"0\""}],"source_content_type":"text/x-yaml","patch_set":18,"id":"ff570b3c_16971b3c","line":75,"in_reply_to":"ff570b3c_753dcac2","updated":"2020-06-04 15:49:19.000000000","message":"@Giulio is this to ensure that we run manual checks before starting the ceph mons? If so do you know of any validation plays we may include to do this automatically or would this still require a human touch to inspect the outcome?","commit_id":"87678f5368abe75315e71bea8c985fc688c39590"},{"author":{"_account_id":22954,"name":"Juan Badia Payno","email":"jbadiapa@redhat.com","username":"jbadiapa"},"change_message_id":"93542c5df36c53124a55152285e9b9438c14a48f","unresolved":false,"context_lines":[{"line_number":72,"context_line":""},{"line_number":73,"context_line":"- name: Stop ceph mds"},{"line_number":74,"context_line":"  systemd:"},{"line_number":75,"context_line":"    state: stopped"},{"line_number":76,"context_line":"    name: \"ceph-mds@{{ ansible_hostname }}\""},{"line_number":77,"context_line":"  become: true"},{"line_number":78,"context_line":"  when: bar_ceph_mds_exits.stdout!\u003d\"0\""}],"source_content_type":"text/x-yaml","patch_set":18,"id":"ff570b3c_2e4d98c3","line":75,"in_reply_to":"ff570b3c_ae3ec887","updated":"2020-08-01 08:21:32.000000000","message":"The restoration of the directory is with all the services stopped. It\u0027ll be restored on a rear init console.","commit_id":"87678f5368abe75315e71bea8c985fc688c39590"}]}
