)]}'
{"/COMMIT_MSG":[{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"287a9555829ee057fee0dc56fe8f6919f2ef75b0","unresolved":false,"context_lines":[{"line_number":4,"context_line":"Commit:     melanie witt \u003cmelwittt@gmail.com\u003e"},{"line_number":5,"context_line":"CommitDate: 2018-03-27 01:27:56 +0000"},{"line_number":6,"context_line":""},{"line_number":7,"context_line":"rbd: use MAX_AVAIL stat for reporting bytes available"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Change-Id: I96faff6d3b9747514441d83c629fdd1cface1eb5"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"3f79a3b5_8783914f","line":7,"range":{"start_line":7,"start_character":0,"end_line":7,"end_character":53},"updated":"2018-12-17 15:12:01.000000000","message":"I see context for this in the code itself, but this does sound like something that there should be a bug and possibly a reno for. \nRight?","commit_id":"1e9e2b44a51548383c84a305e7d1292a17cd1597"},{"author":{"_account_id":4690,"name":"melanie witt","display_name":"melwitt","email":"melwittt@gmail.com","username":"melwitt"},"change_message_id":"dd26729cc6c5309e5556b99458222fb879b16058","unresolved":false,"context_lines":[{"line_number":4,"context_line":"Commit:     melanie witt \u003cmelwittt@gmail.com\u003e"},{"line_number":5,"context_line":"CommitDate: 2018-03-27 01:27:56 +0000"},{"line_number":6,"context_line":""},{"line_number":7,"context_line":"rbd: use MAX_AVAIL stat for reporting bytes available"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Change-Id: I96faff6d3b9747514441d83c629fdd1cface1eb5"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"9fb8cfa7_cd9caf6b","line":7,"range":{"start_line":7,"start_character":0,"end_line":7,"end_character":53},"in_reply_to":"3f79a3b5_8783914f","updated":"2019-06-14 15:48:28.000000000","message":"It\u0027s not really a bug in that there\u0027s no incorrect reporting of bytes available for deployers who deploy one OSD per disk as recommended by the ceph documentation: \"Running multiple OSDs on a single disk–irrespective of partitions–is NOT a good idea.\" [1].\n\nBut for deployers who _do_ deploy more than one OSD per disk, this change will cause a change from an inflated bytes available number (multiplied by the number of configured replicas) to a correct bytes available number (inflated number divided by the number of configured replicas \u003d correct number).\n\nI\u0027m torn about it because those seeing a wrong number are deploying incorrectly/against the rules specified in the ceph docs.\n\nBut since we _can_ do this to improve the calculation for people who are deploying incorrectly, we might as well.\n\nDo what to do? Bug? Reno? Anyone have an opinion? \nI lean toward reno only, but happy to do whatever.\n\n[1] http://docs.ceph.com/docs/luminous/start/hardware-recommendations/#hard-disk-drives","commit_id":"1e9e2b44a51548383c84a305e7d1292a17cd1597"}],"nova/virt/libvirt/storage/rbd_utils.py":[{"author":{"_account_id":6167,"name":"Ken\u0027ichi Ohmichi","email":"ken1ohmichi@gmail.com","username":"oomichi"},"change_message_id":"1e94306f591b2b71a9564025efb257f27902f391","unresolved":false,"context_lines":[{"line_number":363,"context_line":"            for volume in filter(filter_fn, volumes):"},{"line_number":364,"context_line":"                self._destroy_volume(client, volume)"},{"line_number":365,"context_line":""},{"line_number":366,"context_line":"    def get_pool_info(self):"},{"line_number":367,"context_line":"        # NOTE(melwitt): We\u0027re executing \u0027ceph df\u0027 here instead of calling"},{"line_number":368,"context_line":"        # the RADOSClient.get_cluster_stats python API because we need"},{"line_number":369,"context_line":"        # access to the MAX_AVAIL stat, which reports the available bytes"}],"source_content_type":"text/x-python","patch_set":1,"id":"bf659307_f271f30d","line":366,"updated":"2018-03-27 18:31:34.000000000","message":"We might need unit tests for this change even if we don\u0027t have any unit tests for this method now.","commit_id":"1e9e2b44a51548383c84a305e7d1292a17cd1597"},{"author":{"_account_id":15334,"name":"Stephen Finucane","display_name":"stephenfin","email":"stephenfin@redhat.com","username":"sfinucan"},"change_message_id":"287a9555829ee057fee0dc56fe8f6919f2ef75b0","unresolved":false,"context_lines":[{"line_number":363,"context_line":"            for volume in filter(filter_fn, volumes):"},{"line_number":364,"context_line":"                self._destroy_volume(client, volume)"},{"line_number":365,"context_line":""},{"line_number":366,"context_line":"    def get_pool_info(self):"},{"line_number":367,"context_line":"        # NOTE(melwitt): We\u0027re executing \u0027ceph df\u0027 here instead of calling"},{"line_number":368,"context_line":"        # the RADOSClient.get_cluster_stats python API because we need"},{"line_number":369,"context_line":"        # access to the MAX_AVAIL stat, which reports the available bytes"}],"source_content_type":"text/x-python","patch_set":1,"id":"3f79a3b5_87aab1cd","line":366,"in_reply_to":"bf659307_f271f30d","updated":"2018-12-17 15:12:01.000000000","message":"+1","commit_id":"1e9e2b44a51548383c84a305e7d1292a17cd1597"}]}
