)]}'
{"/PATCHSET_LEVEL":[{"author":{"_account_id":36737,"name":"ohjiwoo","email":"jiwooo.oh@samsung.com","username":"ohjiwooo"},"change_message_id":"6eedcc9193aab41219404a019282661a5f88583d","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":6,"id":"9960c747_7217b27d","updated":"2026-04-20 05:34:45.000000000","message":"recheck: cinder-code-coverage, cinder-plugin-ceph-tempest-mn-aa","commit_id":"47e6172ffc82d501f1dc9df6c9b1578f0cee47c9"},{"author":{"_account_id":36171,"name":"jayaanand borra","display_name":"jayaanand borra","email":"jayaanand.borra@netapp.com","username":"jayaanan","status":"netapp"},"change_message_id":"416484a8cafbe39de21e16de1d2d2bc92c0e8436","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":6,"id":"090671a1_e4427c2b","updated":"2026-04-23 06:27:28.000000000","message":"release notes are missing","commit_id":"47e6172ffc82d501f1dc9df6c9b1578f0cee47c9"},{"author":{"_account_id":36737,"name":"ohjiwoo","email":"jiwooo.oh@samsung.com","username":"ohjiwooo"},"change_message_id":"5e76176c700e61056258c32710b1ddc4de7a103a","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":6,"id":"73278fe4_bc3aa2cc","updated":"2026-04-17 02:14:43.000000000","message":"rerun","commit_id":"47e6172ffc82d501f1dc9df6c9b1578f0cee47c9"},{"author":{"_account_id":36737,"name":"ohjiwoo","email":"jiwooo.oh@samsung.com","username":"ohjiwooo"},"change_message_id":"26f3943dd7f164d97a971f1a5a0075009b0b8932","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":8,"id":"8b5871d2_76f16835","updated":"2026-05-07 02:05:32.000000000","message":"In our production deployment we run Cinder in a clustered configuration and rely on the VolumeCountWeigher for backend selection. Because our storage backends impose a limit on the number of volumes, the scheduler must take the total volume count into account when choosing a backend. \nThe current weigher only counts volumes on the local host, so volumes that already exist on other hosts in the same cluster are ignored. This patch updates the weigher to aggregate the volume count across all hosts in the cluster. Importantly, the volume count is fetched per‑cluster from Cinder’s database, so moving a pod or a host going down does not cause previously created volumes to be omitted from the calculation.","commit_id":"5b7968f929104bc9342790e5d7306cfae1120210"},{"author":{"_account_id":10058,"name":"Erlon R. Cruz","email":"erlon.rodrigues.cruz@canonical.com","username":"sombrafam"},"change_message_id":"001d8740c70435a873950bed89d8521c6c77a343","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":8,"id":"a10a0b2b_35ad8059","updated":"2026-05-06 12:25:41.000000000","message":"Is this bug coming from a real-world deployment or issue? This weigher is rarely used and is even more unlikely to be employed in a clustering configuration. This path is also not covered in any Tempest tests, so I would prefer not to address it unless there\u0027s a real justification and a proper scenario test.","commit_id":"5b7968f929104bc9342790e5d7306cfae1120210"}],"cinder/scheduler/weights/volume_number.py":[{"author":{"_account_id":36686,"name":"Inyong Hong","display_name":"hongp","email":"inyong.hong@samsung.com","username":"hong-p"},"change_message_id":"dd16367ff0bef0d9bf22e5f8c2a9930e477f6b21","unresolved":true,"context_lines":[{"line_number":51,"context_line":"        context \u003d weight_properties[\u0027context\u0027]"},{"line_number":52,"context_line":"        context \u003d context.elevated()"},{"line_number":53,"context_line":""},{"line_number":54,"context_line":"        if host_state.cluster_name:"},{"line_number":55,"context_line":"            filters \u003d {\u0027cluster_name\u0027: host_state.cluster_name}"},{"line_number":56,"context_line":"            volume_number \u003d db.calculate_resource_count("},{"line_number":57,"context_line":"                context, \u0027volume\u0027, filters)"},{"line_number":58,"context_line":"        else:"},{"line_number":59,"context_line":"            filters \u003d {\u0027host\u0027: host_state.host}"},{"line_number":60,"context_line":"            volume_number \u003d db.calculate_resource_count("},{"line_number":61,"context_line":"                context, \u0027volume\u0027, filters)"},{"line_number":62,"context_line":""},{"line_number":63,"context_line":"        return volume_number"}],"source_content_type":"text/x-python","patch_set":2,"id":"4d9b7f4d_7f495fac","line":61,"range":{"start_line":54,"start_character":6,"end_line":61,"end_character":32},"updated":"2026-03-09 01:20:01.000000000","message":"nit: how about like this\n```suggestion\n        if host_state.cluster_name:\n            filters \u003d {\u0027cluster_name\u0027: host_state.cluster_name}\n        else:\n            filters \u003d {\u0027host\u0027: host_state.host}\n            \n        volume_number \u003d db.calculate_resource_count(\n            context, \u0027volume\u0027, filters)\n```","commit_id":"d2e0aa4baf56935882a44fb7cdf1e8916de8ae27"},{"author":{"_account_id":36171,"name":"jayaanand borra","display_name":"jayaanand borra","email":"jayaanand.borra@netapp.com","username":"jayaanan","status":"netapp"},"change_message_id":"416484a8cafbe39de21e16de1d2d2bc92c0e8436","unresolved":true,"context_lines":[{"line_number":31,"context_line":""},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"class VolumeNumberWeigher(weights.BaseHostWeigher):"},{"line_number":34,"context_line":"    \"\"\"Weigher that weighs hosts by volume number in backends."},{"line_number":35,"context_line":""},{"line_number":36,"context_line":"    The default is to spread volumes across all hosts evenly. If you prefer"},{"line_number":37,"context_line":"    stacking, you can set the ``volume_number_multiplier`` option to a positive"}],"source_content_type":"text/x-python","patch_set":6,"id":"ee73ddc5_3c683ebd","line":34,"updated":"2026-04-23 06:27:28.000000000","message":"update comments to reflect changes","commit_id":"47e6172ffc82d501f1dc9df6c9b1578f0cee47c9"},{"author":{"_account_id":36171,"name":"jayaanand borra","display_name":"jayaanand borra","email":"jayaanand.borra@netapp.com","username":"jayaanan","status":"netapp"},"change_message_id":"416484a8cafbe39de21e16de1d2d2bc92c0e8436","unresolved":true,"context_lines":[{"line_number":56,"context_line":"        else:"},{"line_number":57,"context_line":"            filters \u003d {\u0027host\u0027: host_state.host}"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"        volume_number \u003d db.calculate_resource_count("},{"line_number":60,"context_line":"            context, \u0027volume\u0027, filters)"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"        return volume_number"}],"source_content_type":"text/x-python","patch_set":6,"id":"bf7f4ad0_8b75d3a4","line":59,"updated":"2026-04-23 06:27:28.000000000","message":"volume_data_get_for_host is executed with require_admin_context. This is not true with calculate_resource_count. \nnon-admin context can query volume counts is good for your usecase?","commit_id":"47e6172ffc82d501f1dc9df6c9b1578f0cee47c9"}],"cinder/tests/unit/scheduler/test_volume_number_weigher.py":[{"author":{"_account_id":36686,"name":"Inyong Hong","display_name":"hongp","email":"inyong.hong@samsung.com","username":"hong-p"},"change_message_id":"dd16367ff0bef0d9bf22e5f8c2a9930e477f6b21","unresolved":true,"context_lines":[{"line_number":81,"context_line":"            disabled\u003ddisabled)"},{"line_number":82,"context_line":"        return backend_states"},{"line_number":83,"context_line":""},{"line_number":84,"context_line":"    def test_volume_number_weight_multiplier1(self):"},{"line_number":85,"context_line":"        self.flags(volume_number_multiplier\u003d-1.0)"},{"line_number":86,"context_line":"        backend_info_list \u003d self._get_all_backends()"},{"line_number":87,"context_line":""}],"source_content_type":"text/x-python","patch_set":2,"id":"fd89af1d_bd687170","line":84,"updated":"2026-03-09 01:20:01.000000000","message":"Could you add unit tests like `test_volume_number_weight_with_cluster_multiplier1` to verify the behavior when the cluster option is enabled?\n```suggestion\n    def test_volume_number_weight_with_host_multiplier1(self):\n```","commit_id":"d2e0aa4baf56935882a44fb7cdf1e8916de8ae27"},{"author":{"_account_id":36171,"name":"jayaanand borra","display_name":"jayaanand borra","email":"jayaanand.borra@netapp.com","username":"jayaanan","status":"netapp"},"change_message_id":"416484a8cafbe39de21e16de1d2d2bc92c0e8436","unresolved":true,"context_lines":[{"line_number":155,"context_line":"        # cluster2: 2 volumes"},{"line_number":156,"context_line":"        # cluster3: 3 volumes"},{"line_number":157,"context_line":"        # cluster4: 4 volumes"},{"line_number":158,"context_line":"        # cluster5: 5 volumes   Norm\u003d-1.0"},{"line_number":159,"context_line":"        # so, cluster5 should win:"},{"line_number":160,"context_line":"        with mock.patch.object(db, \u0027calculate_resource_count\u0027,"},{"line_number":161,"context_line":"                               fake_calculate_resource_count):"}],"source_content_type":"text/x-python","patch_set":6,"id":"f33abe47_7a4042d2","line":158,"updated":"2026-04-23 06:27:28.000000000","message":"With multiplier\u003d1.0 (stacking), the cluster with the most volumes wins with Norm\u003d1.0. The comment incorrectly states Norm\u003d-1.0 — copy-pasted from the multiplier1 test without correction. The correct comment for cluster5 is Norm\u003d1.0.","commit_id":"47e6172ffc82d501f1dc9df6c9b1578f0cee47c9"}]}
