)]}'
{"/COMMIT_MSG":[{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":7,"context_line":"Support volume local cache"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"Use fast NVME SSD to cache for slow remote volumes"},{"line_number":10,"context_line":""},{"line_number":11,"context_line":"Change-Id: I5ed7626ad5c45514bcdedaf1625778e2380b3cac"},{"line_number":12,"context_line":"Blueprint: support-volume-local-cache"},{"line_number":13,"context_line":"Signed-off-by: Liang Fang \u003cliang.a.fang@intel.com\u003e"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":1,"id":"3fa7e38b_2b1de472","line":10,"updated":"2019-10-22 14:11:58.000000000","message":"Please add a Depends-On: link to the os-brick spec","commit_id":"1532f1489bd96b9208800a71357333c462536b74"}],"specs/ussuri/approved/support-volume-local-cache.rst":[{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":39,"context_line":""},{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume. os-brick will call cache software"},{"line_number":43,"context_line":"    to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_ab8f942c","line":42,"range":{"start_line":42,"start_character":11,"end_line":42,"end_character":44},"updated":"2019-10-22 14:11:58.000000000","message":"A reference to the os-brick feature used here would be nice. I guess this is the reference [2] in the spec.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"e22abc4ba1f26fea9f23212f5e46fd0251f0895f","unresolved":false,"context_lines":[{"line_number":39,"context_line":""},{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume. os-brick will call cache software"},{"line_number":43,"context_line":"    to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_607a6b77","line":42,"range":{"start_line":42,"start_character":11,"end_line":42,"end_character":44},"in_reply_to":"3fa7e38b_ab8f942c","updated":"2019-11-03 14:56:40.000000000","message":"Done","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":54,"context_line":""},{"line_number":55,"context_line":"  - Step3) Replace the path in connection_info with the original volume path"},{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":""},{"line_number":61,"context_line":"POC code can be found in [1]_"},{"line_number":62,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_2bd64429","line":59,"range":{"start_line":57,"start_character":1,"end_line":59,"end_character":43},"updated":"2019-10-22 14:11:58.000000000","message":"As it seems this feature is virt driver dependent I guess this config needs to be in the [libvirt] section of the config.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"e22abc4ba1f26fea9f23212f5e46fd0251f0895f","unresolved":false,"context_lines":[{"line_number":54,"context_line":""},{"line_number":55,"context_line":"  - Step3) Replace the path in connection_info with the original volume path"},{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":""},{"line_number":61,"context_line":"POC code can be found in [1]_"},{"line_number":62,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_80f20704","line":59,"range":{"start_line":57,"start_character":1,"end_line":59,"end_character":43},"in_reply_to":"3fa7e38b_2bd64429","updated":"2019-11-03 14:56:40.000000000","message":"sounds like reasonable to add in [libvirt] section","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":""},{"line_number":61,"context_line":"POC code can be found in [1]_"},{"line_number":62,"context_line":""},{"line_number":63,"context_line":"Alternatives"},{"line_number":64,"context_line":"------------"},{"line_number":65,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_2bc164e5","line":62,"updated":"2019-10-22 14:11:58.000000000","message":"So the proposal is to change the device_path stored connection_info. Sure we update it when we connect the volume and then when we disconnect it. Is there any other possible operation when we need to make sure that the device_path in the connection_info is up to date?","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"e22abc4ba1f26fea9f23212f5e46fd0251f0895f","unresolved":false,"context_lines":[{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":""},{"line_number":61,"context_line":"POC code can be found in [1]_"},{"line_number":62,"context_line":""},{"line_number":63,"context_line":"Alternatives"},{"line_number":64,"context_line":"------------"},{"line_number":65,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_20dd536f","line":62,"in_reply_to":"3fa7e38b_2bc164e5","updated":"2019-11-03 14:56:40.000000000","message":"POC code looks works fine. Agree to search all the reference of connection_info in all the code. Will add comments for this later.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":63,"context_line":"Alternatives"},{"line_number":64,"context_line":"------------"},{"line_number":65,"context_line":""},{"line_number":66,"context_line":"* Cache can be added in backend storage side, e.g. in ceph. Storage server"},{"line_number":67,"context_line":"  normally has its own cache mechanism, e.g. using memory as cache, or using"},{"line_number":68,"context_line":"  NVME SSD as cache."},{"line_number":69,"context_line":""},{"line_number":70,"context_line":"* Create a dedicated cache cluster. Mount all the cache (NVME SSD) in cache"},{"line_number":71,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_abd7944f","line":68,"range":{"start_line":66,"start_character":0,"end_line":68,"end_character":20},"updated":"2019-10-22 14:11:58.000000000","message":"Do you mean that this cache in is the storage server and therefore behind the storage network? Which means that this does not have the same performance as having the cache in the compute side.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"e22abc4ba1f26fea9f23212f5e46fd0251f0895f","unresolved":false,"context_lines":[{"line_number":63,"context_line":"Alternatives"},{"line_number":64,"context_line":"------------"},{"line_number":65,"context_line":""},{"line_number":66,"context_line":"* Cache can be added in backend storage side, e.g. in ceph. Storage server"},{"line_number":67,"context_line":"  normally has its own cache mechanism, e.g. using memory as cache, or using"},{"line_number":68,"context_line":"  NVME SSD as cache."},{"line_number":69,"context_line":""},{"line_number":70,"context_line":"* Create a dedicated cache cluster. Mount all the cache (NVME SSD) in cache"},{"line_number":71,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_e0b41b38","line":68,"range":{"start_line":66,"start_character":0,"end_line":68,"end_character":20},"in_reply_to":"3fa7e38b_abd7944f","updated":"2019-11-03 14:56:40.000000000","message":"Yes, especially latency is not good as cache in compute side locally","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":135,"context_line":"Developer impact"},{"line_number":136,"context_line":"----------------"},{"line_number":137,"context_line":""},{"line_number":138,"context_line":"None"},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"Upgrade impact"},{"line_number":141,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_0b9fc865","line":138,"updated":"2019-10-22 14:11:58.000000000","message":"Based on the POC code the implementation is virt driver dependent. If it is then this should be mentioned.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"e22abc4ba1f26fea9f23212f5e46fd0251f0895f","unresolved":false,"context_lines":[{"line_number":135,"context_line":"Developer impact"},{"line_number":136,"context_line":"----------------"},{"line_number":137,"context_line":""},{"line_number":138,"context_line":"None"},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"Upgrade impact"},{"line_number":141,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_e0e2db2f","line":138,"in_reply_to":"3fa7e38b_0b9fc865","updated":"2019-11-03 14:56:40.000000000","message":"Done","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"497e10928e9c9a5db51aa620bfdfd743e44e4153","unresolved":false,"context_lines":[{"line_number":150,"context_line":""},{"line_number":151,"context_line":"Primary assignee:"},{"line_number":152,"context_line":"  Liang Fang \u003cliang.a.fang@intel.com\u003e"},{"line_number":153,"context_line":""},{"line_number":154,"context_line":"Work Items"},{"line_number":155,"context_line":"----------"},{"line_number":156,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_af2b6492","line":153,"updated":"2019-10-17 03:40:46.000000000","message":"You should add the \"Feature Liaison\" in this spec. Feature work must be sponsored by a member of the nova core team or other experienced and active nova developer. http://specs.openstack.org/openstack/nova-specs/specs/ussuri/implemented/ussuri-template.html#feature-liaison\n\nFeature Liaison\n---------------\n\nFeature liaison:\n  \u003cname and/or nick\u003e\n\nAnd you can get the member by IRC or mail list.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"82c848b1a7032cafd6b80c56f88cf54253b4bb97","unresolved":false,"context_lines":[{"line_number":150,"context_line":""},{"line_number":151,"context_line":"Primary assignee:"},{"line_number":152,"context_line":"  Liang Fang \u003cliang.a.fang@intel.com\u003e"},{"line_number":153,"context_line":""},{"line_number":154,"context_line":"Work Items"},{"line_number":155,"context_line":"----------"},{"line_number":156,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_c625255e","line":153,"in_reply_to":"3fa7e38b_af2b6492","updated":"2019-10-22 14:26:53.000000000","message":"@Liang: I\u0027m OK to be the Feature Liaison to this spec.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"e22abc4ba1f26fea9f23212f5e46fd0251f0895f","unresolved":false,"context_lines":[{"line_number":150,"context_line":""},{"line_number":151,"context_line":"Primary assignee:"},{"line_number":152,"context_line":"  Liang Fang \u003cliang.a.fang@intel.com\u003e"},{"line_number":153,"context_line":""},{"line_number":154,"context_line":"Work Items"},{"line_number":155,"context_line":"----------"},{"line_number":156,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_00de1769","line":153,"in_reply_to":"3fa7e38b_c625255e","updated":"2019-11-03 14:56:40.000000000","message":"Thanks.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":165,"context_line":"Dependencies"},{"line_number":166,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":167,"context_line":""},{"line_number":168,"context_line":"* os-brick: [2]_"},{"line_number":169,"context_line":""},{"line_number":170,"context_line":"Testing"},{"line_number":171,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_0bc5880a","line":168,"updated":"2019-10-22 14:11:58.000000000","message":"I would state that os-brick also needs to be extended and such extension is proposed in [2]. \n\nTo be clear I think nova will not approve the nova spec until there is at least a clear agreement in the os-brick spec.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"9c29aa7a14063ad64866680d9f36b8e82f6c0b7f","unresolved":false,"context_lines":[{"line_number":172,"context_line":""},{"line_number":173,"context_line":"* New unit test should be added"},{"line_number":174,"context_line":""},{"line_number":175,"context_line":"* Function test with simulated cache should be added. This can use bcache,"},{"line_number":176,"context_line":"  dm-cache or open-cas with a local file as NVME device."},{"line_number":177,"context_line":""},{"line_number":178,"context_line":"  - Check if the emulated volume is created for VM or not."},{"line_number":179,"context_line":""}],"source_content_type":"text/x-rst","patch_set":1,"id":"3fa7e38b_cb3e908a","line":176,"range":{"start_line":175,"start_character":0,"end_line":176,"end_character":56},"updated":"2019-10-22 14:11:58.000000000","message":"Can this be done in a devstack in the gate? If yes then we can have jobs where the existing tempest tests covering servers with volumes run with this caching enabled.","commit_id":"1532f1489bd96b9208800a71357333c462536b74"},{"author":{"_account_id":10135,"name":"Lee Yarwood","display_name":"Lee Yarwood","email":"lyarwood@redhat.com","username":"lyarwood"},"change_message_id":"33b39699d6fbfcf3548e3ec9c63e44f8013ee460","unresolved":false,"context_lines":[{"line_number":10,"context_line":""},{"line_number":11,"context_line":"https://blueprints.launchpad.net/nova/+spec/support-volume-local-cache"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"This blueprint proposes to add support of volume local cache in nova. Cache"},{"line_number":14,"context_line":"software such as dm-cache, bcache, open-cas can use fast NVME SSD to cache for"},{"line_number":15,"context_line":"the slow remote volume."},{"line_number":16,"context_line":""},{"line_number":17,"context_line":"Problem description"},{"line_number":18,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_e8e63216","line":15,"range":{"start_line":13,"start_character":0,"end_line":15,"end_character":23},"updated":"2019-11-05 10:39:38.000000000","message":"Local client volume caches wouldn\u0027t work with multiattach volumes spread over multiple computes FWIW.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":10,"context_line":""},{"line_number":11,"context_line":"https://blueprints.launchpad.net/nova/+spec/support-volume-local-cache"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"This blueprint proposes to add support of volume local cache in nova. Cache"},{"line_number":14,"context_line":"software such as dm-cache, bcache, open-cas can use fast NVME SSD to cache for"},{"line_number":15,"context_line":"the slow remote volume."},{"line_number":16,"context_line":""},{"line_number":17,"context_line":"Problem description"},{"line_number":18,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_550b167c","line":15,"range":{"start_line":13,"start_character":0,"end_line":15,"end_character":23},"in_reply_to":"3fa7e38b_bacccb8a","updated":"2019-11-12 10:06:07.000000000","message":"ceph is connected via qemu. But currently nvmeof is connected via hypervisor kernel I think: https://github.com/openstack/nova/blob/master/nova/virt/libvirt/driver.py#L199","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":10,"context_line":""},{"line_number":11,"context_line":"https://blueprints.launchpad.net/nova/+spec/support-volume-local-cache"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"This blueprint proposes to add support of volume local cache in nova. Cache"},{"line_number":14,"context_line":"software such as dm-cache, bcache, open-cas can use fast NVME SSD to cache for"},{"line_number":15,"context_line":"the slow remote volume."},{"line_number":16,"context_line":""},{"line_number":17,"context_line":"Problem description"},{"line_number":18,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_bacccb8a","line":15,"range":{"start_line":13,"start_character":0,"end_line":15,"end_character":23},"in_reply_to":"3fa7e38b_e4f40fd3","updated":"2019-11-08 18:55:51.000000000","message":"it also wont work with ceph right since we us the native rbd support or with the nvmeof cinder driver since that also uses qemu to connect to the volume instead of the hypervior kernel.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":10,"context_line":""},{"line_number":11,"context_line":"https://blueprints.launchpad.net/nova/+spec/support-volume-local-cache"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"This blueprint proposes to add support of volume local cache in nova. Cache"},{"line_number":14,"context_line":"software such as dm-cache, bcache, open-cas can use fast NVME SSD to cache for"},{"line_number":15,"context_line":"the slow remote volume."},{"line_number":16,"context_line":""},{"line_number":17,"context_line":"Problem description"},{"line_number":18,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_e4f40fd3","line":15,"range":{"start_line":13,"start_character":0,"end_line":15,"end_character":23},"in_reply_to":"3fa7e38b_e8e63216","updated":"2019-11-06 08:46:44.000000000","message":"Yes, multiattach is the limitation","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":10135,"name":"Lee Yarwood","display_name":"Lee Yarwood","email":"lyarwood@redhat.com","username":"lyarwood"},"change_message_id":"33b39699d6fbfcf3548e3ec9c63e44f8013ee460","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_e8cf127c","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"updated":"2019-11-05 10:39:38.000000000","message":"AFAIK you can\u0027t place any of these block based caches infront of librbd? The librbd cache is held within the memory of the client.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"43843e6700a2f5dccecfec04e8118c30c2c19d5c","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_bdfb7e13","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"in_reply_to":"3fa7e38b_36bfb6fd","updated":"2019-11-07 08:03:26.000000000","message":"Thanks for the info, Dan. So it may not worth to mount ceph to host because of this reason. At the same time, I\u0027m thinking it may still depend on user\u0027s scenario (if or not trust host).\nExcept ceph, others such as iscsi/fc/NVMEof are mounted to host first, and can be cached by this way. In my test environment, the latency can be 55us when using an SSD(with latency of 10us) as the cache. It have typically 10 times better than remote volumes.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_75d172cc","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"in_reply_to":"3fa7e38b_3ada1b1d","updated":"2019-11-12 10:06:07.000000000","message":"CAS is similar with bcache, some differences:\n1. CAS has a dedicated admin tool: casadm\n2. CAS will not format backing volume, but bcache will. CAS will not save metadata in backing volume. A volume can be mounted to other machines(no matter has cache or not) immediately when pull out from the machine with cache\n3. Some details: https://open-cas.github.io/, please refer to \"Open CAS Linux\"","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"e50cf4bfdf55258c6b895d0ed2dbb21bf9f264dd","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_36bfb6fd","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"in_reply_to":"3fa7e38b_445ba3f5","updated":"2019-11-06 14:35:56.000000000","message":"That defeats a lot of the purpose and benefit of the RBD volume, exposes the data on the volume directly to the host, etc. This is a non-starter.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_d545c6b5","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"in_reply_to":"3fa7e38b_5924c80e","updated":"2019-11-12 10:06:07.000000000","message":"Both dm-cache and bcache are removed.Thanks Dan.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_3ada1b1d","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"in_reply_to":"3fa7e38b_5924c80e","updated":"2019-11-08 18:55:51.000000000","message":"bacache would also reqruie you to mount the volume on the host then layer add the remote volume as a backing volume to the bcache device.\n\nas we discussed on irc bcache have removed the ability to add backing volumes to a cache without formatting.\n\nbcache also only caches random io.\nyou can tuen the threshold for sequential io but its 4MB i think by default so it will only accelerate random io it the block is in the cache. also by default it works in write through mode so it will only accelerate reads unless you configured the cache volume in write back mode.\n\nim not really familar with CAS can you provide more info on this","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"04d5f85b618d61d2b4e9090bd27819fa4eaaa2f4","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_5924c80e","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"in_reply_to":"3fa7e38b_bdfb7e13","updated":"2019-11-08 15:03:19.000000000","message":"So dm-cache should be removed from this list of ways this can work right?","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":23,"context_line":"level (iscsi / rbd). So these fast SSDs can be mounted on compute node locally"},{"line_number":24,"context_line":"and used as a cache for remote volumes."},{"line_number":25,"context_line":""},{"line_number":26,"context_line":"In order to do the cache, there\u0027re some cache softwares, such as dm-cache,"},{"line_number":27,"context_line":"bcache, Open CAS. This spec is trying to add volume local cache using such"},{"line_number":28,"context_line":"cache software."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_445ba3f5","line":27,"range":{"start_line":26,"start_character":65,"end_line":27,"end_character":17},"in_reply_to":"3fa7e38b_e8cf127c","updated":"2019-11-06 08:46:44.000000000","message":"ceph volume need to be mounted to host machine first so that it can be cached. From cache software\u0027s point of view, as long as there\u0027re mount points of the block devices, then cache is possible.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":10135,"name":"Lee Yarwood","display_name":"Lee Yarwood","email":"lyarwood@redhat.com","username":"lyarwood"},"change_message_id":"33b39699d6fbfcf3548e3ec9c63e44f8013ee460","unresolved":false,"context_lines":[{"line_number":30,"context_line":"Use Cases"},{"line_number":31,"context_line":"---------"},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"User wants to use fast NVME SSD to cache for remote slow volume."},{"line_number":34,"context_line":""},{"line_number":35,"context_line":"Proposed change"},{"line_number":36,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_28a76abe","line":33,"range":{"start_line":33,"start_character":0,"end_line":33,"end_character":64},"updated":"2019-11-05 10:39:38.000000000","message":"I\u0027m not sure that this use case is strong enough. If they care about storage performance why don\u0027t they just use the local SSDs?","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"c94fb0d9d3461840ca625e4373ae08f0eae36384","unresolved":false,"context_lines":[{"line_number":30,"context_line":"Use Cases"},{"line_number":31,"context_line":"---------"},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"User wants to use fast NVME SSD to cache for remote slow volume."},{"line_number":34,"context_line":""},{"line_number":35,"context_line":"Proposed change"},{"line_number":36,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_eaaaa318","line":33,"range":{"start_line":33,"start_character":0,"end_line":33,"end_character":64},"in_reply_to":"3fa7e38b_28a76abe","updated":"2019-11-05 17:54:03.000000000","message":"Also, since this is config-driven, the user doesn\u0027t know anything about this. They just see performance change (maybe).\n\nPresumably this should be an operator-focused use case (but I agree about the weakness here).","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":30,"context_line":"Use Cases"},{"line_number":31,"context_line":"---------"},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"User wants to use fast NVME SSD to cache for remote slow volume."},{"line_number":34,"context_line":""},{"line_number":35,"context_line":"Proposed change"},{"line_number":36,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_64cedfb8","line":33,"range":{"start_line":33,"start_character":0,"end_line":33,"end_character":64},"in_reply_to":"3fa7e38b_28a76abe","updated":"2019-11-06 08:46:44.000000000","message":"Thanks, I will enrich the use case. And do you mean mount local SSD to a specific VM? Looks like it is not manageable by openstack. This spec\u0027s aim is to improve the whole node\u0027s performance(lower latency and higher throughput), not for a specific VM.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":30,"context_line":"Use Cases"},{"line_number":31,"context_line":"---------"},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"User wants to use fast NVME SSD to cache for remote slow volume."},{"line_number":34,"context_line":""},{"line_number":35,"context_line":"Proposed change"},{"line_number":36,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_759f3202","line":33,"range":{"start_line":33,"start_character":0,"end_line":33,"end_character":64},"in_reply_to":"3fa7e38b_5a2d97fb","updated":"2019-11-12 10:06:07.000000000","message":"My fault here. I had not emphasize the use case that cloud operator want to promote the host node performance, but VM owners (may be not a tenant in private cloud) lacks of the knowledge of caching, they may be not a storage guy and don\u0027t know what is bcache, what\u0027s the different of bcache and dm-cache. They need to think a lot to integrate their software with cache, make it stable and safe. So, they may don\u0027t like to spend extra time to choose caching software and do the cache inside VM. But they are glad if the volume is transparently cached in host and performance is better by default.\n\nRegarding lvm, as far as I know, lvm impacts latency. It was developed years ago and was not designed for today\u0027s low latency NVME SSD.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"e50cf4bfdf55258c6b895d0ed2dbb21bf9f264dd","unresolved":false,"context_lines":[{"line_number":30,"context_line":"Use Cases"},{"line_number":31,"context_line":"---------"},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"User wants to use fast NVME SSD to cache for remote slow volume."},{"line_number":34,"context_line":""},{"line_number":35,"context_line":"Proposed change"},{"line_number":36,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_76420e12","line":33,"range":{"start_line":33,"start_character":0,"end_line":33,"end_character":64},"in_reply_to":"3fa7e38b_64cedfb8","updated":"2019-11-06 14:35:56.000000000","message":"Right and the whole-host caching is problematic, for the reasons I pointed out about tracking the available resource.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":30,"context_line":"Use Cases"},{"line_number":31,"context_line":"---------"},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"User wants to use fast NVME SSD to cache for remote slow volume."},{"line_number":34,"context_line":""},{"line_number":35,"context_line":"Proposed change"},{"line_number":36,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_5a2d97fb","line":33,"range":{"start_line":33,"start_character":0,"end_line":33,"end_character":64},"in_reply_to":"3fa7e38b_76420e12","updated":"2019-11-08 18:55:51.000000000","message":"i can see an the usecase of wanting a fast ssd locally.\nbut perhaps this coudl be managed by the guest if we could support requests for a local volume via cinder? or maybe cyborg.\n\nso the tenant would request everything as normal + 100G of local ssd and then tehy could do the caching them selves in the vm with bache or whatever they chose.\n\nif we were to do this at the host level i think it would be better to extend qemu to support this so it could do the cacheing.\n\nif we took the cyborg approach the proposal would be basically to have a local storage plugin that would carve up X amount of ssd and present it to the vm for its use as scratch space.\n\ni know that the latest generation of Samsung nvme ssds will support sriov to virtualise the ssd but even without that a lvm or driver could be written for cyborg that would carve up the local nvme ssd and could allocate a volume for the guest and qemu could then pass-though that lvm volume as a block device to use as a \"storage accelerator\" e.g. cache\n\nthis would allow the same usecause to be done but would also allow storage to be requested for other uses beyond cacheing such as a fast local scratch disk. i think that was part of the original reason for ephemeral disks in the flavor i.e.\nto provide fast local storage to use for working data excreta rather then using a persistent remote volume.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":30,"context_line":"Use Cases"},{"line_number":31,"context_line":"---------"},{"line_number":32,"context_line":""},{"line_number":33,"context_line":"User wants to use fast NVME SSD to cache for remote slow volume."},{"line_number":34,"context_line":""},{"line_number":35,"context_line":"Proposed change"},{"line_number":36,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_24a1c7f8","line":33,"range":{"start_line":33,"start_character":0,"end_line":33,"end_character":64},"in_reply_to":"3fa7e38b_eaaaa318","updated":"2019-11-06 08:46:44.000000000","message":"Thanks Dan. Yes, my current plan is: if cache is enabled in the config, then volumes of all the VMs in this compute node would be cached. So users don\u0027t know this and just see performance improved.\n\nIn the future, we may can cache only for specific volume which has property \"to be cached\". Just like volume encryption, volume has a property to indicate whether need to encrypt it or not. Do you want me to implement this way in this spec or next spec?","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"c94fb0d9d3461840ca625e4373ae08f0eae36384","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_ca2007b5","line":43,"updated":"2019-11-05 17:54:03.000000000","message":"Presumably brick then has to do scheduling and slicing of the available cache devices, right? How much of the cache device do you give to each instance?\n\nI guess this also introduces a potential failure to attach a volume if there is not sufficient space/slots on the cache device right? Really this would need to be tracked in placement somehow to make sure we don\u0027t send instances to compute nodes that won\u0027t have sufficient cache space, and of course, we can\u0027t anticipate that if it\u0027s not part of the boot request.\n\nSo, I boot a bunch of very small instances that land on one compute node, each with several volumes. I fully consume slots on the cache device for that node and then no more instances can boot there right? Presumably there\u0027s also a problem of over-committing the cache device while some instances are shut off and then are unable to restart. Evacuating an instance with volumes that happen to land on a node that has no more cache slots would be really bad.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_3a49bb8d","line":43,"in_reply_to":"3fa7e38b_2b598b5a","updated":"2019-11-08 18:55:51.000000000","message":"bache works similarly in that wehn you add backing block devices you get a serise of block device create in /dev\n/dev/bcache0  /dev/bcache0p1  /dev/bcache0p2\n\nso on my home openstack deployment  i have an md raid5 of 4 2TB sas HDDs fronted by an intel 900p. i then partation the bcache volume and create 2 lvm pvg and give one to cinder and one for nova and docker \n\nwhich ends up looking like this.\nhttp://paste.openstack.org/show/785920/\n\nin this setup the optane ssd is transparently cacheing all reads and write to the raid5 array via the bcache0p1 devcice which is used by the cinder lvm driver\n\nsimilarly novas local storage for root disks qcows are also cached since it reside on the lvm volume on the bcache0p2 device.\n\nthe the access from within the guest to the both the cinder volume and local qcow is cached in memory by qemu so the only thing caching the cinder volume explcitly woudl accelarte is if we have a cache miss in the qemu in memory cahce it would check the local block cache before connecting over isisi to the cinder volume.\n\nhave you compared the latency reduction of local caching of the cinder volume to remote caching as i have configured?\n\n\ntechincal this is an all in one deployment but i have previously used bcache on top the device used for ceph osd on the remote storage side to good effect and im wondering how much local caching buy you over that approch.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_a0024ab9","line":43,"in_reply_to":"3fa7e38b_3a49bb8d","updated":"2019-11-12 10:06:07.000000000","message":"In my PTG demo, when the volume is well cached (warm up by running fio more than 10 times upon the cached volume), the final latency fio get from VM is about 55 microsecond. I\u0027m using ssd p4800x with latency of 10 microsecond. So the cost by software is 55-10\u003d45us, including the cost of cache (and miss) and the cost of virtio.\n\nRegarding remote caching, the key point is which network protocol you choose, e.g. rbd, iscsi, fc or rdma. And another key point is the network topology(1GB/s in end server? 10G? 100G? how many switch?), the latency would be very different. Just FYI:\n1. if choose rbd, in my environment(1GB/s NIC, 2 switch), the latency is 2215us. \n2. the latency we can get from famous public cloud is about 400-500us\n3. In Open infra summit last week, StorPool shows their latency 170us, and this is the best one comparing other company cloud in the show, some details in the show:\nDigitalOcean(ceph): 1750us\nOVH(ceph): 1530us\nTencent(?): 760us\nAWS gp2 10k: 290us\nAlibaba(?): 180us\nStorPool BCP: 170us\nSo, we can see latency of ceph rbd would be millisecond level.\n\nIn order to keep the latency advantage of fast ssd, we should not add extra software layers between ssd hardware and VM. So we should avoid lvm and filesystem in front in host os.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_5f428c11","line":43,"in_reply_to":"3fa7e38b_4e10a401","updated":"2019-11-06 08:46:44.000000000","message":"A typically NVME SSD used here would be 2TB ~ 8TB. If choose 4TB, and cache for 40 VMs running on one node (2 sockets, 72 cpu cores for example). So every VM can get 100GB cache in average. It would be enough for most cases.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"e50cf4bfdf55258c6b895d0ed2dbb21bf9f264dd","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_d669828c","line":43,"in_reply_to":"3fa7e38b_7fe5083e","updated":"2019-11-06 14:35:56.000000000","message":"This is not how dm-cache works, as far as I know. What \"cache software\" works this way?","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0666840c33ff77c1049edeb27be4228d10a2b651","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_4e10a401","line":43,"in_reply_to":"3fa7e38b_ca2007b5","updated":"2019-11-06 02:15:57.000000000","message":"I think the *cache* proposed in this spec are shared across the entire host. Even so, maybe there is a maximum number for VMs to share the cache to ensure a fair performance improvement. Too many VMs competing the share cache may make it worse then even with out the cache.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_7fe5083e","line":43,"in_reply_to":"3fa7e38b_ca2007b5","updated":"2019-11-06 08:46:44.000000000","message":"Typically (in our customer\u0027s environment) one fast NVME SSD for one node. This SSD would act as a shared cache pool for all the VMs in the compute node. So every instance just share this SSD. Will not give a cache device to a specific instance.\n\nThe cache mechanism is:\ne.g.\ncache device mount point is: /dev/nvme1n1\nremote slow volume mount points are: /dev/sdc, /dev/sdd.\nThen /dev/nvme1n1 can cache for /dev/sdc, /dev/sdd. Emulated volume for /dev/sdc would be something like /dev/cas-1, /dev/sdd would be /dev/cas-2 for example. Then /dev/cas-1 be assigned to VM1, /dev/cas-2 be assigned to VM2. There can be lots of /dev/cas-xxx, and they share the same cache pool(the fast SSD).\n\nThere will not be the case that there\u0027s no sufficient space on the cache device, because no fixed size of cache allocated to a volume. If the io of a volume is busy, then the blocks will be cached; if the cached blocks has not been touched for a long time, then it will be wiped. Cache software will cache the blocks as much as possible, but if cache is full, it will wipe old data.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"43843e6700a2f5dccecfec04e8118c30c2c19d5c","unresolved":false,"context_lines":[{"line_number":40,"context_line":"  In function _connect_volume(), there\u0027re two steps to do the cache:"},{"line_number":41,"context_line":""},{"line_number":42,"context_line":"  - Step1) Call os-brick to cache the volume [2]_. os-brick will call cache"},{"line_number":43,"context_line":"    software to setup the cache and return an emulated volume."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"  - Step2) Replace the path of original volume with the emulated volume"},{"line_number":46,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_2b598b5a","line":43,"in_reply_to":"3fa7e38b_d669828c","updated":"2019-11-07 08:03:26.000000000","message":"This feature is driven from a open-cas(originally it\u0027s name is CAS) customer. In my POC code, I implemented open-cas in this way. open-cas is working.\nMy code implementation(POC) in osbrick: https://review.opendev.org/#/c/663549/2/os_brick/caches/ocf.py\nopen-cas:\nhttps://open-cas.github.io/\nhttps://open-cas.github.io/getting_started_open_cas_linux.html\n\nFor bcache, I took an experiment before, and it is similar with open-cas. For dm-cache, I took a study of the help doc, but no experiment for that.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":10135,"name":"Lee Yarwood","display_name":"Lee Yarwood","email":"lyarwood@redhat.com","username":"lyarwood"},"change_message_id":"33b39699d6fbfcf3548e3ec9c63e44f8013ee460","unresolved":false,"context_lines":[{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_08906e4d","line":59,"range":{"start_line":59,"start_character":25,"end_line":59,"end_character":43},"updated":"2019-11-05 10:39:38.000000000","message":"enable_local_volume_cache?","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_44296335","line":59,"range":{"start_line":59,"start_character":25,"end_line":59,"end_character":43},"in_reply_to":"3fa7e38b_08906e4d","updated":"2019-11-06 08:46:44.000000000","message":"agree for this","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"150f1888bf7652ba9fd08177231d59c7c4788cc7","unresolved":false,"context_lines":[{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_a6da621a","line":59,"range":{"start_line":59,"start_character":25,"end_line":59,"end_character":43},"in_reply_to":"3fa7e38b_20c97a59","updated":"2019-11-12 12:17:28.000000000","message":"that\u0027s the thing if a tenant requested cache it would not be.\n\ni think there need to be a trait or some way to schedule to a cached node so that the operator or tenant can request it via a flavor or image\n\ne.g. COMPUTE_VOLUME_CACHING\n\nif we have a host config then we would report that trait if the cache is enabled in the config.\n\nbut rather then take that approch it might be better to instead have","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_20c97a59","line":59,"range":{"start_line":59,"start_character":25,"end_line":59,"end_character":43},"in_reply_to":"3fa7e38b_3a2e9bab","updated":"2019-11-12 10:06:07.000000000","message":"dm-cache and bcache would not be used. So currently we will only keep open-cas. open-cas will not format the backing volume, and it is OK to migrate to a non-cached host.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_3a2e9bab","line":59,"range":{"start_line":59,"start_character":25,"end_line":59,"end_character":43},"in_reply_to":"3fa7e38b_44296335","updated":"2019-11-08 18:55:51.000000000","message":"im not sure this should be in the nova config at all.\ni would expect this to be either in the flavor, image or volume metatdata.\n\nif its per host this would probably need to be exposed as a compute capability trait so we could schedule based on support and it raise the question of what do we do on live migration.\n\ndo we only migrate to other cached host? can the caching type change on migration bcache to open-cas. i dont think its really correct to expose which one of those is being used to a tenant. i can may see it being exposed as a custom trait but placement is admin only by default to it would not be exposed to a tenant unless the admin chose to allow that.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"cfda1477f32861246de7dc3ff0cbc0ca94314f9f","unresolved":false,"context_lines":[{"line_number":56,"context_line":""},{"line_number":57,"context_line":"* Add switch in nova-cpu.conf to enable/disable local cache"},{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_7231760a","line":59,"range":{"start_line":59,"start_character":25,"end_line":59,"end_character":43},"in_reply_to":"3fa7e38b_a6da621a","updated":"2019-11-13 09:39:54.000000000","message":"Will update the spec, thanks.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"04d5f85b618d61d2b4e9090bd27819fa4eaaa2f4","unresolved":false,"context_lines":[{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"},{"line_number":63,"context_line":""},{"line_number":64,"context_line":"Alternatives"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_f9633440","line":61,"updated":"2019-11-08 15:03:19.000000000","message":"Per the other discussion, you need to document all the ways this fundamentally can\u0027t work. So, rbd backend, multiattach, etc.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":58,"context_line":""},{"line_number":59,"context_line":"  Suggested switch name: enable_local_cache"},{"line_number":60,"context_line":"  Suggested section: [libvirt]"},{"line_number":61,"context_line":""},{"line_number":62,"context_line":"POC code can be found in [1]_"},{"line_number":63,"context_line":""},{"line_number":64,"context_line":"Alternatives"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_00979e47","line":61,"in_reply_to":"3fa7e38b_f9633440","updated":"2019-11-12 10:06:07.000000000","message":"Done","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":10135,"name":"Lee Yarwood","display_name":"Lee Yarwood","email":"lyarwood@redhat.com","username":"lyarwood"},"change_message_id":"33b39699d6fbfcf3548e3ec9c63e44f8013ee460","unresolved":false,"context_lines":[{"line_number":64,"context_line":"Alternatives"},{"line_number":65,"context_line":"------------"},{"line_number":66,"context_line":""},{"line_number":67,"context_line":"* Cache can be added in backend storage side, e.g. in ceph. Storage server"},{"line_number":68,"context_line":"  normally has its own cache mechanism, e.g. using memory as cache, or using"},{"line_number":69,"context_line":"  NVME SSD as cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"* Create a dedicated cache cluster. Mount all the cache (NVME SSD) in cache"},{"line_number":72,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_08f4eecd","line":69,"range":{"start_line":67,"start_character":0,"end_line":69,"end_character":20},"updated":"2019-11-05 10:39:38.000000000","message":"That isn\u0027t client side caching so I\u0027m not sure how that would help here.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"c94fb0d9d3461840ca625e4373ae08f0eae36384","unresolved":false,"context_lines":[{"line_number":72,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"},{"line_number":73,"context_line":"  specific volume. The allocated cache can be mounted on compute node through"},{"line_number":74,"context_line":"  NVMEof protocol. Then still use cache software to do the same cache."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"But this would be the compete between local PCIe and remote network. The"},{"line_number":77,"context_line":"disadvantage if doing like these ways is: the network of the storage server"},{"line_number":78,"context_line":"would be bottleneck."}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_0a693ff7","line":75,"updated":"2019-11-05 17:54:03.000000000","message":"Surely another alternative is for operators to use local SSD instances with data volumes for persistence. Instances can use dm-cache internally against the ephemeral disk to cache their volume if they want.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"bba134548324abf4f88d0df5de1d5af0716b9b66","unresolved":false,"context_lines":[{"line_number":72,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"},{"line_number":73,"context_line":"  specific volume. The allocated cache can be mounted on compute node through"},{"line_number":74,"context_line":"  NVMEof protocol. Then still use cache software to do the same cache."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"But this would be the compete between local PCIe and remote network. The"},{"line_number":77,"context_line":"disadvantage if doing like these ways is: the network of the storage server"},{"line_number":78,"context_line":"would be bottleneck."}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_9f06c4cd","line":75,"in_reply_to":"3fa7e38b_0a693ff7","updated":"2019-11-06 08:46:44.000000000","message":"Agreed, thanks Dan. Will add.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":72,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"},{"line_number":73,"context_line":"  specific volume. The allocated cache can be mounted on compute node through"},{"line_number":74,"context_line":"  NVMEof protocol. Then still use cache software to do the same cache."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"But this would be the compete between local PCIe and remote network. The"},{"line_number":77,"context_line":"disadvantage if doing like these ways is: the network of the storage server"},{"line_number":78,"context_line":"would be bottleneck."}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_ba212bb8","line":75,"in_reply_to":"3fa7e38b_9f06c4cd","updated":"2019-11-08 18:55:51.000000000","message":"yes this is what i was getting at with the idea of a cyborg driver fo local lvm storage.\n\nephmeral disks option in the flavor could also be used to the same effect if you can use a trait for example to ensure the host has ssd storage. then the guest can just set up the caching between the local block device and the remote one.\n\nthe guest shoudl be able to tell the difference using the device role tagging feature.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":72,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"},{"line_number":73,"context_line":"  specific volume. The allocated cache can be mounted on compute node through"},{"line_number":74,"context_line":"  NVMEof protocol. Then still use cache software to do the same cache."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"But this would be the compete between local PCIe and remote network. The"},{"line_number":77,"context_line":"disadvantage if doing like these ways is: the network of the storage server"},{"line_number":78,"context_line":"would be bottleneck."}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_0041dea6","line":75,"in_reply_to":"3fa7e38b_ba212bb8","updated":"2019-11-12 10:06:07.000000000","message":"Thanks. Already answered in above.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":81,"context_line":"    Every server with 200 TB capacity and 2 x 40Gbps NICs. So the network"},{"line_number":82,"context_line":"    bandwidth of every server would be 80Gbps, in other words, 10 GB/s. So"},{"line_number":83,"context_line":"    total bandwidth of 50 servers would be 500 GB/s. Take 1000 compute nodes as"},{"line_number":84,"context_line":"    a example, every compute node would only get 500 MB/s storage bandwidth in"},{"line_number":85,"context_line":"    average. As a contrast, the bandwidth of typical NVME SSD would be around 3"},{"line_number":86,"context_line":"    GB/s. So it would be 6 times faster if NVME SSD are used as local cache."},{"line_number":87,"context_line":""},{"line_number":88,"context_line":"  - Latency) Storage cluster typically provide volume through iscsi/fc"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_ba0a8b24","line":85,"range":{"start_line":84,"start_character":49,"end_line":85,"end_character":12},"updated":"2019-11-08 18:55:51.000000000","message":"this is about 8 times the sata 3 max limit and therefor\n8x faster then a local sata3 ssd in terms of throughput.\n\nif you replaced the 2*40Gb/s nics with 2*100Gb/s nic on the 50 storage server that would give you a 2.5x speed up acroos the cluster allowing you to push 1.5GBs across each of your compute nodes for a lot less money then install 1000 p4800x ssd in each of your compute nodes.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":81,"context_line":"    Every server with 200 TB capacity and 2 x 40Gbps NICs. So the network"},{"line_number":82,"context_line":"    bandwidth of every server would be 80Gbps, in other words, 10 GB/s. So"},{"line_number":83,"context_line":"    total bandwidth of 50 servers would be 500 GB/s. Take 1000 compute nodes as"},{"line_number":84,"context_line":"    a example, every compute node would only get 500 MB/s storage bandwidth in"},{"line_number":85,"context_line":"    average. As a contrast, the bandwidth of typical NVME SSD would be around 3"},{"line_number":86,"context_line":"    GB/s. So it would be 6 times faster if NVME SSD are used as local cache."},{"line_number":87,"context_line":""},{"line_number":88,"context_line":"  - Latency) Storage cluster typically provide volume through iscsi/fc"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_001a3ec5","line":85,"range":{"start_line":84,"start_character":49,"end_line":85,"end_character":12},"in_reply_to":"3fa7e38b_ba0a8b24","updated":"2019-11-12 10:06:07.000000000","message":"agreed","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"7dc3edd5a6d41c22bbcd92b1ef76c65366b06307","unresolved":false,"context_lines":[{"line_number":87,"context_line":""},{"line_number":88,"context_line":"  - Latency) Storage cluster typically provide volume through iscsi/fc"},{"line_number":89,"context_line":"    protocol, or through librbd if ceph is used. The latency would be"},{"line_number":90,"context_line":"    millisecond level. Even NVME over TCP, the latency would be hundreds of"},{"line_number":91,"context_line":"    microsecond, typically larger than 500ms. As a contrast, the latency of"},{"line_number":92,"context_line":"    NVME SSD would be around 10 ms, take Intel Optane SSD p4800x as example."},{"line_number":93,"context_line":""},{"line_number":94,"context_line":"Data model impact"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_3d5d35ec","line":91,"range":{"start_line":90,"start_character":22,"end_line":91,"end_character":44},"updated":"2019-11-08 18:55:51.000000000","message":"NVME over TCP would be hunders of microseconds\nat least if your are using spdk https://dqtibwqq6s6ux.cloudfront.net/download/performance-reports/SPDK_nvmeof_tcp_perf_report_19.07.pdf\n\nbut NVMEof hardware acclerated rdma instead of nvme over tcp is less and add a software cache adds latency too when ever there is a cache miss.\n\nmelonox were claiming between 5-30μs latency for there rdma based solution.\nhttps://www.openfabrics.org/images/eventpresos/2017presentations/407_ExperiencesNVMeoF_PPandit.pdf#page\u003d18\n\nhttps://storageconference.us/2019/Research/NVMeOverFabrics.pdf\nalso did an analasys using a pair of 100Gbps broadcom NetXtreme NIC with hardware RDMA support and achived a latency as low as 3.17μs\n\nin there setup the server were connected back to back so there woudl be some addtional switch latency in a real world deployment\n\nthey also used last gen 400GB Intel Data Center P3600 SSD for there testing so the ssd latency dominates there testign since is mlc flash based but the rdma link latency is still 3 times smaller then the traget 10μs latency of optane.\n\nso if you were using a local optane ssd as a cache given the software overhead im not sure you would be able to mesure a perfromce improvement and there might actully be a performacne degreadata as caches only cache random io typically where as nvmeof with hardware rdma will also improve sequtial io throuput.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":87,"context_line":""},{"line_number":88,"context_line":"  - Latency) Storage cluster typically provide volume through iscsi/fc"},{"line_number":89,"context_line":"    protocol, or through librbd if ceph is used. The latency would be"},{"line_number":90,"context_line":"    millisecond level. Even NVME over TCP, the latency would be hundreds of"},{"line_number":91,"context_line":"    microsecond, typically larger than 500ms. As a contrast, the latency of"},{"line_number":92,"context_line":"    NVME SSD would be around 10 ms, take Intel Optane SSD p4800x as example."},{"line_number":93,"context_line":""},{"line_number":94,"context_line":"Data model impact"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_609c520e","line":91,"range":{"start_line":90,"start_character":22,"end_line":91,"end_character":44},"in_reply_to":"3fa7e38b_3d5d35ec","updated":"2019-11-12 10:06:07.000000000","message":"The values(such as 3.17us, 3-30us) shown here are the pure network link latency, it is using RDMA, point to point, no switch, no read/write io to the real ssd disk.\n\nBut 10us of the Optane SSD is the latency of real read/write io to the ssd disk.\n\nThis is the specification of P3600 SSD which their tests used, the latency is 120us for read, so how can they get the latency of 3.17us?: https://ark.intel.com/content/www/us/en/ark/products/80996/intel-ssd-dc-p3600-series-400gb-1-2-height-pcie-3-0-20nm-mlc.html","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"150f1888bf7652ba9fd08177231d59c7c4788cc7","unresolved":false,"context_lines":[{"line_number":87,"context_line":""},{"line_number":88,"context_line":"  - Latency) Storage cluster typically provide volume through iscsi/fc"},{"line_number":89,"context_line":"    protocol, or through librbd if ceph is used. The latency would be"},{"line_number":90,"context_line":"    millisecond level. Even NVME over TCP, the latency would be hundreds of"},{"line_number":91,"context_line":"    microsecond, typically larger than 500ms. As a contrast, the latency of"},{"line_number":92,"context_line":"    NVME SSD would be around 10 ms, take Intel Optane SSD p4800x as example."},{"line_number":93,"context_line":""},{"line_number":94,"context_line":"Data model impact"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_06bed6ad","line":91,"range":{"start_line":90,"start_character":22,"end_line":91,"end_character":44},"in_reply_to":"3fa7e38b_609c520e","updated":"2019-11-12 12:17:28.000000000","message":"there test did not show they were getting 3.17us for the io tests that was the cost of the rdma  transfer. that is why i said the perfomance of the storage dominated\n\nfrom the papaer  Finding  #1\n\n\"for the 128 KB requests, the 95th percentile latencyis  1,155μs  for  local  access  and  1,139μs  for  remote  access,which  is  1.39%  faster.\"\n\nif you look a figure 2 you see in the same test the 4k results are around 200us\n\nif we look later in the paper at Finding #4 they consider sequential io latency as a factor of io depth.\n\n\"We  benchmark  the  NVMeoF  storage  by  settingthe sequential read IODepth from 1 to 128 with 8 concurrent jobs and request size being set to 4 KB.Finding  #4As  shown  in  Figure  12,  when  the  IODepth increases  from  1  to  8,  the  latency  increases  and  the  remote access is slower than the local access in general. For example,when the IODepth is 1, the 95th percentile latency is 178μs for both local and remote access. When the IODepth is 8, for the50th percentile latency, it is 171μs for local access and 370μsfor  remote  access,  which  represents  about  116.4%  overhead.However, the 95th percentile latency is 586μs and 565μs for local  and  remote  access  respectively,  meaning  that  NVMeoF reduces the latency by about 3.58%\"\n\nso they were getting a latency of  171μs at the 95th percentile with an io depth of 1 raising to  just under 600μs\n\nthe io dept of  1 result are within about 30% of the minium latency that hardware is rated for of 120us as you pointed out so but they did not mesure a difference localy or remotely for a low io depth. \n\ni would expect those number to improve if they use p4800x ssd as the nvme device as one of the key benefits of octane over flash is the fact it maintains its performance under load better then flash. i.e. it is more deterministic.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"04d5f85b618d61d2b4e9090bd27819fa4eaaa2f4","unresolved":false,"context_lines":[{"line_number":131,"context_line":""},{"line_number":132,"context_line":"  enable_local_cache \u003d True"},{"line_number":133,"context_line":""},{"line_number":134,"context_line":"* Both nova and os-brick should be upgraded to contain local cache feature."},{"line_number":135,"context_line":""},{"line_number":136,"context_line":"Developer impact"},{"line_number":137,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_3970ec08","line":134,"updated":"2019-11-08 15:03:19.000000000","message":"This is not a deployer impact.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":131,"context_line":""},{"line_number":132,"context_line":"  enable_local_cache \u003d True"},{"line_number":133,"context_line":""},{"line_number":134,"context_line":"* Both nova and os-brick should be upgraded to contain local cache feature."},{"line_number":135,"context_line":""},{"line_number":136,"context_line":"Developer impact"},{"line_number":137,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_40aa761a","line":134,"in_reply_to":"3fa7e38b_3970ec08","updated":"2019-11-12 10:06:07.000000000","message":"removed, thanks.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"04d5f85b618d61d2b4e9090bd27819fa4eaaa2f4","unresolved":false,"context_lines":[{"line_number":142,"context_line":"Upgrade impact"},{"line_number":143,"context_line":"--------------"},{"line_number":144,"context_line":""},{"line_number":145,"context_line":"* Both nova and os-brick should be upgraded to contain local cache feature."},{"line_number":146,"context_line":""},{"line_number":147,"context_line":"Implementation"},{"line_number":148,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_59692824","line":145,"updated":"2019-11-08 15:03:19.000000000","message":"This does not belong here. This section is for documenting things that need to happen during an upgrade from a release before to the one that supports this, or any release-to-release extra steps this feature will add. In this case, I would expect to just say \"None\" here as this would be turned on after an upgrade is completed.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":142,"context_line":"Upgrade impact"},{"line_number":143,"context_line":"--------------"},{"line_number":144,"context_line":""},{"line_number":145,"context_line":"* Both nova and os-brick should be upgraded to contain local cache feature."},{"line_number":146,"context_line":""},{"line_number":147,"context_line":"Implementation"},{"line_number":148,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_00a47e4a","line":145,"in_reply_to":"3fa7e38b_59692824","updated":"2019-11-12 10:06:07.000000000","message":"removed, thanks Dan.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"04d5f85b618d61d2b4e9090bd27819fa4eaaa2f4","unresolved":false,"context_lines":[{"line_number":186,"context_line":"  - Check if the emulated volume is created for VM or not."},{"line_number":187,"context_line":""},{"line_number":188,"context_line":"  - Check if the emulated volume is released or not when deleting VM"},{"line_number":189,"context_line":""},{"line_number":190,"context_line":"Documentation Impact"},{"line_number":191,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":192,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_b95dbc7a","line":189,"updated":"2019-11-08 15:03:19.000000000","message":"I\u0027ll want to see one of our tempest jobs changed to enable this feature, with bcache on a vanilla worker image. That should be totally doable and I think it should be table stakes for getting something like this added. I\u0027d want to see it working before we merge the first patch.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"a0005d5770043464d11422a8a5d734d9de8e3e1c","unresolved":false,"context_lines":[{"line_number":186,"context_line":"  - Check if the emulated volume is created for VM or not."},{"line_number":187,"context_line":""},{"line_number":188,"context_line":"  - Check if the emulated volume is released or not when deleting VM"},{"line_number":189,"context_line":""},{"line_number":190,"context_line":"Documentation Impact"},{"line_number":191,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":192,"context_line":""}],"source_content_type":"text/x-rst","patch_set":2,"id":"3fa7e38b_20741ad0","line":189,"in_reply_to":"3fa7e38b_b95dbc7a","updated":"2019-11-12 10:06:07.000000000","message":"OK, I will look into tempest for this, thanks.","commit_id":"8f6a5297ed28e12c13ba25ac86d8c5e14b67208b"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":42,"context_line":"rbd and sheepdog, cannot be cached. Details can be found in list"},{"line_number":43,"context_line":"libvirt_volume_drivers in [4]_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"In some high performance environments, RDMA may be chosen. RDMA effectively"},{"line_number":46,"context_line":"shorten the latency gap between local volume and remote volume. In experimental"},{"line_number":47,"context_line":"environment, without network switch, without read/write io to real volume, the"},{"line_number":48,"context_line":"point to point RDMA network link latency would be even 3 us in best case. This"},{"line_number":49,"context_line":"is the pure network link latency, and this also don\u0027t mean it is faster than"},{"line_number":50,"context_line":"local PCIe, because RDMA NIC card itself in host and target machines also are"},{"line_number":51,"context_line":"PCIe devices. But indeed, RDMA users may cannot get much benefit from volume"},{"line_number":52,"context_line":"local cache, but still depend on how many switches between host and target"},{"line_number":53,"context_line":"machine. Meanwhile, lots of the network topology in user\u0027s datacenter is not"},{"line_number":54,"context_line":"based on RDMA. RDMA requires specialized and expensive NIC card, cable, switch."},{"line_number":55,"context_line":"So RDMA normally means a big network infrastructure upgrade. But it is easier"},{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_c6be7e63","line":53,"range":{"start_line":45,"start_character":0,"end_line":53,"end_character":7},"updated":"2019-11-12 13:08:47.000000000","message":"+1","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":51,"context_line":"PCIe devices. But indeed, RDMA users may cannot get much benefit from volume"},{"line_number":52,"context_line":"local cache, but still depend on how many switches between host and target"},{"line_number":53,"context_line":"machine. Meanwhile, lots of the network topology in user\u0027s datacenter is not"},{"line_number":54,"context_line":"based on RDMA. RDMA requires specialized and expensive NIC card, cable, switch."},{"line_number":55,"context_line":"So RDMA normally means a big network infrastructure upgrade. But it is easier"},{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"},{"line_number":57,"context_line":"to be changed, no effort is required for the VM tenants, but the latency of all"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_267432d0","line":54,"range":{"start_line":54,"start_character":28,"end_line":54,"end_character":41},"updated":"2019-11-12 13:08:47.000000000","message":"all melonox cards support it by default. i belive intel are working on rdma support and both netornome and broadcome generally support it so RDMA hardware support is not uncommon and the nic dont tend to cost more then the non rdma capable nic that support sriov or dpdk.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":51,"context_line":"PCIe devices. But indeed, RDMA users may cannot get much benefit from volume"},{"line_number":52,"context_line":"local cache, but still depend on how many switches between host and target"},{"line_number":53,"context_line":"machine. Meanwhile, lots of the network topology in user\u0027s datacenter is not"},{"line_number":54,"context_line":"based on RDMA. RDMA requires specialized and expensive NIC card, cable, switch."},{"line_number":55,"context_line":"So RDMA normally means a big network infrastructure upgrade. But it is easier"},{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"},{"line_number":57,"context_line":"to be changed, no effort is required for the VM tenants, but the latency of all"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_46aa8e1a","line":54,"range":{"start_line":54,"start_character":65,"end_line":54,"end_character":79},"updated":"2019-11-12 13:08:47.000000000","message":"the cables and switchs are teh same","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":50,"context_line":"local PCIe, because RDMA NIC card itself in host and target machines also are"},{"line_number":51,"context_line":"PCIe devices. But indeed, RDMA users may cannot get much benefit from volume"},{"line_number":52,"context_line":"local cache, but still depend on how many switches between host and target"},{"line_number":53,"context_line":"machine. Meanwhile, lots of the network topology in user\u0027s datacenter is not"},{"line_number":54,"context_line":"based on RDMA. RDMA requires specialized and expensive NIC card, cable, switch."},{"line_number":55,"context_line":"So RDMA normally means a big network infrastructure upgrade. But it is easier"},{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"},{"line_number":57,"context_line":"to be changed, no effort is required for the VM tenants, but the latency of all"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_86b88675","line":54,"range":{"start_line":53,"start_character":73,"end_line":54,"end_character":13},"updated":"2019-11-12 13:08:47.000000000","message":"well ROCEv2 does not require the core network to be modified.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":51,"context_line":"PCIe devices. But indeed, RDMA users may cannot get much benefit from volume"},{"line_number":52,"context_line":"local cache, but still depend on how many switches between host and target"},{"line_number":53,"context_line":"machine. Meanwhile, lots of the network topology in user\u0027s datacenter is not"},{"line_number":54,"context_line":"based on RDMA. RDMA requires specialized and expensive NIC card, cable, switch."},{"line_number":55,"context_line":"So RDMA normally means a big network infrastructure upgrade. But it is easier"},{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"},{"line_number":57,"context_line":"to be changed, no effort is required for the VM tenants, but the latency of all"},{"line_number":58,"context_line":"the VMs can be accelerated transparently."}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_c64c3e9f","line":55,"range":{"start_line":54,"start_character":79,"end_line":55,"end_character":60},"updated":"2019-11-12 13:08:47.000000000","message":"no this is not true. it can in some cases but in general ROCEv2 (RDMA over converged Ethernet) can be deploy using your existing switch and cables. using RDMA is not like infinaband which requires all your network infrastucture to be replaced.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"cfda1477f32861246de7dc3ff0cbc0ca94314f9f","unresolved":false,"context_lines":[{"line_number":51,"context_line":"PCIe devices. But indeed, RDMA users may cannot get much benefit from volume"},{"line_number":52,"context_line":"local cache, but still depend on how many switches between host and target"},{"line_number":53,"context_line":"machine. Meanwhile, lots of the network topology in user\u0027s datacenter is not"},{"line_number":54,"context_line":"based on RDMA. RDMA requires specialized and expensive NIC card, cable, switch."},{"line_number":55,"context_line":"So RDMA normally means a big network infrastructure upgrade. But it is easier"},{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"},{"line_number":57,"context_line":"to be changed, no effort is required for the VM tenants, but the latency of all"},{"line_number":58,"context_line":"the VMs can be accelerated transparently."}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_927d12b4","line":55,"range":{"start_line":54,"start_character":79,"end_line":55,"end_character":60},"in_reply_to":"3fa7e38b_c64c3e9f","updated":"2019-11-13 09:39:54.000000000","message":"Even RoCE, at least need to replace NIC, otherwise it is Soft RoCE which cannot get the performance of hardware supported RDMA. Some link:\nhttps://pdfs.semanticscholar.org/b124/fb74be1d64bcbe9f5e1b3135d9e3ddd0b8b1.pdf\nhttps://blog.mellanox.com/2015/06/ethernet-just-got-big-performance-boost-with-release-soft-roce/","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":52,"context_line":"local cache, but still depend on how many switches between host and target"},{"line_number":53,"context_line":"machine. Meanwhile, lots of the network topology in user\u0027s datacenter is not"},{"line_number":54,"context_line":"based on RDMA. RDMA requires specialized and expensive NIC card, cable, switch."},{"line_number":55,"context_line":"So RDMA normally means a big network infrastructure upgrade. But it is easier"},{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"},{"line_number":57,"context_line":"to be changed, no effort is required for the VM tenants, but the latency of all"},{"line_number":58,"context_line":"the VMs can be accelerated transparently."},{"line_number":59,"context_line":""}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_e6561a2b","line":56,"range":{"start_line":55,"start_character":65,"end_line":56,"end_character":39},"updated":"2019-11-12 13:08:47.000000000","message":"this assumes you both have enough pci lanes and slots aviable to do this. 2u servers maybe. but in high density form factors or 1u servers you may not be physically able to install a pcie ssd so unless you can use a u.2 or m.2 ssd that might not be easy to do.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":56,"context_line":"for them to just plugin a fast NVME SSD, nothing inside VMs would be required"},{"line_number":57,"context_line":"to be changed, no effort is required for the VM tenants, but the latency of all"},{"line_number":58,"context_line":"the VMs can be accelerated transparently."},{"line_number":59,"context_line":""},{"line_number":60,"context_line":"Use Cases"},{"line_number":61,"context_line":"---------"},{"line_number":62,"context_line":""}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_6638eaed","line":59,"updated":"2019-11-12 13:08:47.000000000","message":"i would remove everything form line 53 to here as its mainly conjecture and not really relevant to the problem description.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":136,"context_line":"* Assign local SSD to a specific VM. VM can then use bcache internally against"},{"line_number":137,"context_line":"  the ephemeral disk to cache their volume if they want."},{"line_number":138,"context_line":""},{"line_number":139,"context_line":"  The shortages may include:"},{"line_number":140,"context_line":""},{"line_number":141,"context_line":"  - Can only accelerate one VM. The fast SSD capability cannot be shared by"},{"line_number":142,"context_line":"    other VMs. Unlike RAM, SSD normally is in TB level and large enough to"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_c61afe7a","line":139,"range":{"start_line":139,"start_character":6,"end_line":139,"end_character":15},"updated":"2019-11-12 13:08:47.000000000","message":"drawbacks or cons","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":138,"context_line":""},{"line_number":139,"context_line":"  The shortages may include:"},{"line_number":140,"context_line":""},{"line_number":141,"context_line":"  - Can only accelerate one VM. The fast SSD capability cannot be shared by"},{"line_number":142,"context_line":"    other VMs. Unlike RAM, SSD normally is in TB level and large enough to"},{"line_number":143,"context_line":"    cache for all the VMs in one node."},{"line_number":144,"context_line":""},{"line_number":145,"context_line":"  - The owner of the VM should setup cache explicitly. But not all the VM owner"},{"line_number":146,"context_line":"    want to do this, and not all the VM owner has the knowledge to do this. But"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_46060e24","line":143,"range":{"start_line":141,"start_character":0,"end_line":143,"end_character":38},"updated":"2019-11-12 13:08:47.000000000","message":"newer ssds from samsung and likely other in the futrue support sriov to partition the ssd but yes in generaly this would be a 1:1 mapping today.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":146,"context_line":"    want to do this, and not all the VM owner has the knowledge to do this. But"},{"line_number":147,"context_line":"    they for sure want the volume performance is better by default."},{"line_number":148,"context_line":""},{"line_number":149,"context_line":"  - How to manage the relationship with local ssd and VM."},{"line_number":150,"context_line":""},{"line_number":151,"context_line":"* Create a dedicated cache cluster. Mount all the cache (NVME SSD) in cache"},{"line_number":152,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_06001602","line":149,"range":{"start_line":149,"start_character":2,"end_line":149,"end_character":57},"updated":"2019-11-12 13:08:47.000000000","message":"if its passhtogh via pci pashtough then the pci tracker will handle it but i think cyborg would be a good fit here too.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":148,"context_line":""},{"line_number":149,"context_line":"  - How to manage the relationship with local ssd and VM."},{"line_number":150,"context_line":""},{"line_number":151,"context_line":"* Create a dedicated cache cluster. Mount all the cache (NVME SSD) in cache"},{"line_number":152,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"},{"line_number":153,"context_line":"  specific volume. The allocated cache can be mounted on compute node through"},{"line_number":154,"context_line":"  NVMEof protocol. Then still use cache software to do the same cache."},{"line_number":155,"context_line":""},{"line_number":156,"context_line":"But this would be the compete between local PCIe and remote network. The"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_c128a886","line":153,"range":{"start_line":151,"start_character":2,"end_line":153,"end_character":19},"updated":"2019-11-12 13:08:47.000000000","message":"you could do this as a cinder volume type.\n\nand the tenant could simple request 2 volumds 1 for mass storage and 1 for high performacne. then you dont need to dedicate the pool to cache.\n\nthis is a pretty common usecase today.\n\nyou would just deploy an NVMEoF pool for the high performace pool and say ceph for the mass storage cluster.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":161,"context_line":"    protocol, or through librbd if ceph is used. The latency would be"},{"line_number":162,"context_line":"    millisecond level. Even NVME over TCP, the latency would be hundreds of"},{"line_number":163,"context_line":"    microsecond, depends on the network topology. As a contrast, the latency of"},{"line_number":164,"context_line":"    NVME SSD would be around 10 us, take Intel Optane SSD p4800x as example."},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"* Cache can be added in backend storage side, e.g. in ceph. Storage server"},{"line_number":167,"context_line":"  normally has its own cache mechanism, e.g. using memory as cache, or using"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_e645bab5","line":164,"range":{"start_line":164,"start_character":29,"end_line":164,"end_character":33},"updated":"2019-11-12 13:08:47.000000000","message":"from your reponce here https://review.opendev.org/#/c/689070/2/specs/ussuri/approved/support-volume-local-cache.rst@43\n\nthe expect perfomace is 55us best case with ~40us of software overhead. so the 10us which come form the hardware data sheet is not a fare comparison here.\n\nin any case nvmeof over rdma can get within 1 order of magnitude of this value. there is a performance advantage in best case and likely average case perfomce to caching but its not as large as you are implying here and if you are not using optane as your caching layer it will be signifcatly less.\n\ne.g. if you tried to use this feature with sata or flash based nvme ssds you might actually get better performance with nvmeof then local on the long tail due to cpu bottlenecks","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"cfda1477f32861246de7dc3ff0cbc0ca94314f9f","unresolved":false,"context_lines":[{"line_number":161,"context_line":"    protocol, or through librbd if ceph is used. The latency would be"},{"line_number":162,"context_line":"    millisecond level. Even NVME over TCP, the latency would be hundreds of"},{"line_number":163,"context_line":"    microsecond, depends on the network topology. As a contrast, the latency of"},{"line_number":164,"context_line":"    NVME SSD would be around 10 us, take Intel Optane SSD p4800x as example."},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"* Cache can be added in backend storage side, e.g. in ceph. Storage server"},{"line_number":167,"context_line":"  normally has its own cache mechanism, e.g. using memory as cache, or using"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_ad89d36f","line":164,"range":{"start_line":164,"start_character":29,"end_line":164,"end_character":33},"in_reply_to":"3fa7e38b_e645bab5","updated":"2019-11-13 09:39:54.000000000","message":"Currently NVME over RDMA will mount to host kernel first, right? so it also need to go through host kernel stack, and then through virtio. So the software overhead is similar with local cache. So it should compare with raw NVME SSD (10us here).\nI agree that if use low performance SSD as cache, it will be meaningless for volume local cache.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":9555,"name":"Matthew Booth","email":"mbooth@redhat.com","username":"MatthewBooth"},"change_message_id":"7df2b4446087d83b95bbd5a7b4d78bac77f778d5","unresolved":false,"context_lines":[{"line_number":192,"context_line":"Other end user impact"},{"line_number":193,"context_line":"---------------------"},{"line_number":194,"context_line":""},{"line_number":195,"context_line":"None"},{"line_number":196,"context_line":""},{"line_number":197,"context_line":"Performance Impact"},{"line_number":198,"context_line":"------------------"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_bbdd9b14","line":195,"updated":"2019-11-12 12:14:17.000000000","message":"Multi-attach may not be used.\nCeph may not be used.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"58ecdb42dfad3c53c620b4200f6dea66399bf49b","unresolved":false,"context_lines":[{"line_number":192,"context_line":"Other end user impact"},{"line_number":193,"context_line":"---------------------"},{"line_number":194,"context_line":""},{"line_number":195,"context_line":"None"},{"line_number":196,"context_line":""},{"line_number":197,"context_line":"Performance Impact"},{"line_number":198,"context_line":"------------------"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3fa7e38b_46c92e4f","line":195,"in_reply_to":"3fa7e38b_bbdd9b14","updated":"2019-11-12 13:08:47.000000000","message":"multi attach is an enduser impact\n\nceph is not as end user wont know what sotrage system provides there volume.","commit_id":"f29d40b64008a34af3174b2bdb15288e29328601"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":124,"context_line":""},{"line_number":125,"context_line":"  In function _connect_volume():"},{"line_number":126,"context_line":""},{"line_number":127,"context_line":"  - attach_cache after attach_encryptor, so as to make cache layer closer to"},{"line_number":128,"context_line":"    end user"},{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_84dc9717","line":127,"range":{"start_line":127,"start_character":4,"end_line":127,"end_character":39},"updated":"2020-02-11 12:10:10.000000000","message":"As far as I understand the cinder spec the cache should be used in a way that it caches encrypted data to keep security. Isn\u0027t this mean that attach_cache should be done _before_ attach_encryptor?","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":124,"context_line":""},{"line_number":125,"context_line":"  In function _connect_volume():"},{"line_number":126,"context_line":""},{"line_number":127,"context_line":"  - attach_cache after attach_encryptor, so as to make cache layer closer to"},{"line_number":128,"context_line":"    end user"},{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_a3af222b","line":127,"range":{"start_line":127,"start_character":4,"end_line":127,"end_character":39},"in_reply_to":"3fa7e38b_84dc9717","updated":"2020-02-13 11:49:48.000000000","message":"synced with cinder spec. thanks","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":127,"context_line":"  - attach_cache after attach_encryptor, so as to make cache layer closer to"},{"line_number":128,"context_line":"    end user"},{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_24dda311","line":130,"range":{"start_line":130,"start_character":70,"end_line":130,"end_character":75},"updated":"2020-02-11 12:10:10.000000000","message":"cacheable","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":127,"context_line":"  - attach_cache after attach_encryptor, so as to make cache layer closer to"},{"line_number":128,"context_line":"    end user"},{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_439e6efa","line":130,"range":{"start_line":130,"start_character":70,"end_line":130,"end_character":75},"in_reply_to":"3fa7e38b_24dda311","updated":"2020-02-13 11:49:48.000000000","message":"Done","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":128,"context_line":"    end user"},{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"},{"line_number":134,"context_line":"    means it has the cache capability, otherwise raise an exception saying that"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_a4d0b3f6","line":131,"range":{"start_line":131,"start_character":28,"end_line":131,"end_character":47},"updated":"2020-02-11 12:10:10.000000000","message":"If caching is allowed. Based on the cinder spec this does not mean that caching is required. Such thing will come from the flavor based on the cinder spec.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":128,"context_line":"    end user"},{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"},{"line_number":134,"context_line":"    means it has the cache capability, otherwise raise an exception saying that"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_a332c20c","line":131,"range":{"start_line":131,"start_character":28,"end_line":131,"end_character":47},"in_reply_to":"3fa7e38b_a4d0b3f6","updated":"2020-02-13 11:49:48.000000000","message":"changed to \"allowed\" here. thanks","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"},{"line_number":134,"context_line":"    means it has the cache capability, otherwise raise an exception saying that"},{"line_number":135,"context_line":"    the machine don\u0027t has the capability."},{"line_number":136,"context_line":""},{"line_number":137,"context_line":"  - Call os-brick to cache the volume [2]_. os-brick will call cache software"},{"line_number":138,"context_line":"    to setup the cache and replace the path of original volume with the"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_242b63e9","line":135,"range":{"start_line":132,"start_character":26,"end_line":135,"end_character":41},"updated":"2020-02-11 12:10:10.000000000","message":"If cache is needed but the host has no capability then is it OK for the end user that that the request fails? \n\nIn general can we handle cache as a best effort thing? E.g. if requested and available then have it, but if requested but not available then simply run the instance without cache.\n\nOr we need to treat caching as a requirement and if a boot request has caching needs then nova needs to select the compute host where such caching is available.\n\nDeciding which way to go is needed as it changes the impact on nova. If cache is best effort then no scheduling support is needed, but if caching is a requirement then scheduling support is needed.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"},{"line_number":134,"context_line":"    means it has the cache capability, otherwise raise an exception saying that"},{"line_number":135,"context_line":"    the machine don\u0027t has the capability."},{"line_number":136,"context_line":""},{"line_number":137,"context_line":"  - Call os-brick to cache the volume [2]_. os-brick will call cache software"},{"line_number":138,"context_line":"    to setup the cache and replace the path of original volume with the"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_3527e5b7","line":135,"range":{"start_line":132,"start_character":26,"end_line":135,"end_character":41},"in_reply_to":"3fa7e38b_242b63e9","updated":"2020-02-13 11:49:48.000000000","message":"1. use flavor extra spec - trait to schedule guest to the host machine with cache capability\n2. ignore the failure when setting up cache in the selected host which advertised with cache capability. Best effort thing means this, I think.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"1b611e9a131e4e644711ab63e410e410199f26cc","unresolved":false,"context_lines":[{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"},{"line_number":134,"context_line":"    means it has the cache capability, otherwise raise an exception saying that"},{"line_number":135,"context_line":"    the machine don\u0027t has the capability."},{"line_number":136,"context_line":""},{"line_number":137,"context_line":"  - Call os-brick to cache the volume [2]_. os-brick will call cache software"},{"line_number":138,"context_line":"    to setup the cache and replace the path of original volume with the"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_bc099929","line":135,"range":{"start_line":132,"start_character":26,"end_line":135,"end_character":41},"in_reply_to":"3fa7e38b_242b63e9","updated":"2020-02-12 08:09:39.000000000","message":"\u003e Deciding which way to go is needed as it changes the impact on\n \u003e nova. If cache is best effort then no scheduling support is needed,\n \u003e but if caching is a requirement then scheduling support is needed.\n\nDose the \u0027best effort\u0027 means \u0027prefer the cache\u0027? If yes, it also need the scheduling support","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"28c5156f77b141be583a6c12168c5de950e761fe","unresolved":false,"context_lines":[{"line_number":129,"context_line":""},{"line_number":130,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cache"},{"line_number":131,"context_line":"    property for the volume if cache is needed. If don\u0027t need cache, then just"},{"line_number":132,"context_line":"    skip the cache logic. If need cache, then check if the host machine has the"},{"line_number":133,"context_line":"    cache capability or not. If CONF.libvirt.cache_name is not empty, then it"},{"line_number":134,"context_line":"    means it has the cache capability, otherwise raise an exception saying that"},{"line_number":135,"context_line":"    the machine don\u0027t has the capability."},{"line_number":136,"context_line":""},{"line_number":137,"context_line":"  - Call os-brick to cache the volume [2]_. os-brick will call cache software"},{"line_number":138,"context_line":"    to setup the cache and replace the path of original volume with the"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_e63217f3","line":135,"range":{"start_line":132,"start_character":26,"end_line":135,"end_character":41},"in_reply_to":"3fa7e38b_bc099929","updated":"2020-02-12 16:42:39.000000000","message":"Here best effort for me means if the request lands on a compute that has cache defined then the cache will be used.  But yes your definition would need weigher support for the cache.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"a10b1aaa81333a1b293b0a5d90008c7cc6097a38","unresolved":false,"context_lines":[{"line_number":154,"context_line":""},{"line_number":155,"context_line":"  Suggested switch names:"},{"line_number":156,"context_line":""},{"line_number":157,"context_line":"  - cache_name: Specifies which cache software to use. Currently only support"},{"line_number":158,"context_line":"    \u0027opencas\u0027. If it is empty, then local cache is disabled."},{"line_number":159,"context_line":""},{"line_number":160,"context_line":"  - opencas_cache_id: Specifies which cache instance to use. Typically opencas"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_5a191785","line":157,"range":{"start_line":157,"start_character":4,"end_line":157,"end_character":14},"updated":"2020-01-15 06:23:44.000000000","message":"I raised this issue in the cinder spec. If there is multiple cache software on one compute host, how will nova differentiate which cache software to use for which volume type? \nOr what is being proposed there is that only one cache software for one host?\nI would really like to see the flexibility to have multiple cache software co-exist, for Ceph RBD cache is on the way.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"480b3b736bffa03d62c006dd0ca0f91f78584e11","unresolved":false,"context_lines":[{"line_number":154,"context_line":""},{"line_number":155,"context_line":"  Suggested switch names:"},{"line_number":156,"context_line":""},{"line_number":157,"context_line":"  - cache_name: Specifies which cache software to use. Currently only support"},{"line_number":158,"context_line":"    \u0027opencas\u0027. If it is empty, then local cache is disabled."},{"line_number":159,"context_line":""},{"line_number":160,"context_line":"  - opencas_cache_id: Specifies which cache instance to use. Typically opencas"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_58de586b","line":157,"range":{"start_line":157,"start_character":4,"end_line":157,"end_character":14},"in_reply_to":"3fa7e38b_52400c60","updated":"2020-02-13 13:21:23.000000000","message":"Can we start simple and specific and evolve it to more and more generic in multiple steps as needs arise? \n\nThe cinder interface today is a single flag telling nova if the volume is allowed to be cached. \n\nIf we want to give more freedom to select cache software, cache instance, or cache mode per volume instance or per volume type then the cinder interface also needs to be extended to provide more information to nova about the requirements of the cache so that nova can schedule the instance accordingly and the to os-brick to setup the cache accordingly.\n\nIn parallel with the cinder interface change nova config needs to be extended to allow specifying more than one cache on the host with different parameters like cache instance id, cache software, and cache mode.\n\nThis could very well be the future. But we can start small, with simply allowing to turning on a single cache per host to make that host cache-capable and then schedule flavors with volume cache requirements to those computes.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"c36a7a608bfd5bd55512a9eb1bf9ebc2f0d22379","unresolved":false,"context_lines":[{"line_number":154,"context_line":""},{"line_number":155,"context_line":"  Suggested switch names:"},{"line_number":156,"context_line":""},{"line_number":157,"context_line":"  - cache_name: Specifies which cache software to use. Currently only support"},{"line_number":158,"context_line":"    \u0027opencas\u0027. If it is empty, then local cache is disabled."},{"line_number":159,"context_line":""},{"line_number":160,"context_line":"  - opencas_cache_id: Specifies which cache instance to use. Typically opencas"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_a0fedc03","line":157,"range":{"start_line":157,"start_character":4,"end_line":157,"end_character":14},"in_reply_to":"3fa7e38b_5a191785","updated":"2020-01-15 09:46:12.000000000","message":"What I\u0027m thinking is: Multiple cache software can co-exist in one compute node. But for volume local cache, one cache software would be used (specified in configuration file), in fact at this time only open-cas is supported.\n\nDifferent volume type may require different cache mode, so in nova configuration file, there will be a mapping between cache mode and cache instance, something like:\nAdministrator would list the cache mode - cache instances mapping in Nova configuration file. Like:\n{\n\u0027write-through\u0027: [opencas-id#1, opencas-id#2],\n\u0027write-around\u0027: [opencas-id#3],\n\u0027write-invalidate\u0027: [],\n\u0027write-back\u0027: [opencas-id#5,opencas-id#6,opencas-id#7],\n\u0027write-only\u0027: [],\n}\nAnd then pass this to os-brick, and let os-brick to determine which cache instance to use.\n\nThis spec would not support ceph RBD, so I think would not impact RBD cache.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"20a0d4f951e56f5c7ddf4509608c03d7bbbb8b9e","unresolved":false,"context_lines":[{"line_number":154,"context_line":""},{"line_number":155,"context_line":"  Suggested switch names:"},{"line_number":156,"context_line":""},{"line_number":157,"context_line":"  - cache_name: Specifies which cache software to use. Currently only support"},{"line_number":158,"context_line":"    \u0027opencas\u0027. If it is empty, then local cache is disabled."},{"line_number":159,"context_line":""},{"line_number":160,"context_line":"  - opencas_cache_id: Specifies which cache instance to use. Typically opencas"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_52400c60","line":157,"range":{"start_line":157,"start_character":4,"end_line":157,"end_character":14},"in_reply_to":"3fa7e38b_a0fedc03","updated":"2020-01-16 01:49:18.000000000","message":"What you have described is not the co-existence of *multiple local cache software* :) Actually only one can work with your current spec. The others would just be lying  there:)\nIMHO, you are creating something generic which can be used for other local volume cache software (maybe arrive in future). So in the spec, I would expect to see no hard code to opencas but making room for other possibilities as well.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":157,"context_line":"  - cache_name: Specifies which cache software to use. Currently only support"},{"line_number":158,"context_line":"    \u0027opencas\u0027. If it is empty, then local cache is disabled."},{"line_number":159,"context_line":""},{"line_number":160,"context_line":"  - opencas_cache_id: Specifies which cache instance to use. Typically opencas"},{"line_number":161,"context_line":"    has only one cache instance in a single server, but it has the ability to"},{"line_number":162,"context_line":"    have more than one cache instance. Nova need to specify which cache"},{"line_number":163,"context_line":"    instance to use for volume local cache."},{"line_number":164,"context_line":""},{"line_number":165,"context_line":"  Suggested section: [libvirt]"},{"line_number":166,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_c414cf9b","line":163,"range":{"start_line":160,"start_character":0,"end_line":163,"end_character":43},"updated":"2020-02-11 12:10:10.000000000","message":"Does this mean that even if more than two opencas instance is on a compute host nova can only use one of those?","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":157,"context_line":"  - cache_name: Specifies which cache software to use. Currently only support"},{"line_number":158,"context_line":"    \u0027opencas\u0027. If it is empty, then local cache is disabled."},{"line_number":159,"context_line":""},{"line_number":160,"context_line":"  - opencas_cache_id: Specifies which cache instance to use. Typically opencas"},{"line_number":161,"context_line":"    has only one cache instance in a single server, but it has the ability to"},{"line_number":162,"context_line":"    have more than one cache instance. Nova need to specify which cache"},{"line_number":163,"context_line":"    instance to use for volume local cache."},{"line_number":164,"context_line":""},{"line_number":165,"context_line":"  Suggested section: [libvirt]"},{"line_number":166,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_b53755db","line":163,"range":{"start_line":160,"start_character":0,"end_line":163,"end_character":43},"in_reply_to":"3fa7e38b_c414cf9b","updated":"2020-02-13 11:49:48.000000000","message":"changed to IDs. Pass all the IDs to os-brick and let os-brick to find a best one.","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":162,"context_line":"    have more than one cache instance. Nova need to specify which cache"},{"line_number":163,"context_line":"    instance to use for volume local cache."},{"line_number":164,"context_line":""},{"line_number":165,"context_line":"  Suggested section: [libvirt]"},{"line_number":166,"context_line":""},{"line_number":167,"context_line":"Nova just need to call os-brick to set cache for the volume only when the"},{"line_number":168,"context_line":"volume has the property of \"cachable\". Let cinder to determine and set the"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_64eabb77","line":165,"updated":"2020-02-11 12:10:10.000000000","message":"Is this feature libvirt driver specific? I think if os-brick support caching then not just the libvirt driver can use such function from os-brick.\n\nAs far as I see at least hyperv uses os-brick besides libvirt. So in theory caching might need to be configured virt driver independently. So I suggest to have this config in the [compute] section.\n\nAlso I would made the config cache technology independent if possible. Something like:\n\n[compute]\nvolume_local_cache_driver \u003d \u0027opencas\u0027\nvolume_local_cache_instance \u003d ...","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":181,"context_line":"         +                                    |"},{"line_number":182,"context_line":"         |                                    |"},{"line_number":183,"context_line":"         +                                    |"},{"line_number":184,"context_line":"       attach_encryptor                       |"},{"line_number":185,"context_line":"         +                                    |"},{"line_number":186,"context_line":"         |                                    |"},{"line_number":187,"context_line":"         +                                    |"},{"line_number":188,"context_line":"       attach_cache                           |"},{"line_number":189,"context_line":"             +                                |"},{"line_number":190,"context_line":"             |                                |"},{"line_number":191,"context_line":"             +                                |"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_449a1f12","line":188,"range":{"start_line":184,"start_character":0,"end_line":188,"end_character":47},"updated":"2020-02-11 12:10:10.000000000","message":"this needs to be updated according to the cinder spec","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":181,"context_line":"         +                                    |"},{"line_number":182,"context_line":"         |                                    |"},{"line_number":183,"context_line":"         +                                    |"},{"line_number":184,"context_line":"       attach_encryptor                       |"},{"line_number":185,"context_line":"         +                                    |"},{"line_number":186,"context_line":"         |                                    |"},{"line_number":187,"context_line":"         +                                    |"},{"line_number":188,"context_line":"       attach_cache                           |"},{"line_number":189,"context_line":"             +                                |"},{"line_number":190,"context_line":"             |                                |"},{"line_number":191,"context_line":"             +                                |"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_5542a13d","line":188,"range":{"start_line":184,"start_character":0,"end_line":188,"end_character":47},"in_reply_to":"3fa7e38b_449a1f12","updated":"2020-02-13 11:49:48.000000000","message":"Done","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":265,"context_line":""},{"line_number":266,"context_line":"REST API impact"},{"line_number":267,"context_line":"---------------"},{"line_number":268,"context_line":""},{"line_number":269,"context_line":"None"},{"line_number":270,"context_line":""},{"line_number":271,"context_line":"Security impact"},{"line_number":272,"context_line":"---------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_04a98723","line":269,"range":{"start_line":268,"start_character":0,"end_line":269,"end_character":4},"updated":"2020-02-11 12:10:10.000000000","message":"How will the user request a volume to be cached? The cinder cacheable attribute only means (as far as I understand) that the volume can be cached if requested. Will this be a new flavor extra_spec? How that will look like?","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":265,"context_line":""},{"line_number":266,"context_line":"REST API impact"},{"line_number":267,"context_line":"---------------"},{"line_number":268,"context_line":""},{"line_number":269,"context_line":"None"},{"line_number":270,"context_line":""},{"line_number":271,"context_line":"Security impact"},{"line_number":272,"context_line":"---------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_15e5093e","line":269,"range":{"start_line":268,"start_character":0,"end_line":269,"end_character":4},"in_reply_to":"3fa7e38b_04a98723","updated":"2020-02-13 11:49:48.000000000","message":"Updated in spec. Let operator to:\n1. Set trait in placement for the host machine with cache capability\n2. Add trait in flavor extra spec\n3. Select the flavor with trait, the guest would be scheduled to host with cache. Select flavor without trait if don\u0027t want cache","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"480b3b736bffa03d62c006dd0ca0f91f78584e11","unresolved":false,"context_lines":[{"line_number":265,"context_line":""},{"line_number":266,"context_line":"REST API impact"},{"line_number":267,"context_line":"---------------"},{"line_number":268,"context_line":""},{"line_number":269,"context_line":"None"},{"line_number":270,"context_line":""},{"line_number":271,"context_line":"Security impact"},{"line_number":272,"context_line":"---------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_b88bcc1f","line":269,"range":{"start_line":268,"start_character":0,"end_line":269,"end_character":4},"in_reply_to":"3fa7e38b_15e5093e","updated":"2020-02-13 13:21:23.000000000","message":"\u003e Updated in spec. Let operator to:\n \u003e 1. Set trait in placement for the host machine with cache\n \u003e capability\n\nI think nova compute can set this trait automatically based on configuration. If there is a cache instance configured in the nova compute config then the libvirt driver (that implements the cache logic) can mark the compute capable of providing this functionality. This way the operator only need to configure nova and not both nova and placement.\n\n \u003e 2. Add trait in flavor extra spec\n\nthis means a flavor extra_spec with trait:COMPUTE_SUPPORT_VOLUME_CACHE \u003d required will be added to the flavor.\n\n \u003e 3. Select the flavor with trait, the guest would be scheduled to\n \u003e host with cache. Select flavor without trait if don\u0027t want cache\n\nAnd a flavor with such trait would mean \"every volume of this server needs to be cached\". \n\nIf we need more granular support in the future, like I need my data volume to be cached but my boot volume does not need to be cached, then I think such request should be configured in cinder, per volume, and cinder should provide the fine grained request to nova during the scheduling","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"26c7195098854c753758488c411f8949d686951e","unresolved":false,"context_lines":[{"line_number":271,"context_line":"Security impact"},{"line_number":272,"context_line":"---------------"},{"line_number":273,"context_line":""},{"line_number":274,"context_line":"None"},{"line_number":275,"context_line":""},{"line_number":276,"context_line":"Notifications impact"},{"line_number":277,"context_line":"--------------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_84625714","line":274,"updated":"2020-02-11 12:10:10.000000000","message":"mention the order of the encryptor and the cache devices","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"0e257b1756ee2a4e84c6c34f55f5b178867012b1","unresolved":false,"context_lines":[{"line_number":271,"context_line":"Security impact"},{"line_number":272,"context_line":"---------------"},{"line_number":273,"context_line":""},{"line_number":274,"context_line":"None"},{"line_number":275,"context_line":""},{"line_number":276,"context_line":"Notifications impact"},{"line_number":277,"context_line":"--------------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"3fa7e38b_b5d3d562","line":274,"in_reply_to":"3fa7e38b_84625714","updated":"2020-02-13 11:49:48.000000000","message":"Done","commit_id":"5decd4e23c5ab7bcffabb6375990018e1a8ce6d9"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"480b3b736bffa03d62c006dd0ca0f91f78584e11","unresolved":false,"context_lines":[{"line_number":75,"context_line":"It is allowed that more than one cache instance in one compute node. Cache IDs"},{"line_number":76,"context_line":"identifies cache instances that can be used. Cache mode is transparent to"},{"line_number":77,"context_line":"os-brick."},{"line_number":78,"context_line":""},{"line_number":79,"context_line":"Operator sets trait (e.g. COMPUTE_SUPPORT_VOLUME_CACHE) in placement for the"},{"line_number":80,"context_line":"host machines that have cache capability. If want the volume be cached, firstly"},{"line_number":81,"context_line":"the volume should belongs to a volume type with \"cacheable\" property. Then"},{"line_number":82,"context_line":"select the flavor with extra spec containing this trait, so the guest would be"},{"line_number":83,"context_line":"landed at the host machine with cache capability. If don\u0027t want the volume be"}],"source_content_type":"text/x-rst","patch_set":9,"id":"3fa7e38b_78e31404","line":80,"range":{"start_line":78,"start_character":0,"end_line":80,"end_character":40},"updated":"2020-02-13 13:21:23.000000000","message":"This could be a compute capability that is mapped to the trait and the libvirt driver can set this capability to true if there is cache instance id is configured in the nove conf.","commit_id":"66a767a36aaaee7c641ea31741a7609a219ec0e8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"480b3b736bffa03d62c006dd0ca0f91f78584e11","unresolved":false,"context_lines":[{"line_number":86,"context_line":"If there\u0027s failure happened during setting up caching, e.g. cache device"},{"line_number":87,"context_line":"broken, just ignore the failure and go ahead to boot the guest without cache."},{"line_number":88,"context_line":"Meanwhile the failure would be logged."},{"line_number":89,"context_line":""},{"line_number":90,"context_line":"Final architecture would be something like::"},{"line_number":91,"context_line":""},{"line_number":92,"context_line":"                        Compute Node"}],"source_content_type":"text/x-rst","patch_set":9,"id":"3fa7e38b_b86d0c02","line":89,"updated":"2020-02-13 13:21:23.000000000","message":"I think we should not ignore the above error but re-schedule the request instead.\n\nSo my understanding of the use case:\n\n* Cinder can state if a volume is local cacheable or not.\n* Deployer can configure local cache on compute and configure nova-compute to use that local cache\n* Deployer can create (a premium) flavor that requests volume local caching\n* User can decide that she is willing to pay some extra for a faster volume so selects the above premium flavor for her server\n\nNow:\n* Nova has to place the server to a compute which supports caching.\n* The libvirt driver needs to set up the volume to be cached via os-brick.\n\nSo, if there is no way to cache the volume then such request needs to be re-scheduled or rejected.\n\nAlso if the volume is not cacheable then requesting to caches it is a contradiction. I can imagine that the end user would be pretty sad that she paid for the premium flavor with volume local cache but got no locally cached volume as cinder marked the volumes as non cacheable. \n\nAlso an interesting question what should happen if an server create request includes two volumes and a flavor with local cached required. But one of the volumes are marked non cacheable by cinder while the other is marked cacheable by cinder","commit_id":"66a767a36aaaee7c641ea31741a7609a219ec0e8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"480b3b736bffa03d62c006dd0ca0f91f78584e11","unresolved":false,"context_lines":[{"line_number":182,"context_line":""},{"line_number":183,"context_line":"  Instance IDs are separated by commas."},{"line_number":184,"context_line":""},{"line_number":185,"context_line":"Nova calls os-brick to set cache for the volume only when it has the property"},{"line_number":186,"context_line":"of \"cacheable\". Let cinder to determine and set the property, just like the way"},{"line_number":187,"context_line":"did for volume encryption. If the volume contains property \"multiattach\","},{"line_number":188,"context_line":"cinder would not set \"cacheable\" for it. Code work flow would be like::"},{"line_number":189,"context_line":""}],"source_content_type":"text/x-rst","patch_set":9,"id":"3fa7e38b_584738a5","line":186,"range":{"start_line":185,"start_character":48,"end_line":186,"end_character":14},"updated":"2020-02-13 13:21:23.000000000","message":"and the flavor requested such caching","commit_id":"66a767a36aaaee7c641ea31741a7609a219ec0e8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"480b3b736bffa03d62c006dd0ca0f91f78584e11","unresolved":false,"context_lines":[{"line_number":348,"context_line":""},{"line_number":349,"context_line":"Work Items"},{"line_number":350,"context_line":"----------"},{"line_number":351,"context_line":""},{"line_number":352,"context_line":"* Cache the volume during connecting volume"},{"line_number":353,"context_line":""},{"line_number":354,"context_line":"* Release cache during disconnecting volume"}],"source_content_type":"text/x-rst","patch_set":9,"id":"3fa7e38b_98ccd062","line":351,"updated":"2020-02-13 13:21:23.000000000","message":"* Add COMPUTE_SUPPORT_VOLUME_CACHE trait to os-traits\n\n* add a new compute capability that maps to this trait\n\n* enable this capability in the libvirt driver if a caches is configured","commit_id":"66a767a36aaaee7c641ea31741a7609a219ec0e8"},{"author":{"_account_id":9708,"name":"Balazs Gibizer","display_name":"gibi","email":"gibizer@gmail.com","username":"gibi"},"change_message_id":"480b3b736bffa03d62c006dd0ca0f91f78584e11","unresolved":false,"context_lines":[{"line_number":368,"context_line":""},{"line_number":369,"context_line":"* New unit test should be added"},{"line_number":370,"context_line":""},{"line_number":371,"context_line":"* Function test with simulated cache should be added. This can use open-cas"},{"line_number":372,"context_line":"  with a local file as NVME device."},{"line_number":373,"context_line":""},{"line_number":374,"context_line":"  - Check if the emulated volume is created for VM or not."},{"line_number":375,"context_line":""}],"source_content_type":"text/x-rst","patch_set":9,"id":"3fa7e38b_78e6b4dd","line":372,"range":{"start_line":371,"start_character":2,"end_line":372,"end_character":35},"updated":"2020-02-13 13:21:23.000000000","message":"Hm in the nova functional env the virt driver is mostly faked. However this kind of local file based cache would be  good solution for some tempest tests.","commit_id":"66a767a36aaaee7c641ea31741a7609a219ec0e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":32,"context_line":"guest don\u0027t know it is using an emulated block device. Regarding lower layer,"},{"line_number":33,"context_line":"backend volume don\u0027t know it is cached, and the data in backend volume will not"},{"line_number":34,"context_line":"have extra change because of cache. That means even if the cache is lost for"},{"line_number":35,"context_line":"some reason, the backend volume can be mounted to other places and available"},{"line_number":36,"context_line":"immediately. This spec is trying to add volume local cache using such cache"},{"line_number":37,"context_line":"software."},{"line_number":38,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_ddfc2b73","line":35,"range":{"start_line":35,"start_character":5,"end_line":35,"end_character":11},"updated":"2020-02-19 06:11:25.000000000","message":"s/reason/reasons","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":32,"context_line":"guest don\u0027t know it is using an emulated block device. Regarding lower layer,"},{"line_number":33,"context_line":"backend volume don\u0027t know it is cached, and the data in backend volume will not"},{"line_number":34,"context_line":"have extra change because of cache. That means even if the cache is lost for"},{"line_number":35,"context_line":"some reason, the backend volume can be mounted to other places and available"},{"line_number":36,"context_line":"immediately. This spec is trying to add volume local cache using such cache"},{"line_number":37,"context_line":"software."},{"line_number":38,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_9238dd9e","line":35,"range":{"start_line":35,"start_character":5,"end_line":35,"end_character":11},"in_reply_to":"3fa7e38b_ddfc2b73","updated":"2020-02-19 07:27:44.000000000","message":"Done","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":71,"context_line":"All volumes cached by the same cache instance share same cache mode. The"},{"line_number":72,"context_line":"operator can change cache mode dynamically, using cache software management"},{"line_number":73,"context_line":"tool. os-brick just accepts the cache name and cache IDs from Nova. Cache name"},{"line_number":74,"context_line":"identifies which cache software to use, currently it only supports \u0027opencas\u0027."},{"line_number":75,"context_line":"It is allowed that more than one cache instance in one compute node. Cache IDs"},{"line_number":76,"context_line":"identifies cache instances that can be used. Cache mode is transparent to"},{"line_number":77,"context_line":"os-brick."}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_bd536f6e","line":74,"range":{"start_line":74,"start_character":68,"end_line":74,"end_character":75},"updated":"2020-02-19 06:11:25.000000000","message":"While we have a deployment, how to get that \u0027opencas\u0027 package? From https://pypi.org/ (Obtained manually) I am not found it, while we deploy nova, I would like to get the package automatically.\n\nAnother, which version of \u0027opencas\u0027 does we need to use?","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":71,"context_line":"All volumes cached by the same cache instance share same cache mode. The"},{"line_number":72,"context_line":"operator can change cache mode dynamically, using cache software management"},{"line_number":73,"context_line":"tool. os-brick just accepts the cache name and cache IDs from Nova. Cache name"},{"line_number":74,"context_line":"identifies which cache software to use, currently it only supports \u0027opencas\u0027."},{"line_number":75,"context_line":"It is allowed that more than one cache instance in one compute node. Cache IDs"},{"line_number":76,"context_line":"identifies cache instances that can be used. Cache mode is transparent to"},{"line_number":77,"context_line":"os-brick."}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_7268c188","line":74,"range":{"start_line":74,"start_character":68,"end_line":74,"end_character":75},"in_reply_to":"3fa7e38b_bd536f6e","updated":"2020-02-19 07:27:44.000000000","message":"please refer to: https://open-cas.github.io/getting_started_open_cas_linux.html\n--------------------------------------\ngit clone https://github.com/Open-CAS/open-cas-linux\ncd open-cas-linux\ngit submodule update --init\n./configure\nmake\nmake install\n--------------------------------------","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":79,"context_line":"A compute capability is mapped to the trait (e.g. COMPUTE_SUPPORT_VOLUME_CACHE)"},{"line_number":80,"context_line":"and the libvirt driver can set this capability to true if there is cache"},{"line_number":81,"context_line":"instance id is configured in the nove conf. If want the volume be cached,"},{"line_number":82,"context_line":"firstly the volume should belongs to a volume type with \"cacheable\" property."},{"line_number":83,"context_line":"Then select the flavor with extra spec containing this trait, so the guest"},{"line_number":84,"context_line":"would be landed at the host machine with cache capability. If don\u0027t want the"},{"line_number":85,"context_line":"volume be cached, just select a flavor without this trait."}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_effb6e11","line":82,"range":{"start_line":82,"start_character":56,"end_line":82,"end_character":67},"updated":"2020-02-19 06:11:25.000000000","message":"This trait depends on https://review.opendev.org/#/c/684556/22 in Cinder, if the Cinder doesnot support this feature in volume type that Nova can\u0027t use volume local cacahe successfully, right?","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":79,"context_line":"A compute capability is mapped to the trait (e.g. COMPUTE_SUPPORT_VOLUME_CACHE)"},{"line_number":80,"context_line":"and the libvirt driver can set this capability to true if there is cache"},{"line_number":81,"context_line":"instance id is configured in the nove conf. If want the volume be cached,"},{"line_number":82,"context_line":"firstly the volume should belongs to a volume type with \"cacheable\" property."},{"line_number":83,"context_line":"Then select the flavor with extra spec containing this trait, so the guest"},{"line_number":84,"context_line":"would be landed at the host machine with cache capability. If don\u0027t want the"},{"line_number":85,"context_line":"volume be cached, just select a flavor without this trait."}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_721681f6","line":82,"range":{"start_line":82,"start_character":56,"end_line":82,"end_character":67},"in_reply_to":"3fa7e38b_effb6e11","updated":"2020-02-19 07:27:44.000000000","message":"Yes, if a previous version of cinder is used, no \"cacheable\" property will be found in connection_info, so Nova will not set cache in this case.","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":105,"context_line":" |   |                       |          |          |       |"},{"line_number":106,"context_line":" |   | attach/detach         |          |          |       |"},{"line_number":107,"context_line":" |   |                 +-----+----------+------+   |       |"},{"line_number":108,"context_line":" | +-+-------+         | /dev/cas1  /dev/cas2  |   |       |"},{"line_number":109,"context_line":" | | osbrick +---------+                       |   |       |"},{"line_number":110,"context_line":" | +---------+ casadm  |        open cas       |   |       |"},{"line_number":111,"context_line":" |                     +-+---+----------+------+   |       |"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_122dcd8f","line":108,"range":{"start_line":108,"start_character":36,"end_line":108,"end_character":45},"updated":"2020-02-19 06:11:25.000000000","message":"Given it belongs to /dev/sdc, in /dev/sdc that cann\u0027t have another open cas, right?\nSo, one /dev/casX is mapping one /dev/sdX, right?","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":105,"context_line":" |   |                       |          |          |       |"},{"line_number":106,"context_line":" |   | attach/detach         |          |          |       |"},{"line_number":107,"context_line":" |   |                 +-----+----------+------+   |       |"},{"line_number":108,"context_line":" | +-+-------+         | /dev/cas1  /dev/cas2  |   |       |"},{"line_number":109,"context_line":" | | osbrick +---------+                       |   |       |"},{"line_number":110,"context_line":" | +---------+ casadm  |        open cas       |   |       |"},{"line_number":111,"context_line":" |                     +-+---+----------+------+   |       |"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_f23351c2","line":108,"range":{"start_line":108,"start_character":36,"end_line":108,"end_character":45},"in_reply_to":"3fa7e38b_122dcd8f","updated":"2020-02-19 07:27:44.000000000","message":"yes it is","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":132,"context_line":""},{"line_number":133,"context_line":"  In function _connect_volume():"},{"line_number":134,"context_line":""},{"line_number":135,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cacheable"},{"line_number":136,"context_line":"    property for the volume if caching is allowed. If cacheable is set and"},{"line_number":137,"context_line":"    volume_local_cache_driver in CONF is not empty, then do caching. Otherwise"},{"line_number":138,"context_line":"    just ignore caching."},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"  - attach_cache before attach_encryptor, cache lays under encryptor. It is to"},{"line_number":141,"context_line":"    keep encrypted volume secure. No decrypted data would be written to cache"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_8f4e7a6b","line":138,"range":{"start_line":135,"start_character":4,"end_line":138,"end_character":24},"updated":"2020-02-19 06:11:25.000000000","message":"Ah, if Cinder doesnot support \u0027cacheable\u0027 property, Nova will ignore caching.","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":132,"context_line":""},{"line_number":133,"context_line":"  In function _connect_volume():"},{"line_number":134,"context_line":""},{"line_number":135,"context_line":"  - Check if the volume should be cached or not. Cinder would set the cacheable"},{"line_number":136,"context_line":"    property for the volume if caching is allowed. If cacheable is set and"},{"line_number":137,"context_line":"    volume_local_cache_driver in CONF is not empty, then do caching. Otherwise"},{"line_number":138,"context_line":"    just ignore caching."},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"  - attach_cache before attach_encryptor, cache lays under encryptor. It is to"},{"line_number":141,"context_line":"    keep encrypted volume secure. No decrypted data would be written to cache"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_d2cad579","line":138,"range":{"start_line":135,"start_character":4,"end_line":138,"end_character":24},"in_reply_to":"3fa7e38b_8f4e7a6b","updated":"2020-02-19 07:27:44.000000000","message":"yes","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":178,"context_line":"  Suggested section: [compute]. Configuration would be like:"},{"line_number":179,"context_line":"  [compute]"},{"line_number":180,"context_line":"  volume_local_cache_driver \u003d \u0027opencas\u0027"},{"line_number":181,"context_line":"  volume_local_cache_instance_ids \u003d 1,15,222"},{"line_number":182,"context_line":""},{"line_number":183,"context_line":"  Instance IDs are separated by commas."},{"line_number":184,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_af9b3614","line":181,"range":{"start_line":181,"start_character":2,"end_line":181,"end_character":33},"updated":"2020-02-19 06:11:25.000000000","message":"Is this ``virsh list`` return the \"id\", Or the ``nova list`` show the instance\u0027s ID(uuid)?","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":178,"context_line":"  Suggested section: [compute]. Configuration would be like:"},{"line_number":179,"context_line":"  [compute]"},{"line_number":180,"context_line":"  volume_local_cache_driver \u003d \u0027opencas\u0027"},{"line_number":181,"context_line":"  volume_local_cache_instance_ids \u003d 1,15,222"},{"line_number":182,"context_line":""},{"line_number":183,"context_line":"  Instance IDs are separated by commas."},{"line_number":184,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_92ad3da6","line":181,"range":{"start_line":181,"start_character":2,"end_line":181,"end_character":33},"in_reply_to":"3fa7e38b_af9b3614","updated":"2020-02-19 07:27:44.000000000","message":"this is not returned from openstack, it is from cache software admin tool: casadm -L. Operator gets the instances IDs and fill here","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":259,"context_line":"* Create a dedicated cache cluster. Mount all the cache (NVME SSD) in cache"},{"line_number":260,"context_line":"  cluster as a big cache pool. Then allocate a certain ammount of cache to a"},{"line_number":261,"context_line":"  specific volume. The allocated cache can be mounted on compute node through"},{"line_number":262,"context_line":"  NVMEof protocol. Then still use cache software to do the same cache."},{"line_number":263,"context_line":""},{"line_number":264,"context_line":"  But this would be the compete between local PCIe and remote network. The"},{"line_number":265,"context_line":"  disadvantage if doing like these ways is: the network of the storage server"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_ef5fee49","line":262,"range":{"start_line":262,"start_character":2,"end_line":262,"end_character":8},"updated":"2020-02-19 06:11:25.000000000","message":"white space","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":265,"context_line":"  disadvantage if doing like these ways is: the network of the storage server"},{"line_number":266,"context_line":"  would be bottleneck."},{"line_number":267,"context_line":""},{"line_number":268,"context_line":"  - Latency) Storage cluster typically provide volume through iscsi/fc"},{"line_number":269,"context_line":"    protocol, or through librbd if ceph is used. The latency would be"},{"line_number":270,"context_line":"    millisecond level. Even NVME over TCP, the latency would be hundreds of"},{"line_number":271,"context_line":"    microsecond, depends on the network topology. As a contrast, the latency of"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_6f545e24","line":268,"range":{"start_line":268,"start_character":11,"end_line":268,"end_character":12},"updated":"2020-02-19 06:11:25.000000000","message":"?","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":356,"context_line":""},{"line_number":357,"context_line":"* Enable this capability in the libvirt driver if a caches is configured"},{"line_number":358,"context_line":""},{"line_number":359,"context_line":"* Cache the volume during connecting volume"},{"line_number":360,"context_line":""},{"line_number":361,"context_line":"* Release cache during disconnecting volume"},{"line_number":362,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_fdb56781","line":359,"updated":"2020-02-19 06:11:25.000000000","message":"Do we need to bump \u0027opencas\u0027 version to the \"requirements\" to controller the version change that we need?","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":356,"context_line":""},{"line_number":357,"context_line":"* Enable this capability in the libvirt driver if a caches is configured"},{"line_number":358,"context_line":""},{"line_number":359,"context_line":"* Cache the volume during connecting volume"},{"line_number":360,"context_line":""},{"line_number":361,"context_line":"* Release cache during disconnecting volume"},{"line_number":362,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_92a9bd79","line":359,"in_reply_to":"3fa7e38b_fdb56781","updated":"2020-02-19 07:27:44.000000000","message":"yes, I think so. When deploying, need to git clone required tag.","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"b15a578b31431d555d9f27788f9cf0ac6ed9ef81","unresolved":false,"context_lines":[{"line_number":357,"context_line":"* Enable this capability in the libvirt driver if a caches is configured"},{"line_number":358,"context_line":""},{"line_number":359,"context_line":"* Cache the volume during connecting volume"},{"line_number":360,"context_line":""},{"line_number":361,"context_line":"* Release cache during disconnecting volume"},{"line_number":362,"context_line":""},{"line_number":363,"context_line":"* Add switch to enable / disable this feature"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_ef2daead","line":360,"updated":"2020-02-19 06:11:25.000000000","message":"We also should change the ``os-brick`` to support \"set_cache_via_casadm\" by ``opencas``.","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"},{"author":{"_account_id":28948,"name":"Liang Fang","email":"liang.a.fang@intel.com","username":"liang"},"change_message_id":"9078caa46952c49dacc7abb8903f74fde52f8dab","unresolved":false,"context_lines":[{"line_number":357,"context_line":"* Enable this capability in the libvirt driver if a caches is configured"},{"line_number":358,"context_line":""},{"line_number":359,"context_line":"* Cache the volume during connecting volume"},{"line_number":360,"context_line":""},{"line_number":361,"context_line":"* Release cache during disconnecting volume"},{"line_number":362,"context_line":""},{"line_number":363,"context_line":"* Add switch to enable / disable this feature"}],"source_content_type":"text/x-rst","patch_set":10,"id":"3fa7e38b_b231595b","line":360,"in_reply_to":"3fa7e38b_ef2daead","updated":"2020-02-19 07:27:44.000000000","message":"This is documented in Cinder spec: https://review.opendev.org/#/c/684556/","commit_id":"7b138b799834daf69a75c8cf330392b7d7eb27e8"}]}
