{"specs/2023.2/approved/generic-vdpa-support.rst":[{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"a3aa04cd6ee0f5f13193ed151495c29167f3a96e","unresolved":true,"context_lines":[{"line_number":27,"context_line":"vDPA-enabled hardware offloads allow the guest to use standard virtio drivers"},{"line_number":28,"context_line":"instead of a vendor specific driver."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"And now, the generic vDPA device in QEMU is degisned to allow people use a"},{"line_number":31,"context_line":"generic \"vhost-vdpa-device-pci\" device, without requiring to touch specific"},{"line_number":32,"context_line":"virtio device type."},{"line_number":33,"context_line":""}],"source_content_type":"text/x-rst","patch_set":5,"id":"49873349_bc61bfba","line":30,"updated":"2023-04-04 16:55:35.000000000","message":"\"designed\"","commit_id":"b02b6c78c6a19cae1118effbfd9e051cc09e8f19"},{"author":{"_account_id":35894,"name":"liang jiechao","display_name":"LiangJiechao","email":"liangjc7@gmail.com","username":"LiangJiechao"},"change_message_id":"072397e80337606b0577179df82988a57ad8d25f","unresolved":false,"context_lines":[{"line_number":27,"context_line":"vDPA-enabled hardware offloads allow the guest to use standard virtio drivers"},{"line_number":28,"context_line":"instead of a vendor specific driver."},{"line_number":29,"context_line":""},{"line_number":30,"context_line":"And now, the generic vDPA device in QEMU is degisned to allow people use a"},{"line_number":31,"context_line":"generic \"vhost-vdpa-device-pci\" device, without requiring to touch specific"},{"line_number":32,"context_line":"virtio device type."},{"line_number":33,"context_line":""}],"source_content_type":"text/x-rst","patch_set":5,"id":"88b7b1e1_cc61f169","line":30,"in_reply_to":"49873349_bc61bfba","updated":"2023-04-07 07:48:06.000000000","message":"Done","commit_id":"b02b6c78c6a19cae1118effbfd9e051cc09e8f19"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"542c1535fc03797e70f9648959bc3f22f975cf3b","unresolved":true,"context_lines":[{"line_number":47,"context_line":"already implemented in openstack, but need to introduce a mechanism or flag"},{"line_number":48,"context_line":"of neutron port to generate the generic vDPA XML."},{"line_number":49,"context_line":""},{"line_number":50,"context_line":"For storage case, it may be complicated, cause we don\u0027t have storage offload"},{"line_number":51,"context_line":"solution in openstack yet."},{"line_number":52,"context_line":""},{"line_number":53,"context_line":"Use Cases"}],"source_content_type":"text/x-rst","patch_set":5,"id":"76e8eb9c_bd1d57db","line":50,"updated":"2023-04-04 18:25:57.000000000","message":"this https://www.youtube.com/watch?v\u003dzyDSUU0TLB4 could be useful to review i have not watch it yet but that explains vdpa-blk","commit_id":"b02b6c78c6a19cae1118effbfd9e051cc09e8f19"},{"author":{"_account_id":4393,"name":"Dan Smith","email":"dms@danplanet.com","username":"danms"},"change_message_id":"a3aa04cd6ee0f5f13193ed151495c29167f3a96e","unresolved":true,"context_lines":[{"line_number":48,"context_line":"of neutron port to generate the generic vDPA XML."},{"line_number":49,"context_line":""},{"line_number":50,"context_line":"For storage case, it may be complicated, cause we don\u0027t have storage offload"},{"line_number":51,"context_line":"solution in openstack 
yet."},{"line_number":52,"context_line":""},{"line_number":53,"context_line":"Use Cases"},{"line_number":54,"context_line":"---------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"9c96d532_c1652a79","line":51,"updated":"2023-04-04 16:55:35.000000000","message":"I\u0027m not very VDPA-literate, so maybe this is a dumb question, but it sounds like there are three VDPA classes: network, storage, and generic(PCI) is that right? Are you proposing to add both storage and generic support, or just one of those two?\n\nOr is the generic/PCI approach something that replaces the existing virtio-net with something that works for both?","commit_id":"b02b6c78c6a19cae1118effbfd9e051cc09e8f19"},{"author":{"_account_id":35894,"name":"liang jiechao","display_name":"LiangJiechao","email":"liangjc7@gmail.com","username":"LiangJiechao"},"change_message_id":"072397e80337606b0577179df82988a57ad8d25f","unresolved":true,"context_lines":[{"line_number":48,"context_line":"of neutron port to generate the generic vDPA XML."},{"line_number":49,"context_line":""},{"line_number":50,"context_line":"For storage case, it may be complicated, cause we don\u0027t have storage offload"},{"line_number":51,"context_line":"solution in openstack yet."},{"line_number":52,"context_line":""},{"line_number":53,"context_line":"Use Cases"},{"line_number":54,"context_line":"---------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"c338d7ee_56074f1e","line":51,"in_reply_to":"3a8dbfa4_b7f1e48a","updated":"2023-04-07 07:48:06.000000000","message":"Q: Or is the generic/PCI approach something that replaces the existing virtio-net with something that works for both?\nA: With the traditional VDPA device, QEMU distinguishes between types such as networking, storage, and so on.\nWith the generic vDPA device, QEMU won\u0027t need to touch the devicetypes any more, such like vfio.\nThe difference between traditional vDPA and generic vDPA is shown in the figure. 
sean mooney (sean-k-mooney), 2023-04-04:
I think the purpose is to allow setting the device model exposed to the guest as vdpa-PCI, or whatever the QEMU device model is. What is not clear to me is what is used in the block case.

For example, if we add a vdpa-pci entry for a cinder volume that requests vdpa offload, what provides the vhost-vdpa-x device? Presumably this is a VF tracked in the PCI tracker without a physical_network tag set.

Are we saying that we will require one vhost-vdpa device per volume, basically one pci-request-spec per volume?

We would likely need to model the pool of vdpa devices separately from those used for neutron ports, by only using the ones without the physical_network tag.

Or is the intention to request a vdpa NIC and have the guest somehow do the offloading? That seems problematic, as the guest should not be able to connect directly to the storage backend. It would also be problematic to need to request a neutron port of type vdpa to be able to attach a cinder volume of type vdpa.

I'm expecting each cinder volume to consume a vdpa device that is created from a VF, and the VF to not have the physical_network tag set, as such a device should not be added to hardware-offloaded OVS since it is not a neutron port.

What we also need to be clear on is how the device is configured on the host. We need to ensure that the vdpa device is used only for storage and does not provide network access to the guest when it is used for a volume.

I was expecting the vdpa device at the host level to be a vdpa-blk or similar device, not a generic vhost-vdpa device. We need to detail exactly how the devices will be pre-created on the host and how Nova will know what a given vdpa device is intended to be used for.
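One way to picture the "separate pools" idea above, using today's [pci]device_spec syntax in nova.conf (formerly passthrough_whitelist). The PCI addresses below are placeholders, and how a storage pool would actually be expressed is exactly what the spec needs to define:

    [pci]
    # vDPA network VFs that back neutron ports: tagged with a physical network
    device_spec = { "address": "0000:65:00.*", "physical_network": "physnet1" }
    # vDPA block VFs intended for cinder volumes: listed without a physical_network
    # tag, so the intent is that they are never matched for neutron port requests
    device_spec = { "address": "0000:66:00.*" }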
sean mooney (sean-k-mooney), 2023-04-18:
We need to capture some of the responses to these questions in the spec.

Q: We would likely need to model the pool of vdpa devices separately from those used for neutron ports, by only using the ones without the physical_network tag.
A: In our current implementation, both types of vdpa devices have the same value of "fields.PciDeviceType.VDPA" for their type field. We can distinguish between these two types of devices using their "vendor_id" and "product_id", since the VFs used to generate them have different types (virtio-blk/virtio-net).

Regarding this: while using the "vendor_id" and "product_id" is clever, not all hardware vendors directly expose VFs with the virtio-net-pci or virtio-blk IDs when supporting vDPA. NVIDIA/Mellanox ConnectX-6 Dx, for example, does not create VFs with the virtio PCI vendor and product IDs. So we need to explicitly state in the nova PCI device list whether a device is to be used for storage or not.

liang jiechao (LiangJiechao), 2023-04-18:
The solution hw_vif_model=vdpa & hw_disk_bus=vdpa-blk|vdpa-scsi was previously mentioned in https://etherpad.opendev.org/p/generic-vdpa. We recently studied the hw_xxx properties again and feel that this solution may not be suitable. The properties hw_vif_model and hw_disk_bus seem to be used to describe how devices are presented inside the guest, such as virtio and e1000; these are front-end properties. Whether it is generic vDPA or traditional vDPA, both present virtio devices on the front end (https://s1.ax1x.com/2023/04/07/ppTgIDf.png). Therefore, we feel that the hw_xxx approach may not be suitable.
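For reference, these are the same front-end properties operators already set as image metadata today; they pick the device model the guest sees, not which host backend provides it. The image name below is made up:

    # Existing guest-facing properties: choose the disk bus and NIC model
    # presented inside the guest
    openstack image set \
        --property hw_disk_bus=scsi \
        --property hw_vif_model=virtio \
        my-guest-image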
liang jiechao (LiangJiechao), 2023-04-07:
Q: For example, if we add a vdpa-pci entry for a cinder volume that requests vdpa offload, what provides the vhost-vdpa-x device? Presumably this is a VF tracked in the PCI tracker without a physical_network tag set. Are we saying that we will require one vhost-vdpa device per volume, basically one pci-request-spec entry per volume?
A: Yes, one pci-request-spec entry per volume. In our current implementation, we use fields.PciDeviceType.VDPA instead of fields.PciDeviceType.SRIOV_VF to represent this device in the PCI tracker.

Q: We would likely need to model the pool of vdpa devices separately from those used for neutron ports, by only using the ones without the physical_network tag.
A: In our current implementation, both types of vdpa devices have the same value of "fields.PciDeviceType.VDPA" for their type field. We can distinguish between these two types of devices using their "vendor_id" and "product_id", since the VFs used to generate them have different types (virtio-blk/virtio-net).

Q: If so, then that would presumably not be associated with a neutron network, so we need some host device which is not a NIC that exposes this? Or is the intention to request a vdpa NIC and have the guest somehow do the offloading? That seems problematic, as the guest should not be able to connect directly to the storage backend. It would also be problematic to need to request a neutron port of type vdpa to be able to attach a cinder volume of type vdpa.
A: In our understanding, this device has nothing to do with Neutron networking or a vDPA NIC. It connects to the storage backend through SPDK. Please refer to this figure for more details: https://s1.ax1x.com/2023/04/07/pp7SlFg.png

Q: I'm expecting each cinder volume to consume a vdpa device that is created from a VF, and the VF to not have the physical_network tag set, as such a device should not be added to hardware-offloaded OVS since it is not a neutron port.
A: Yes, we also think so.

Q: What we also need to be clear on is how the device is configured on the host. We need to ensure that the vdpa device is used only for storage and does not provide network access to the guest when it is used for a volume. I was expecting the vdpa device at the host level to be a vdpa-blk or similar device, not a generic vhost-vdpa device. We need to detail exactly how the devices will be pre-created on the host and how Nova will know what a given vdpa device is intended to be used for.
A: In our current solution, the DPU offload card offers multiple storage VFs (virtio-blk) and network VFs (virtio-net) on the host side. These VFs are configured as vdpa devices using the "vdpa dev add" command.
Regarding how Nova knows the intended use of a given vdpa device: we follow the current process of Nova calling libvirt to obtain information about the compute node's devices. All PCI device information is stored in the Nova database's "pci_devices" table, where vdpa devices are distinguished by "dev_type" and their intended use as storage or network devices is identified by their "product_id" and "vendor_id".
Based on the above approach, if a volume requests a PCI device, it will receive a vdpa device generated from the virtio-blk VF. Within the guest, this will appear as a virtio-blk device, so it will not provide network access to the guest.
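A sketch of that host-side pre-creation step with the iproute2 vdpa tool; the PCI addresses and device names are placeholders for whatever VFs the DPU exposes:

    # Load the vhost-vdpa bus driver so /dev/vhost-vdpa-* nodes can be created
    modprobe vhost_vdpa

    # List the vDPA management devices exposed by the DPU's VFs
    vdpa mgmtdev show

    # Create one vDPA device on a storage (virtio-blk) VF and one on a network (virtio-net) VF
    vdpa dev add name vdpa-blk0 mgmtdev pci/0000:66:00.2
    vdpa dev add name vdpa-net0 mgmtdev pci/0000:65:00.2

    # Each device bound to vhost_vdpa shows up as a /dev/vhost-vdpa-<N> character device
    vdpa dev show
    ls /dev/vhost-vdpa-*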
sean mooney (sean-k-mooney), 2023-04-18:
The image you linked shows the device being presented as vhost-vdpa-dev-pci. That is the front-end device that would be modelled by hw_vif_model=vdpa, and it would also be modelled by hw_disk_bus=vdpa; or perhaps it should be hw_disk_bus=vhost-vdpa and hw_vif_model=vhost-vdpa.

In either case you are asking for the host vdpa device to be presented to the guest as a front-end vhost-vdpa-dev-pci instead of virtio-net-pci or virtio-blk. That is why it is appropriate to use the front-end configuration options: you are changing how the device is presented to the guest.

liang jiechao (LiangJiechao), 2023-04-19:
Q: "The image you linked shows the device being presented as vhost-vdpa-dev-pci. In either case you are asking for the host vdpa device to be presented to the guest as a front-end vhost-vdpa-dev-pci instead of virtio-net-pci or virtio-blk."
A: There is no vhost-vdpa-dev-pci in the guest, and no new device type, just a virtio device. I have confirmed this with the committer of the QEMU generic vDPA patch. Regardless of vdpa-net, vdpa-blk, or generic vDPA, the device presented on the guest side is a virtio device.
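The guest-side claim is straightforward to check: inside a guest that received a generic vDPA block device, only an ordinary virtio PCI function should be visible, for example:

    # Run inside the guest: the vDPA plumbing on the host is not visible,
    # lspci should only report a standard virtio device (e.g. virtio-blk)
    lspci -nn | grep -i virtio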
impact"},{"line_number":115,"context_line":"--------------"},{"line_number":116,"context_line":""},{"line_number":117,"context_line":""},{"line_number":118,"context_line":""},{"line_number":119,"context_line":"Implementation"}],"source_content_type":"text/x-rst","patch_set":6,"id":"7346b32d_e3fab4d2","line":116,"updated":"2023-05-12 13:12:56.000000000","message":"I think we should discuss about this because I\u0027m pretty sure there is one :-)","commit_id":"6000a7c78afaec9af086743f4476be3a9cda5d71"}]}
