{"/COMMIT_MSG":[{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"ba7cda7efedec710567d8902c54e44e786e9bdaa","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Virtual persistent memory has already been enabled in both QEMU and"},{"line_number":10,"context_line":"libvirt. This spec seeks the support for virtual persistent memory"},{"line_number":11,"context_line":"in OpenStack Nova."},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"Change-Id: I5f8479a4c5e9a2079f2e66389949fd6923009bfb"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":4,"id":"3f79a3b5_fe2dc7ac","line":12,"updated":"2018-12-10 02:46:42.000000000","message":"we should refer the BP at here.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Virtual persistent memory has already been enabled in both QEMU and"},{"line_number":10,"context_line":"libvirt. This spec seeks the support for virtual persistent memory"},{"line_number":11,"context_line":"in OpenStack Nova."},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"Change-Id: I5f8479a4c5e9a2079f2e66389949fd6923009bfb"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":4,"id":"3f79a3b5_52caaa46","line":12,"in_reply_to":"3f79a3b5_c1a34eae","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":9,"context_line":"Virtual persistent memory has already been enabled in both QEMU and"},{"line_number":10,"context_line":"libvirt. 
This spec seeks the support for virtual persistent memory"},{"line_number":11,"context_line":"in OpenStack Nova."},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"Change-Id: I5f8479a4c5e9a2079f2e66389949fd6923009bfb"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":4,"id":"3f79a3b5_c1a34eae","line":12,"in_reply_to":"3f79a3b5_fe2dc7ac","updated":"2018-12-10 03:31:16.000000000","message":"Will change in the next patchset.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"}],"specs/stein/approved/virtual-nvdimm.rst":[{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"50185ed78a8c18a5853d481396a6f1fb89bfb833","unresolved":false,"context_lines":[{"line_number":38,"context_line":" * What is the life cycle of a vNVDIMM device"},{"line_number":39,"context_line":""},{"line_number":40,"context_line":"Use Cases"},{"line_number":41,"context_line":"---------"},{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Admin creates flavors with vNVDIMM devices by specifying the number"},{"line_number":44,"context_line":"of vNVDIMM devices, size of each vNVDIMM device and NUMA topology"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_3b82d453","line":41,"updated":"2018-09-14 16:33:42.000000000","message":"the content of this section is not really providing a usecase.\ni would expect somthing like the follow.\n\nAs a enduser i want a highbandwith presistent datastore for use in my\napplication that enable me read/write its content as if it was memory\nbut with a lifetime the out lives a instance reboot.\ne.g. as a developer of a vRouter i may want to leverage pmem to\nstore my routing table such that it can be frequently and quickly updated\nbut will not be lost on a reboot.","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"bf3c77286f7e5c09d6b38069efd8a5b24a0e1e5a","unresolved":false,"context_lines":[{"line_number":38,"context_line":" * What is the life cycle of a vNVDIMM device"},{"line_number":39,"context_line":""},{"line_number":40,"context_line":"Use Cases"},{"line_number":41,"context_line":"---------"},{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Admin creates flavors with vNVDIMM devices by specifying the number"},{"line_number":44,"context_line":"of vNVDIMM devices, size of each vNVDIMM device and NUMA topology"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_ad0d3590","line":41,"in_reply_to":"3f79a3b5_3b82d453","updated":"2018-09-16 20:30:07.000000000","message":"Agree, I will ask someone good to answer this to list some use-case.","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"1fc532fbc8545c5fe99bf0a79ca6d3ac49092d87","unresolved":false,"context_lines":[{"line_number":38,"context_line":" * What is the life cycle of a vNVDIMM device"},{"line_number":39,"context_line":""},{"line_number":40,"context_line":"Use Cases"},{"line_number":41,"context_line":"---------"},{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Admin creates flavors with vNVDIMM devices by specifying the number"},{"line_number":44,"context_line":"of vNVDIMM devices, size of each vNVDIMM device and NUMA 
topology"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_5e968596","line":41,"in_reply_to":"3f79a3b5_3b82d453","updated":"2018-09-17 07:15:19.000000000","message":"Perhaps this is an example.\nhttps://redislabs.com/blog/persistent-memory-and-redis-enterprise/","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"},{"author":{"_account_id":7900,"name":"Tomasz Paszkowski","email":"ss7pro@gmail.com","username":"tpaszkowski"},"change_message_id":"81504ae53a17fe4a66c06b0feb56d67848982c90","unresolved":false,"context_lines":[{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Admin creates flavors with vNVDIMM devices by specifying the number"},{"line_number":44,"context_line":"of vNVDIMM devices, size of each vNVDIMM device and NUMA topology"},{"line_number":45,"context_line":"in flavor extra specs."},{"line_number":46,"context_line":""},{"line_number":47,"context_line":"End users select such flavors when creating VMs. The corresponding VMs are"},{"line_number":48,"context_line":"scheduled to hosts with sufficient resources. The vNVDIMM devices are visible"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_0d591b8d","line":45,"updated":"2018-09-12 05:59:43.000000000","message":"Can we include how this sample flavor would look like?","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"67944eea136fff300f2177cb5b6db9a74e4119fc","unresolved":false,"context_lines":[{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Admin creates flavors with vNVDIMM devices by specifying the number"},{"line_number":44,"context_line":"of vNVDIMM devices, size of each vNVDIMM device and NUMA topology"},{"line_number":45,"context_line":"in flavor extra specs."},{"line_number":46,"context_line":""},{"line_number":47,"context_line":"End users select such flavors when creating VMs. The corresponding VMs are"},{"line_number":48,"context_line":"scheduled to hosts with sufficient resources. The vNVDIMM devices are visible"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_26797fc4","line":45,"in_reply_to":"3f79a3b5_0d591b8d","updated":"2018-09-13 08:03:50.000000000","message":"L172 is actually an example. However, sure, I will make it more clear.","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"50185ed78a8c18a5853d481396a6f1fb89bfb833","unresolved":false,"context_lines":[{"line_number":58,"context_line":"Background"},{"line_number":59,"context_line":"----------"},{"line_number":60,"context_line":"NVDIMM devices can be partitioned into namespaces which is the only way"},{"line_number":61,"context_line":"for software to use. There are two hardware working types of NVDIMM --"},{"line_number":62,"context_line":"\"persistent memory\" (in short \"pmem\") and \"block window\" (in short \"blk\")."},{"line_number":63,"context_line":"Each type can be further partitioned into namespaces in \"raw\", \"sector\","},{"line_number":64,"context_line":"\"fsdax\" or \"devdax\" mode exclusively. [2]_"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_5b855058","line":61,"updated":"2018-09-14 16:33:42.000000000","message":"technicallly there are 3 modes that nvdimms can be configued for on the host.\napp direct mode e.g. 
Line 61, patch set 3 ("There are two hardware working types of NVDIMM --
'persistent memory' (pmem) and 'block window' (blk)."):

sean mooney (2018-09-14): Technically there are three modes that NVDIMMs can
be configured for on the host: app direct mode (e.g. pmem), block device mode,
and as RAM for the system.

Alex Xu (2018-09-16): I also thought about that in the beginning, but we found
it is complex: there are two region types (pmem, blk) and four namespace modes
on top of the region type. Maybe Rui can paste some documentation to explain
that.

Rui Zang (2018-09-17): Well, in my opinion "RAM" mode is somewhat vendor
specific: by manufacture, DRAM is logically put in front of the NVDIMM as
another cache layer. I think the underlying NVDIMM still works in one of the
two modes -- "pmem" or "blk" -- it is just transparent to software. Also, if
you look at the official pmem.io specifications, there are only the two
access modes.

Line 71, patch set 3 ("Due to QEMU's preference on vNVDIMM backends, this spec
only seeks the support of vNVDIMM devices backed by ... 'devdax' mode
namespaces"):

sean mooney (2018-09-14): I think you meant "persistence" here, not
"preference".

Alex Xu (2018-09-16): It is "preference". There are multiple ways we can pass
an NVDIMM to the guest. For example, you can create an NVDIMM-aware filesystem
(ext4) on a pmem namespace in fsdax mode, then create a file on that
filesystem and assign the file as the vNVDIMM backend. In the beginning that
was my favorite way to expose NVDIMM to the guest: since the filesystem is
there, we do not depend on any NVDIMM-specific tools, and the filesystem
solves the fragmentation issue. But unfortunately, the virtualization folks
taught me there is a bug with that approach: when the guest has any
PCI-passthrough device, all the memory pages are forced to be allocated
before the guest starts up.
Line 80, patch set 3 (the fragmentation diagram: "four applications are using
four namespaces each of size 100GB"):

sean mooney (2018-09-14): This implies we either need to store a memory map in
nova, or we statically configure the namespaces and then assign them to
guests. If we do the latter, we have to zero out all the pmem in a region that
will no longer be used by a guest before assigning it to another guest.

Alex Xu (2018-09-16): Sorry Sean, I didn't get what you said. Just like I said
at the PTG, we pre-create namespaces because of the fragmentation issue; not
sure that is the question you are asking.

Rui Zang (2018-09-17): Yes, you are right on both approaches. As Alex
mentioned, the static approach is what we are targeting.

Line 111, patch set 3 ("NVDIMM can not be managed in the similar way as system
memory"):

sean mooney (2018-09-14): It will have to be managed similarly to hugepages or
CPUs for pinning; I don't think that will be too much of an issue. If we allow
dynamic sizing of the regions, modeling this in placement gets tricky, but if
we use fixed-size regions, that will simplify tracking and assignment.

Alex Xu (2018-09-16): Yes, we don't want to create namespaces dynamically, due
to the fragmentation issue.
Line 114, patch set 3 ("The proposed approach is to use admin pre-created
namespaces."):

sean mooney (2018-09-14): We could have a nova conf option to have the compute
agent pre-create them.

Alex Xu (2018-09-16): Yes, we could; this has two sides. If we let the
operator create them, the operator can choose any size for each namespace;
otherwise we need to invent a complex syntax in the nova conf to specify the
size of each namespace.

Line 122, patch set 3 (the new "nvdimm_namespaces" configuration option):

sean mooney (2018-09-14): I would prefer not to put this in the nova config.
I'll add this as a use case to my YAML device whitelist proposal for
cyborg/nova device whitelisting.

Alex Xu (2018-09-16): Do you have a spec? I'd be glad to review it.

sean mooney (2018-09-18): I do not have a spec for this yet, but I'm writing
one; I'll add you as a reviewer when it's up. I'm going to add pmem namespaces
as a use case to it, but if you can add more detail on how to whitelist the
namespaces once it's published, that would be useful. I'll check the pmem.io
website for details.
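As a rough sketch of the config-file route under discussion, assuming the
spec's proposed nvdimm_namespaces option is a list of device paths (the
section name and exact format here are assumptions, not settled by the
review):

    [libvirt]
    # absolute paths of the admin pre-created devdax namespace device nodes
    nvdimm_namespaces = /dev/dax0.0,/dev/dax0.1,/dev/dax1.0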
Line 152, patch set 3 ("resource providers for NVDIMM namespaces will be
directly nested to the compute node ROOT resource provider"):

sean mooney (2018-09-14): This should be tracked in the existing NUMA topology
blob in the DB, and the assignment/fitting should be integrated with the NUMA
topology filter.

Alex Xu (2018-09-16): That is the next step, I think. For the first step we
attach the NVDIMM namespace resource providers to the compute node resource
provider. We will take care of NUMA after NUMA moves to placement.
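A sketch of the first-phase modeling with the osc-placement CLI: one provider
per pre-created namespace, nested under the compute node root provider. The
resource class name is illustrative; the spec does not fix one:

    openstack resource class create CUSTOM_PMEM_NAMESPACE
    openstack resource provider create pmem-ns0 \
        --parent-provider $COMPUTE_NODE_RP_UUID
    # each namespace provider exposes exactly one allocatable unit
    openstack resource provider inventory set $PMEM_NS0_RP_UUID \
        --resource CUSTOM_PMEM_NAMESPACE=1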
Line 175, patch set 3 (the "hw:numa_nodes = 1 / hw:numa_nvdimm = 512"
example):

sean mooney (2018-09-14): If this were consistent with the other NUMA extra
specs, setting hw:numa_nodes to 2 and hw:numa_nvdimm=512 would result in two
NUMA nodes, each with a 256G vNVDIMM device.

Alex Xu (2018-09-16): Yes, agreed.

sean mooney (2018-09-18): Cool. In that case, if you can add that as an
example when you respin this, that would be great.
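In the spec's own Examples notation, the agreed per-node split semantics would
read (illustrative, pending the respin sean asks for):

    Two NUMA nodes, the 512GB request split evenly, one 256GB vNVDIMM per node:
        hw:numa_nodes = 2
        hw:numa_nvdimm = 512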
Line 213, patch set 3 ("Live migration with vNVDIMM is supported by QEMU. This
should be transparent to Nova."):

sean mooney (2018-09-14): We should document somewhere that live migration
with a large pmem allocation could take a very long time if a guest has
hundreds of gigabytes that need to be copied. As such, the live migration
timeout may need to be extended, and there may be other deployer impacts we
should document. There are other live migration concerns too, such as when the
namespace is erased on the source node: we may need to roll back the
migration, and we would like to avoid copying the data again.

Alex Xu (2018-09-16): Yes, we will run some tests first; I'd like to put some
real test results here.
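One concrete deployer-facing knob for the timeout concern is nova's existing
libvirt live-migration completion timeout; a sketch, with a value that is
purely illustrative pending the testing Alex mentions:

    [libvirt]
    # large pmem copies lengthen live migration; raise the completion timeout
    live_migration_completion_timeout = 3200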
Line 233, patch set 3 (Data model impact: "InstanceNUMACell object needs to be
updated"):

sean mooney (2018-09-14): We need to modify the existing host NUMA topology
blob to store the namespaces.

Alex Xu (2018-09-16): No; just like I said, that is the second step.

sean mooney (2018-09-18): I'm not really sure it's a good idea to skip NUMA
affinity in the first step. That will force the need for a reshaper job, and
it will likely result in cross-QPI access to the NVDIMM memory, severely
reducing performance.

Rui Zang (2018-09-20): It is just a matter of tactics: how we handle the
dependency on NUMA affinity while it is being moved to placement. Building an
implementation on the current NUMA affinity code would add to the workload of
moving it to placement. If that move finishes with an adequate amount of time
left for us, the second step will also be taken in this release.

Line 237, patch set 3 (the proposed 'allocated_nvdimms' field):

Alex Xu (2018-09-12): We don't need this.

Alex Xu (2018-09-13): Since each namespace is a resource provider, once
placement chooses a resource provider we already know which one is assigned to
the VM, and the libvirt XML also tells us which namespace is assigned. We only
need to know the guest's vNVDIMM request, and that is covered by line 236 (the
'nvdimms' field). Maybe we need an object instead of a list of integers.

Line 238, patch set 3:

sean mooney (2018-09-14): Each namespace should not be a resource provider. We
should have a resource provider per NUMA node, but if the namespaces are the
same size and on the same NUMA node, they should be in a single resource
provider with an inventory of multiple NVDIMM namespaces.

Alex Xu (2018-09-16): We have to make each namespace a resource provider,
since we want to allocate a whole namespace to a guest, and the namespace
sizes need not be the same -- they can differ. Sorry that the spec is not
clear about that. Even when namespaces are the same size, I don't want to put
them into a single RP with an inventory, because I don't want users to have to
use traits like "NVDIMM_SIZE_BIG, NVDIMM_SIZE_SMALL" to request
different-sized NVDIMMs on different hosts.
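Putting the thread's conclusion into code, a minimal sketch of the discussed
InstanceNUMACell change with only the request-side field kept, per Alex's
comments (not the spec's final wording; the "maybe an object" alternative is
left as a comment):

    from nova.objects import fields as obj_fields

    # inside InstanceNUMACell.fields:
    fields = {
        # requested vNVDIMM sizes (GB) for this guest NUMA cell, e.g. [512];
        # possibly a richer object type instead of bare integers
        'nvdimms': obj_fields.ListOfIntegersField(nullable=True),
        # 'allocated_nvdimms' dropped: the placement allocation and the
        # libvirt XML already record which host namespace each guest uses
    }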
Line 254, patch set 3 (the config option to bypass zeroing namespaces on
reuse):

sean mooney (2018-09-14): This may make reuse of NVDIMM namespaces faster, but
I'm not sure it's worth the extra deployer complexity. I also think that if
this is added, it should be an image metadata option rather than a config
option, so the tenant can say whether it is OK not to erase the NVDIMM region.

Alex Xu (2018-09-16): Thanks. I'm also concerned about the speed at which we
can zero the device; I like your idea.

Line 271, patch set 3 ("Zeroing out a NVDIMM namespace requires a considerable
amount of time"):

sean mooney (2018-09-14): The same consideration exists for live migration.

Line 276, patch set 3 ("The deployer needs to pre-create the NVDIMM
namespaces."):

sean mooney (2018-09-14): We could have a nova compute virt driver option to
create the pmem namespaces if not present. This feature also requires specific
QEMU and libvirt versions; can you document which releases it was added in?

Alex Xu (2018-09-16): Yup, we should.
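For the zeroing itself, device-DAX nodes are character devices rather than
regular block devices, so a plain dd is not the natural tool; a sketch using
daxio from PMDK, assuming that utility is available on the compute host:

    # zero an entire devdax namespace before handing it to the next guest
    daxio --zero --output=/dev/dax0.0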
mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"36c8ca0faf1a11a6dcdff59422ffaef23165e8c4","unresolved":false,"context_lines":[{"line_number":317,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":318,"context_line":""},{"line_number":319,"context_line":"Real hardware is not necessary since QEMU actually supports using files"},{"line_number":320,"context_line":"as the backend of virtual NVDIMM devices."},{"line_number":321,"context_line":""},{"line_number":322,"context_line":"Documentation Impact"},{"line_number":323,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_4fd1d22e","line":320,"updated":"2018-09-18 13:26:54.000000000","message":"yes i think if we plan to test nvdimm support in the upstream ci at some point we will need the filebackend as we are unlikely to have acess to nvdimm flavors form our ci providers. on the thirdparty ci intel can use either real hardare or faked file backends  but long term if we can test this upstream that would be better.","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"40f1d14bc8d1dea1cd223c2fffdab116f605e0e9","unresolved":false,"context_lines":[{"line_number":317,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":318,"context_line":""},{"line_number":319,"context_line":"Real hardware is not necessary since QEMU actually supports using files"},{"line_number":320,"context_line":"as the backend of virtual NVDIMM devices."},{"line_number":321,"context_line":""},{"line_number":322,"context_line":"Documentation Impact"},{"line_number":323,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_2de79311","line":320,"in_reply_to":"3f79a3b5_4fd1d22e","updated":"2018-09-20 07:05:18.000000000","message":"I agree. We can have the same bunch of test cases run in both upstream CIs and third party CI with different configurations -- file backend for upstream CIs and real hardware for third party CI.","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"1fc532fbc8545c5fe99bf0a79ca6d3ac49092d87","unresolved":false,"context_lines":[{"line_number":317,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":318,"context_line":""},{"line_number":319,"context_line":"Real hardware is not necessary since QEMU actually supports using files"},{"line_number":320,"context_line":"as the backend of virtual NVDIMM devices."},{"line_number":321,"context_line":""},{"line_number":322,"context_line":"Documentation Impact"},{"line_number":323,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":3,"id":"3f79a3b5_5ef8c57a","line":320,"in_reply_to":"3f79a3b5_bb1fc407","updated":"2018-09-17 07:15:19.000000000","message":"This spec only seeks to expose hardware NVDIMM namespaces to VMs. 
Do you see a requirement to support vNVIDIMM devices backed by files not residing on real NVDIMM?\nThis sentence is put here only to make go for a certain of non-hardware-centric test cases, even though a hardware CI is in plan","commit_id":"d35eef01d920c3b8834c696b8ea90e78a0fea0f0"}],"specs/stein/approved/virtual-persistent-memory.rst":[{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":10,"context_line":""},{"line_number":11,"context_line":"https://blueprints.launchpad.net/nova/+spec/virtual-persistent-memory"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"Virtual persistent memory has already been enabled in both QEMU and"},{"line_number":14,"context_line":"libvirt. This spec seeks the support for virtual persistent memory"},{"line_number":15,"context_line":"in OpenStack Nova."},{"line_number":16,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_e2cf6f23","line":13,"range":{"start_line":13,"start_character":26,"end_line":13,"end_character":50},"updated":"2018-12-09 17:55:17.000000000","message":"s/has already been enabled/is now supported in/","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":10,"context_line":""},{"line_number":11,"context_line":"https://blueprints.launchpad.net/nova/+spec/virtual-persistent-memory"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"Virtual persistent memory has already been enabled in both QEMU and"},{"line_number":14,"context_line":"libvirt. This spec seeks the support for virtual persistent memory"},{"line_number":15,"context_line":"in OpenStack Nova."},{"line_number":16,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_928e0223","line":13,"range":{"start_line":13,"start_character":26,"end_line":13,"end_character":50},"in_reply_to":"3f79a3b5_81079615","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":10,"context_line":""},{"line_number":11,"context_line":"https://blueprints.launchpad.net/nova/+spec/virtual-persistent-memory"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"Virtual persistent memory has already been enabled in both QEMU and"},{"line_number":14,"context_line":"libvirt. 
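For the file-backed CI configuration, QEMU's documented vNVDIMM invocation
looks like this (paths and sizes illustrative; maxmem must cover RAM plus the
vNVDIMM):

    qemu-system-x86_64 -machine pc,nvdimm=on \
        -m 4G,slots=2,maxmem=8G \
        -object memory-backend-file,id=mem1,share=on,mem-path=/tmp/nvdimm0,size=4G \
        -device nvdimm,id=nvdimm1,memdev=mem1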
specs/stein/approved/virtual-persistent-memory.rst
--------------------------------------------------

Line 13, patch set 4 ("Virtual persistent memory has already been enabled in
both QEMU and libvirt."):

Jay Pipes (2018-12-09): s/has already been enabled/is now supported in/

Rui Zang (2018-12-10): Will reword in the next patchset.

Rui Zang (2018-12-10): Done.

Line 14, patch set 4:

Jay Pipes (2018-12-09): s/the support for virtual persistent memory in/to
enable this support in/

Rui Zang (2018-12-10): Will reword in the next patchset.

Rui Zang (2018-12-10): Done.

Line 21, patch set 4 ("The emerging `persistent memory`_ technologies
introduce a third tier."):

Jay Pipes (2018-12-09): s/The emerging/Emerging/

Rui Zang (2018-12-10): Will reword in the next patchset.

Rui Zang (2018-12-10): Done.

Line 28, patch set 4 ("virtual machines now can have physical PMEM as the
backend of virtual PMEM"):

Jay Pipes (2018-12-09): s/virtual PMEM/guest memory/

Rui Zang (2018-12-10): The intention of this spec is not to enable a PMEM
namespace as regular guest memory. QEMU has already virtualized persistent
memory, which means the exposed host PMEM namespace is used as the backend of
the virtual persistent memory in the guest.
In other words, the guest will see a PMEM device and can use it in PMEM\u0027s unique usage model.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":26,"context_line":""},{"line_number":27,"context_line":"Virtualization layer has already supported virtual PMEM which means virtual"},{"line_number":28,"context_line":"machines now can have physical PMEM as the backend of virtual PMEM."},{"line_number":29,"context_line":"As far as orchestration layer is concerned, several problems needs"},{"line_number":30,"context_line":"to be addressed:"},{"line_number":31,"context_line":""},{"line_number":32,"context_line":" * How is the physical PMEM managed and presented as virtual PMEM"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_3dcca8bc","line":29,"range":{"start_line":29,"start_character":10,"end_line":29,"end_character":29},"updated":"2018-12-09 17:55:17.000000000","message":"not sure this is the appropriate term for what you\u0027re describing here, especially considering in OpenStack we generally refer to things like Heat and Mistral as \"the orchestration layer\".","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":26,"context_line":""},{"line_number":27,"context_line":"Virtualization layer has already supported virtual PMEM which means virtual"},{"line_number":28,"context_line":"machines now can have physical PMEM as the backend of virtual PMEM."},{"line_number":29,"context_line":"As far as orchestration layer is concerned, several problems needs"},{"line_number":30,"context_line":"to be addressed:"},{"line_number":31,"context_line":""},{"line_number":32,"context_line":" * How is the physical PMEM managed and presented as virtual PMEM"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_c1682ebc","line":29,"range":{"start_line":29,"start_character":10,"end_line":29,"end_character":29},"in_reply_to":"3f79a3b5_3dcca8bc","updated":"2018-12-10 03:31:16.000000000","message":"OK, to be specific, will change it to Nova?","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":26,"context_line":""},{"line_number":27,"context_line":"Virtualization layer has already supported virtual PMEM which means virtual"},{"line_number":28,"context_line":"machines now can have physical PMEM as the backend of virtual PMEM."},{"line_number":29,"context_line":"As far as orchestration layer is concerned, several problems needs"},{"line_number":30,"context_line":"to be addressed:"},{"line_number":31,"context_line":""},{"line_number":32,"context_line":" * How is the physical PMEM managed and presented as virtual PMEM"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_329dae54","line":29,"range":{"start_line":29,"start_character":10,"end_line":29,"end_character":29},"in_reply_to":"3f79a3b5_c1682ebc","updated":"2018-12-10 
07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":29,"context_line":"As far as orchestration layer is concerned, several problems needs"},{"line_number":30,"context_line":"to be addressed:"},{"line_number":31,"context_line":""},{"line_number":32,"context_line":" * How is the physical PMEM managed and presented as virtual PMEM"},{"line_number":33,"context_line":" * The discovery and resource tracking of PMEM"},{"line_number":34,"context_line":" * How does the user specify the desired amount of virtual PMEM and"},{"line_number":35,"context_line":"   other attributes"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_fdc15084","line":32,"range":{"start_line":32,"start_character":53,"end_line":32,"end_character":65},"updated":"2018-12-09 17:55:17.000000000","message":"s/virtual PMEM/memory to guests/","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":29,"context_line":"As far as orchestration layer is concerned, several problems needs"},{"line_number":30,"context_line":"to be addressed:"},{"line_number":31,"context_line":""},{"line_number":32,"context_line":" * How is the physical PMEM managed and presented as virtual PMEM"},{"line_number":33,"context_line":" * The discovery and resource tracking of PMEM"},{"line_number":34,"context_line":" * How does the user specify the desired amount of virtual PMEM and"},{"line_number":35,"context_line":"   other attributes"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_61635adf","line":32,"range":{"start_line":32,"start_character":53,"end_line":32,"end_character":65},"in_reply_to":"3f79a3b5_fdc15084","updated":"2018-12-10 03:31:16.000000000","message":"See the reply to L28","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":39,"context_line":"---------"},{"line_number":40,"context_line":""},{"line_number":41,"context_line":"Applications in the VMs can memory map the virtual PMEM to its memory"},{"line_number":42,"context_line":"address space and direct access the PMEM. This is the primary use case"},{"line_number":43,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":44,"context_line":"devices such as in memory databases. 
To name a few: redis_, rocksdb_ and"},{"line_number":45,"context_line":"oracle_."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_ddba540a","line":42,"range":{"start_line":42,"start_character":18,"end_line":42,"end_character":24},"updated":"2018-12-09 17:55:17.000000000","message":"s/direct/directly/","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":39,"context_line":"---------"},{"line_number":40,"context_line":""},{"line_number":41,"context_line":"Applications in the VMs can memory map the virtual PMEM to its memory"},{"line_number":42,"context_line":"address space and direct access the PMEM. This is the primary use case"},{"line_number":43,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":44,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":45,"context_line":"oracle_."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_d2b1dae4","line":42,"range":{"start_line":42,"start_character":18,"end_line":42,"end_character":24},"in_reply_to":"3f79a3b5_2159e228","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":39,"context_line":"---------"},{"line_number":40,"context_line":""},{"line_number":41,"context_line":"Applications in the VMs can memory map the virtual PMEM to its memory"},{"line_number":42,"context_line":"address space and direct access the PMEM. This is the primary use case"},{"line_number":43,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":44,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":45,"context_line":"oracle_."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_2159e228","line":42,"range":{"start_line":42,"start_character":18,"end_line":42,"end_character":24},"in_reply_to":"3f79a3b5_ddba540a","updated":"2018-12-10 03:31:16.000000000","message":"Will reword in the next patchset.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":44,"context_line":"devices such as in memory databases. 
To name a few: redis_, rocksdb_ and"},{"line_number":45,"context_line":"oracle_."},{"line_number":46,"context_line":""},{"line_number":47,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":48,"context_line":"in the same way as traditional block devices but with better"},{"line_number":49,"context_line":"performance."},{"line_number":50,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_9db05c2a","line":47,"range":{"start_line":47,"start_character":33,"end_line":47,"end_character":37},"updated":"2018-12-09 17:55:17.000000000","message":"s/into/as/","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":44,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":45,"context_line":"oracle_."},{"line_number":46,"context_line":""},{"line_number":47,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":48,"context_line":"in the same way as traditional block devices but with better"},{"line_number":49,"context_line":"performance."},{"line_number":50,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_92a7e2a4","line":47,"range":{"start_line":47,"start_character":33,"end_line":47,"end_character":37},"in_reply_to":"3f79a3b5_41541ef1","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":44,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":45,"context_line":"oracle_."},{"line_number":46,"context_line":""},{"line_number":47,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":48,"context_line":"in the same way as traditional block devices but with better"},{"line_number":49,"context_line":"performance."},{"line_number":50,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_41541ef1","line":47,"range":{"start_line":47,"start_character":33,"end_line":47,"end_character":37},"in_reply_to":"3f79a3b5_9db05c2a","updated":"2018-12-10 03:31:16.000000000","message":"Will reword in the next patchset.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":45,"context_line":"oracle_."},{"line_number":46,"context_line":""},{"line_number":47,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":48,"context_line":"in the same way as traditional block devices but with better"},{"line_number":49,"context_line":"performance."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. 
important ::"},{"line_number":52,"context_line":"    This feature only works with libvirt driver."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_3d3108bc","line":49,"range":{"start_line":48,"start_character":54,"end_line":49,"end_character":12},"updated":"2018-12-09 17:55:17.000000000","message":"better performance than SSDs? or better performance than HDDs?","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":45,"context_line":"oracle_."},{"line_number":46,"context_line":""},{"line_number":47,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":48,"context_line":"in the same way as traditional block devices but with better"},{"line_number":49,"context_line":"performance."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"},{"line_number":52,"context_line":"    This feature only works with libvirt driver."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_c1c4ae95","line":49,"range":{"start_line":48,"start_character":54,"end_line":49,"end_character":12},"in_reply_to":"3f79a3b5_3d3108bc","updated":"2018-12-10 03:31:16.000000000","message":"Better than SSDs. The hardware interface is DIMM.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":57,"context_line":"Background"},{"line_number":58,"context_line":"----------"},{"line_number":59,"context_line":"PMEM devices can be partitioned into `PMEM namespaces`_ which is the only"},{"line_number":60,"context_line":"way for software to use. There are two hardware working types of PMEM --"},{"line_number":61,"context_line":"\"persistent memory\" (in short \"pmem\") and \"block window\" (in short \"blk\")."},{"line_number":62,"context_line":"Each type can be further partitioned into namespaces in \"raw\", \"sector\","},{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_5d3404ab","line":60,"range":{"start_line":60,"start_character":17,"end_line":60,"end_character":23},"updated":"2018-12-09 17:55:17.000000000","message":"to use the PMEM","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":57,"context_line":"Background"},{"line_number":58,"context_line":"----------"},{"line_number":59,"context_line":"PMEM devices can be partitioned into `PMEM namespaces`_ which is the only"},{"line_number":60,"context_line":"way for software to use. 
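A minimal sketch of the load/store usage model these comments describe, in Python, with a plain file standing in for a DAX mapping (the path, size, and msync-based flush are stand-ins; on real PMEM, persistence comes from CPU cache-flush instructions rather than msync):

    import mmap
    import os

    # Stand-in for a DAX mapping: a plain temporary file. On a real guest
    # this would be a file on a DAX-capable filesystem or a /dev/daxX.Y
    # device; the path and size here are illustrative only.
    PATH, SIZE = "/tmp/pmem-standin", 4096

    fd = os.open(PATH, os.O_CREAT | os.O_RDWR)
    os.ftruncate(fd, SIZE)
    buf = mmap.mmap(fd, SIZE)  # map once into the address space...
    buf[0:5] = b"hello"        # ...then access with ordinary loads and stores
    buf.flush()                # msync(); real PMEM flushes CPU caches instead
    buf.close()
    os.close(fd)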
There are two hardware working types of PMEM --"},{"line_number":61,"context_line":"\"persistent memory\" (in short \"pmem\") and \"block window\" (in short \"blk\")."},{"line_number":62,"context_line":"Each type can be further partitioned into namespaces in \"raw\", \"sector\","},{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_61bfdafc","line":60,"range":{"start_line":60,"start_character":17,"end_line":60,"end_character":23},"in_reply_to":"3f79a3b5_5d3404ab","updated":"2018-12-10 03:31:16.000000000","message":"Will reword in the next patchset.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":57,"context_line":"Background"},{"line_number":58,"context_line":"----------"},{"line_number":59,"context_line":"PMEM devices can be partitioned into `PMEM namespaces`_ which is the only"},{"line_number":60,"context_line":"way for software to use. There are two hardware working types of PMEM --"},{"line_number":61,"context_line":"\"persistent memory\" (in short \"pmem\") and \"block window\" (in short \"blk\")."},{"line_number":62,"context_line":"Each type can be further partitioned into namespaces in \"raw\", \"sector\","},{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_72c0062d","line":60,"range":{"start_line":60,"start_character":17,"end_line":60,"end_character":23},"in_reply_to":"3f79a3b5_61bfdafc","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":62,"context_line":"Each type can be further partitioned into namespaces in \"raw\", \"sector\","},{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."},{"line_number":64,"context_line":""},{"line_number":65,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":66,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":67,"context_line":"can guarantee the guest write persistence is the device DAX on the real"},{"line_number":68,"context_line":"PMEM device (e.g., /dev/dax0.0), to which all guest access do not involve"},{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_7d4a2025","line":69,"range":{"start_line":65,"start_character":0,"end_line":69,"end_character":27},"updated":"2018-12-09 17:55:17.000000000","message":"this sentence needs rewriting. 
It assumes the reader understands what DAX is and how a \"host-side kernel cache\" works.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":62,"context_line":"Each type can be further partitioned into namespaces in \"raw\", \"sector\","},{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."},{"line_number":64,"context_line":""},{"line_number":65,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":66,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":67,"context_line":"can guarantee the guest write persistence is the device DAX on the real"},{"line_number":68,"context_line":"PMEM device (e.g., /dev/dax0.0), to which all guest access do not involve"},{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_c1526ed9","line":69,"range":{"start_line":65,"start_character":0,"end_line":69,"end_character":27},"in_reply_to":"3f79a3b5_7d4a2025","updated":"2018-12-10 03:31:16.000000000","message":"Will change to -- \"currently the only one that can guarantee the guest write persistence is PMEM namespaces (access method--\"pmem\") in \"devdax\" mode\".\nWill also reword the \"two hardware working types\" to \"two access methods\"","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":62,"context_line":"Each type can be further partitioned into namespaces in \"raw\", \"sector\","},{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."},{"line_number":64,"context_line":""},{"line_number":65,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":66,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":67,"context_line":"can guarantee the guest write persistence is the device DAX on the real"},{"line_number":68,"context_line":"PMEM device (e.g., /dev/dax0.0), to which all guest access do not involve"},{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_12bbd2be","line":69,"range":{"start_line":65,"start_character":0,"end_line":69,"end_character":27},"in_reply_to":"3f79a3b5_c1526ed9","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay 
Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""},{"line_number":73,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":74,"context_line":"(e.g. /dev/daxX.Y)."},{"line_number":75,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_9d45bc15","line":72,"range":{"start_line":72,"start_character":11,"end_line":72,"end_character":18},"updated":"2018-12-09 17:55:17.000000000","message":"virtual","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""},{"line_number":73,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":74,"context_line":"(e.g. /dev/daxX.Y)."},{"line_number":75,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_c1390ea1","line":72,"range":{"start_line":72,"start_character":11,"end_line":72,"end_character":18},"in_reply_to":"3f79a3b5_9d45bc15","updated":"2018-12-10 03:31:16.000000000","message":"will correct in the next patchset.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""},{"line_number":73,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":74,"context_line":"(e.g. 
/dev/daxX.Y)."},{"line_number":75,"context_line":""}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_32b60ed7","line":72,"range":{"start_line":72,"start_character":11,"end_line":72,"end_character":18},"in_reply_to":"3f79a3b5_c1390ea1","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":68,"context_line":"PMEM device (e.g., /dev/dax0.0), to which all guest access do not involve"},{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""},{"line_number":73,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":74,"context_line":"(e.g. /dev/daxX.Y)."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The \"pmem\" type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. This introduces"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_3d5a68f4","line":74,"range":{"start_line":71,"start_character":0,"end_line":74,"end_character":19},"updated":"2018-12-09 17:55:17.000000000","message":"A link to some documentation that explains what all the above means would be useful.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"64fa8f08f7f6201a16097f0908756f63cfadecdb","unresolved":false,"context_lines":[{"line_number":68,"context_line":"PMEM device (e.g., /dev/dax0.0), to which all guest access do not involve"},{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""},{"line_number":73,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":74,"context_line":"(e.g. /dev/daxX.Y)."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The \"pmem\" type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. 
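A hedged sketch of how the host-side "devdax" namespaces referenced above could be enumerated; it assumes ndctl is installed and relies on its JSON listing format, and it is neither part of the spec nor of Nova:

    import json
    import subprocess

    # List host PMEM namespaces as JSON and keep only "devdax" ones, the
    # mode this spec targets for vPMEM backends. Sketch only, not Nova code.
    raw = subprocess.check_output(["ndctl", "list", "-N"])
    listing = json.loads(raw or b"[]")
    # Older ndctl may emit a bare object for a single namespace.
    namespaces = listing if isinstance(listing, list) else [listing]
    for ns in namespaces:
        if ns.get("mode") == "devdax":
            # devdax namespaces surface as character devices, e.g. /dev/dax0.0
            print("/dev/%s size=%s" % (ns.get("chardev"), ns.get("size")))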
This introduces"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_f2d97626","line":74,"range":{"start_line":71,"start_character":0,"end_line":74,"end_character":19},"in_reply_to":"3f79a3b5_019e2667","updated":"2018-12-10 07:37:38.000000000","message":"Done","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":68,"context_line":"PMEM device (e.g., /dev/dax0.0), to which all guest access do not involve"},{"line_number":69,"context_line":"any host-side kernel cache."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":72,"context_line":"support of virutal PMEM backed by physical PMEM \"persistent memory\""},{"line_number":73,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":74,"context_line":"(e.g. /dev/daxX.Y)."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The \"pmem\" type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. This introduces"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_019e2667","line":74,"range":{"start_line":71,"start_character":0,"end_line":74,"end_character":19},"in_reply_to":"3f79a3b5_3d5a68f4","updated":"2018-12-10 03:31:16.000000000","message":"Will add this link https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"},{"line_number":122,"context_line":"----------------------"},{"line_number":123,"context_line":"The PMEM resource will be tracked by placement service."},{"line_number":124,"context_line":""},{"line_number":125,"context_line":"Compute agent will report PMEM namespace resource to placement service."},{"line_number":126,"context_line":"Eventually each PMEM namespace will be represented by a nested resource"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_1d53ecde","line":123,"range":{"start_line":123,"start_character":4,"end_line":123,"end_character":17},"updated":"2018-12-09 17:55:17.000000000","message":"it cannot be a single generic PMEM resource, since above in the PMEM namespace management section you just said that a PMEM namespace is actually the resource that will be consumed by a user/instance, and those namespaces will have a specific size associated with them.\n\nTherefore, a custom resource class should be used that indicates the size of the namespace. For example, CUSTOM_PMEM_100GB. Either that, or the resource needs to actually be consumable in dynamically-assigned number of bytes, which it sounds like is not possible due to the hardware design of PMEM. 
:(","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"57038034f061750a3517bbf5ab45aaf40d2a729d","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"},{"line_number":122,"context_line":"----------------------"},{"line_number":123,"context_line":"The PMEM resource will be tracked by placement service."},{"line_number":124,"context_line":""},{"line_number":125,"context_line":"Compute agent will report PMEM namespace resource to placement service."},{"line_number":126,"context_line":"Eventually each PMEM namespace will be represented by a nested resource"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_7e6bf7ec","line":123,"range":{"start_line":123,"start_character":4,"end_line":123,"end_character":17},"in_reply_to":"3f79a3b5_1d53ecde","updated":"2018-12-10 02:43:34.000000000","message":"Jay, I see your point, that makes sense also. In the initial, I\u0027m trying to avoid coding the `size` into the RC name, just like we avoid coding the size(quantitative) into the traits name also. Also try to make the PMEM_GB as standard RC(I guess we prefer standard resource than custom resource), with the `size` inside the RC name, it must be the custom RC. The downside I can see, the admin only can found which size they can request by looking into the \u0027min_size\u0027 and \u0027max_size\u0027 for each inventory. I\u0027m just hard to get enough reasons to choose one of this two ways.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"},{"line_number":122,"context_line":"----------------------"},{"line_number":123,"context_line":"The PMEM resource will be tracked by placement service."},{"line_number":124,"context_line":""},{"line_number":125,"context_line":"Compute agent will report PMEM namespace resource to placement service."},{"line_number":126,"context_line":"Eventually each PMEM namespace will be represented by a nested resource"}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_be7bcfef","line":123,"range":{"start_line":123,"start_character":4,"end_line":123,"end_character":17},"in_reply_to":"3f79a3b5_1d53ecde","updated":"2018-12-10 03:31:16.000000000","message":"Maybe it was the problem of my expression :)\nThe PMEM namespaces won\u0027t be tracked by a single \"PMEM resource\". Instead, as you pointed out, one PMEM namespace will be one resource provider with Resource Class as \"VPMEM_GB\" or a customized resource class as \"CUSTOM_VPMEM_GB\".\nHowever, we guarantee each PMEM namespace be allocated at a time by setting the values of\n   \"max_size\"\n   \"min_size\",\n   \"total_size\"\nall to the real size of the PMEM namespace. i.e., max_size \u003d min_size \u003d total_size \u003d 100 in your example.\nWill this work? 
If yes, maybe we can save the \"XXX\" in \"CUSTOM_VPMEM_XXX_GB\" since the size is already tracked by \"xxx_size\" of the resource provider.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"91f9039e5a7d9f804bff80602961a4fb79118f96","unresolved":false,"context_lines":[{"line_number":177,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":178,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":179,"context_line":""},{"line_number":180,"context_line":"The resource class (RC) for PMEM devices are defined as \"VPMEM_GB\"."},{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_bd6d189b","line":181,"range":{"start_line":180,"start_character":0,"end_line":181,"end_character":61},"updated":"2018-12-09 17:55:17.000000000","message":"see above. you can\u0027t have it called VPMEM_GB since the PMEM namespace is a specific, immutable size of GB, and is consumed all or nothing, therefore you need to create multiple custom resource classes called CUSTOM_PMEM_XXXGB.","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3a4f57351bf8b16555eea2d46c534e600cd531a9","unresolved":false,"context_lines":[{"line_number":177,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":178,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":179,"context_line":""},{"line_number":180,"context_line":"The resource class (RC) for PMEM devices are defined as \"VPMEM_GB\"."},{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."}],"source_content_type":"text/x-rst","patch_set":4,"id":"3f79a3b5_a1fe5200","line":181,"range":{"start_line":180,"start_character":0,"end_line":181,"end_character":61},"in_reply_to":"3f79a3b5_bd6d189b","updated":"2018-12-10 03:31:16.000000000","message":"Can VPMEM_GB be made a standard RC so that it can be used by every RP representing a PMEM namespace?","commit_id":"de2234bb1812a2b2751a38563f618a9ac090b410"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"8eaf7a13ced1b80ff3ae276a1c1fafc355befb00","unresolved":false,"context_lines":[{"line_number":175,"context_line":"resource providers will be nested to their corresponding NUMA cell"},{"line_number":176,"context_line":"resource providers. 
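A sketch of the single-class inventory model Rui describes above, written with placement's actual inventory field names (total, min_unit, max_unit, step_size, allocation_ratio); the function is illustrative only, not an actual placement API call:

    # Illustrative only: one resource provider per PMEM namespace, a single
    # "VPMEM_GB" class, and unit bounds pinned to the namespace size so the
    # namespace is consumed as a whole.
    def vpmem_inventory(namespace_size_gb):
        return {
            "VPMEM_GB": {
                "total": namespace_size_gb,
                "min_unit": namespace_size_gb,   # all-or-nothing consumption
                "max_unit": namespace_size_gb,
                "step_size": namespace_size_gb,
                "reserved": 0,
                "allocation_ratio": 1.0,         # no overcommit of PMEM
            }
        }

    print(vpmem_inventory(100))  # the 100 GB example from the thread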
This also means that for the first phase,"},{"line_number":177,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":178,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":179,"context_line":""},{"line_number":180,"context_line":"The resource class (RC) for PMEM devices are defined as \"VPMEM_GB\"."},{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."}],"source_content_type":"text/x-rst","patch_set":5,"id":"1f769fc5_d03162a4","line":178,"updated":"2019-01-02 15:52:48.000000000","message":"this will be done via the /reshaper, presumably?","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c27fc1866539b799f11d6d86ebddf1aa806ca8f5","unresolved":false,"context_lines":[{"line_number":175,"context_line":"resource providers will be nested to their corresponding NUMA cell"},{"line_number":176,"context_line":"resource providers. This also means that for the first phase,"},{"line_number":177,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":178,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":179,"context_line":""},{"line_number":180,"context_line":"The resource class (RC) for PMEM devices are defined as \"VPMEM_GB\"."},{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_5bd2af36","line":178,"in_reply_to":"1f769fc5_d03162a4","updated":"2019-01-03 04:46:15.000000000","message":"Yes, it would be ideal if the numa reshape work covered this. However, we would also be glad to make it a follow-up patch to the numa reshape work.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"ca1ca8f943f69ebf6084ad5a1fce38f4c53a6537","unresolved":false,"context_lines":[{"line_number":175,"context_line":"resource providers will be nested to their corresponding NUMA cell"},{"line_number":176,"context_line":"resource providers. 
This also means that for the first phase,"},{"line_number":177,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":178,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":179,"context_line":""},{"line_number":180,"context_line":"The resource class (RC) for PMEM devices are defined as \"VPMEM_GB\"."},{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_66d27637","line":178,"in_reply_to":"ffd0ebdf_5bd2af36","updated":"2019-01-03 06:34:31.000000000","message":"Yes, probably we should just mention in this spec that this will be done by the /reshape API in the future.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"8eaf7a13ced1b80ff3ae276a1c1fafc355befb00","unresolved":false,"context_lines":[{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."},{"line_number":185,"context_line":""},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Virtual PMEM guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":5,"id":"1f769fc5_d063a2b0","line":184,"updated":"2019-01-02 15:52:48.000000000","message":"This modelling could potentially work, but you have to be able to guarantee that there is a single authority for max, min and total for the PMEM namespaces, and that no other authority will ever manipulate those values. That\u0027s not something placement itself can guarantee (nor should it).\n\nIt\u0027s probably also worth mentioning here that allocation_ratio must always be 1.0.\n\nA benefit of this model is we can use a single standard resource class, and there are no forced constraints on the namespace sizes.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c27fc1866539b799f11d6d86ebddf1aa806ca8f5","unresolved":false,"context_lines":[{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."},{"line_number":185,"context_line":""},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Virtual PMEM guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_9bd9f723","line":184,"in_reply_to":"1f769fc5_d063a2b0","updated":"2019-01-03 04:46:15.000000000","message":"Yes, the single authority is the libvirt driver, which is responsible for reporting the PMEM resources. 
You may also want to refer to the libvirt driver spec https://review.openstack.org/#/c/622893/.\n\nBased on the discussion on the mailing list, we are changing this design to adopt the custom resource class approach as mentioned by Jay Pipes.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"5b424e1e0b7b8437b055a61511b2e6347b8ae3db","unresolved":false,"context_lines":[{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."},{"line_number":185,"context_line":""},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Virtual PMEM guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_cf8d727c","line":184,"in_reply_to":"ffd0ebdf_3ca47e08","updated":"2019-01-03 13:15:30.000000000","message":"To be absolutely clear, I *also* have a general preference for using non-custom resource classes.\n\nIt would have been great if the PMEM could have been just another block device-like resource that functioned like DISK_GB does. Then, a user could have said \"I want 200GB of persistent memory\" and we could have had a PMEM_GB (or PMEM_BYTE) resource class.\n\nThis seems to be what the \"block mode\" in the original PMEM spec was all about, as opposed to the \"Persistent Memory Namespace\" mode that is used here in this spec. The PMEM namespace is not a generic resource. The \"resource\" is the pre-built, pre-cleaned/wiped, pre-sized namespace. It\u0027s not just a bunch of GB of persistent memory. The resource *is* the namespace itself...\n\nNow, if you all want to scrap the PMEM namespace mode and use the block mode translation systems described in the original Intel PMEM spec (http://pmem.io/documents/NVDIMM_Namespace_Spec.pdf), I think that would be more in-line with a generic PMEM_BYTE resource class. But that would require not using all the namespace management software that has been built, right?","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"ed8373e5e911f87acdab16db47889414272fbfd0","unresolved":false,"context_lines":[{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. 
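For contrast, a sketch of the per-size custom resource class approach the thread converges on; the helper and its values are assumptions modeled on Jay's CUSTOM_PMEM_XXXGB example, not Nova's implementation:

    # Hypothetical helper: group host namespaces into per-size custom
    # resource classes, one unit of inventory per whole namespace.
    def custom_rc_inventories(namespace_sizes_gb):
        inventories = {}
        for size_gb in namespace_sizes_gb:
            rc = "CUSTOM_PMEM_%dGB" % size_gb
            inv = inventories.setdefault(rc, {
                "total": 0, "min_unit": 1, "max_unit": 0,
                "step_size": 1, "reserved": 0, "allocation_ratio": 1.0,
            })
            inv["total"] += 1
            inv["max_unit"] = inv["total"]  # up to all same-size namespaces
        return inventories

    # e.g. two 100 GB namespaces and one 256 GB namespace on one host
    print(custom_rc_inventories([100, 100, 256]))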
Different PMEM namespaces can be of different sizes."},{"line_number":185,"context_line":""},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Virtual PMEM guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_3ca47e08","line":184,"in_reply_to":"ffd0ebdf_86d75a24","updated":"2019-01-03 12:04:46.000000000","message":"If the type of thing being provided (and the way we want to make it visible) is a PMEM namespace of a specific size, and there is a limited number of those types, then Jay\u0027s suggestion makes fine sense, especially as user requests don\u0027t need to express the custom resource class; nova is doing that for us.\n\nMy interest in the VPMEM_GB style was that it could allow a more direct expression of user requirements, via an inventory request in the flavor: VPMEM_GB\u003d2, and the details of how the namespaces were managed could be hidden under the covers.\n\nBut that hiding is probably not desirable.\n\nI think either could work and don\u0027t have a particular preference.\n\nI have a tendency to want to avoid custom resource classes, because that can introduce interoperability concerns between deployments, but in this case it does not, because the custom classes are not visible externally.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"ca1ca8f943f69ebf6084ad5a1fce38f4c53a6537","unresolved":false,"context_lines":[{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."},{"line_number":185,"context_line":""},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Virtual PMEM guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_86d75a24","line":184,"in_reply_to":"ffd0ebdf_9bd9f723","updated":"2019-01-03 06:34:31.000000000","message":"@Chris, also interested in your preference: a single RC, or the way Jay said.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"5454f5dabc481990c79b1c37ad534bfacb7ec173","unresolved":false,"context_lines":[{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."},{"line_number":185,"context_line":""},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Virtual PMEM guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_4fa3c250","line":184,"in_reply_to":"ffd0ebdf_cf8d727c","updated":"2019-01-03 14:00:53.000000000","message":"@Chris, thanks for helping me think about this clearly. Agree with you, there won\u0027t be too many RCs in a real deployment, so that should be fine. 
And even if we use VPMEM_GB\u003d2, that still has a discoverability problem: for example, we only have a 2GB namespace, but the end user requests VPMEM_GB\u003d3; that will fail, and there is no way for the end user to know which values they can request.\n\n@Jay, the PMEM namespace is the only way suggested by QEMU. The way you expect, and we initially expected, is to build a filesystem on those PMEM namespaces; unfortunately, there is a bug in the virtualization layer (or the kernel? I can\u0027t remember the details now, @Rui can explain better). So this is the only way suggested by QEMU. I think @Rui can help me answer more on this.\n\nActually, we are already working on the new version, based on custom RCs. Thanks for responding to those discussions!","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"494b3b9fb606b2fe01a57d624d639ec1e8c9c337","unresolved":false,"context_lines":[{"line_number":181,"context_line":"Each PMEM namespace resource provider is consumed as a whole."},{"line_number":182,"context_line":"So the values for \"max_size\", \"min_size\" and \"total_size\" of each PMEM"},{"line_number":183,"context_line":"namespace resource provider all equal to the real size of the physical"},{"line_number":184,"context_line":"PMEM namespace. Different PMEM namespaces can be of different sizes."},{"line_number":185,"context_line":""},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Virtual PMEM guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_d24439ba","line":184,"in_reply_to":"ffd0ebdf_cf8d727c","updated":"2019-01-03 14:21:38.000000000","message":"Hi Jay, the term -- \"block mode\" -- may have different meanings at different layers. \n1) It may refer to creating a PMEM namespace in `fsdax` mode, from which a \"block device\" is created with the PMEM namespace as the backend. File systems can then be created on top of the \"block device\". Applications can map files on the filesystem into their memory spaces. If the file system is capable of \"direct access\" (the \"DAX\" attribute, currently EXT4 and XFS IIRC), then the applications\u0027 access to the hardware PMEM is direct access (meaning just one instruction). This is the perfect usage model so far. However, this usage model was reported to have severe issues when VMs with hardware passed through do DMA operations. So the virtualization layer does not recommend that we use this model.\n\n2) It may refer to one of the underlying hardware access methods, called \"block window\" or \"blk\" in this spec. When accessed using the \"block window\" method, the PMEM hardware maintains an aperture to the internal memory space. Thus each access may involve setting up the aperture (register operations). 
This is hardly \"direct access\" and does not render the best performance.\n\nSo we are advised by the virtualization people to use the model as proposed in this spec.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"8eaf7a13ced1b80ff3ae276a1c1fafc355befb00","unresolved":false,"context_lines":[{"line_number":218,"context_line":"     hw:numa_pmem.1.0 \u003d 512"},{"line_number":219,"context_line":"     hw:numa_pmem.1.1 \u003d 512"},{"line_number":220,"context_line":""},{"line_number":221,"context_line":"The extra specs will be translated to placement API requests accordlingly."},{"line_number":222,"context_line":"By matching guest PMEM NUMA topologies to host NUMA topologies, best"},{"line_number":223,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":224,"context_line":""}],"source_content_type":"text/x-rst","patch_set":5,"id":"1f769fc5_50e23203","line":221,"range":{"start_line":221,"start_character":1,"end_line":221,"end_character":74},"updated":"2019-01-02 15:52:48.000000000","message":"who/what will do that translation? are you thinking request filters, or some other mechanism?","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c27fc1866539b799f11d6d86ebddf1aa806ca8f5","unresolved":false,"context_lines":[{"line_number":218,"context_line":"     hw:numa_pmem.1.0 \u003d 512"},{"line_number":219,"context_line":"     hw:numa_pmem.1.1 \u003d 512"},{"line_number":220,"context_line":""},{"line_number":221,"context_line":"The extra specs will be translated to placement API requests accordlingly."},{"line_number":222,"context_line":"By matching guest PMEM NUMA topologies to host NUMA topologies, best"},{"line_number":223,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":224,"context_line":""}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_7b18d3d2","line":221,"range":{"start_line":221,"start_character":1,"end_line":221,"end_character":74},"in_reply_to":"1f769fc5_50e23203","updated":"2019-01-03 04:46:15.000000000","message":"The implementation is in nova/scheduler/utils.py . 
We introduced several new functions in the ResourceRequest class to help convert flavor extra specs to resource requests, together with the other types of resource request conversion.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"ca1ca8f943f69ebf6084ad5a1fce38f4c53a6537","unresolved":false,"context_lines":[{"line_number":218,"context_line":"     hw:numa_pmem.1.0 \u003d 512"},{"line_number":219,"context_line":"     hw:numa_pmem.1.1 \u003d 512"},{"line_number":220,"context_line":""},{"line_number":221,"context_line":"The extra specs will be translated to placement API requests accordlingly."},{"line_number":222,"context_line":"By matching guest PMEM NUMA topologies to host NUMA topologies, best"},{"line_number":223,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":224,"context_line":""}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_66a036a7","line":221,"range":{"start_line":221,"start_character":1,"end_line":221,"end_character":74},"in_reply_to":"ffd0ebdf_7b18d3d2","updated":"2019-01-03 06:34:31.000000000","message":"yes, as Rui said, we did that in the `utils.resources_from_request_spec` method; we will submit the patch soon. Not sure a request filter is a place that can do that. The request filter\u0027s output is still the request spec. What I need to do is get the guest numa_topology from the request spec and translate that into the placement resource request. At least I can\u0027t do that with the current request filter.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"8eaf7a13ced1b80ff3ae276a1c1fafc355befb00","unresolved":false,"context_lines":[{"line_number":228,"context_line":"Virtual PMEM Disposal"},{"line_number":229,"context_line":"---------------------"},{"line_number":230,"context_line":"Due to the persistent nature of host PMEM namespaces, the content"},{"line_number":231,"context_line":"of vPMEM in guest will be erased before it can be reassigned to another"},{"line_number":232,"context_line":"VM instance. Otherwise there will be security concerns."},{"line_number":233,"context_line":"Since PMEM devices are typically of large size, this may introduce"},{"line_number":234,"context_line":"a performance penalty to guest deletion or any other actions involving"}],"source_content_type":"text/x-rst","patch_set":5,"id":"1f769fc5_70b64efd","line":231,"range":{"start_line":231,"start_character":12,"end_line":231,"end_character":61},"updated":"2019-01-02 15:52:48.000000000","message":"This is ambiguous. 
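A sketch of the translation step described here, assuming (per the later discussion in this thread) that hw:numa_pmem.* values are sizes in GB that collapse into size-keyed custom resource classes; the helper name is hypothetical, not the merged nova code::

    from collections import Counter

    def pmem_resources_from_extra_specs(extra_specs):
        # Collect every hw:numa_pmem.* value (a vPMEM size in GB) and
        # count how many vPMEMs of each size the flavor asks for.
        sizes = [int(v) for k, v in extra_specs.items()
                 if k == "hw:numa_pmem" or k.startswith("hw:numa_pmem.")]
        return {"CUSTOM_PMEM_%dGB" % s: n for s, n in Counter(sizes).items()}

    # e.g. {"hw:numa_nodes": "2",
    #       "hw:numa_pmem.0.0": "256", "hw:numa_pmem.0.1": "256",
    #       "hw:numa_pmem.1.0": "256", "hw:numa_pmem.1.1": "256"}
    #   -> {"CUSTOM_PMEM_256GB": 4}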
This sounds like the data will remain until the namespace is next requested, but the next sentence (more correctly) implies that it will be deleted at the time the vm is.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c27fc1866539b799f11d6d86ebddf1aa806ca8f5","unresolved":false,"context_lines":[{"line_number":228,"context_line":"Virtual PMEM Disposal"},{"line_number":229,"context_line":"---------------------"},{"line_number":230,"context_line":"Due to the persistent nature of host PMEM namespaces, the content"},{"line_number":231,"context_line":"of vPMEM in guest will be erased before it can be reassigned to another"},{"line_number":232,"context_line":"VM instance. Otherwise there will be security concerns."},{"line_number":233,"context_line":"Since PMEM devices are typically of large size, this may introduce"},{"line_number":234,"context_line":"a performance penalty to guest deletion or any other actions involving"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_fb236304","line":231,"range":{"start_line":231,"start_character":12,"end_line":231,"end_character":61},"in_reply_to":"1f769fc5_70b64efd","updated":"2019-01-03 04:46:15.000000000","message":"Will rephrase to \"the content of vPMEM in guest will be erased upon instance deletion\"","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"e4ad6a9f492ee7c01163d557d409e900192835dc","unresolved":false,"context_lines":[{"line_number":228,"context_line":"Virtual PMEM Disposal"},{"line_number":229,"context_line":"---------------------"},{"line_number":230,"context_line":"Due to the persistent nature of host PMEM namespaces, the content"},{"line_number":231,"context_line":"of vPMEM in guest will be erased before it can be reassigned to another"},{"line_number":232,"context_line":"VM instance. 
Otherwise there will be security concerns."},{"line_number":233,"context_line":"Since PMEM devices are typically of large size, this may introduce"},{"line_number":234,"context_line":"a performance penalty to guest deletion or any other actions involving"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_a1efe8e7","line":231,"range":{"start_line":231,"start_character":12,"end_line":231,"end_character":61},"in_reply_to":"ffd0ebdf_fb236304","updated":"2019-01-03 08:29:18.000000000","message":"Changed to \"the content of vPMEM in guest will be erased immediately once the vPMEM is no longer associated with a VM instance.\" because there are circumstances other than VM deletion that require data erasure.","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"8eaf7a13ced1b80ff3ae276a1c1fafc355befb00","unresolved":false,"context_lines":[{"line_number":344,"context_line":"Other deployer impact"},{"line_number":345,"context_line":"---------------------"},{"line_number":346,"context_line":""},{"line_number":347,"context_line":"The deployer needs to configurate the desired fix sizes for PMEM namespaces."},{"line_number":348,"context_line":""},{"line_number":349,"context_line":"Developer impact"},{"line_number":350,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"1f769fc5_70abae88","line":347,"range":{"start_line":347,"start_character":22,"end_line":347,"end_character":33},"updated":"2019-01-02 15:52:48.000000000","message":"configure","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11564,"name":"Chris Dent","email":"cdent@anticdent.org","username":"chdent"},"change_message_id":"8eaf7a13ced1b80ff3ae276a1c1fafc355befb00","unresolved":false,"context_lines":[{"line_number":344,"context_line":"Other deployer impact"},{"line_number":345,"context_line":"---------------------"},{"line_number":346,"context_line":""},{"line_number":347,"context_line":"The deployer needs to configurate the desired fix sizes for PMEM namespaces."},{"line_number":348,"context_line":""},{"line_number":349,"context_line":"Developer impact"},{"line_number":350,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"1f769fc5_90ae8a96","line":347,"range":{"start_line":347,"start_character":46,"end_line":347,"end_character":49},"updated":"2019-01-02 15:52:48.000000000","message":"fixed","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"e4ad6a9f492ee7c01163d557d409e900192835dc","unresolved":false,"context_lines":[{"line_number":344,"context_line":"Other deployer impact"},{"line_number":345,"context_line":"---------------------"},{"line_number":346,"context_line":""},{"line_number":347,"context_line":"The deployer needs to configurate the desired fix sizes for PMEM namespaces."},{"line_number":348,"context_line":""},{"line_number":349,"context_line":"Developer impact"},{"line_number":350,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_81bc24bc","line":347,"range":{"start_line":347,"start_character":22,"end_line":347,"end_character":33},"in_reply_to":"1f769fc5_70abae88","updated":"2019-01-03 
08:29:18.000000000","message":"Done","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c27fc1866539b799f11d6d86ebddf1aa806ca8f5","unresolved":false,"context_lines":[{"line_number":344,"context_line":"Other deployer impact"},{"line_number":345,"context_line":"---------------------"},{"line_number":346,"context_line":""},{"line_number":347,"context_line":"The deployer needs to configurate the desired fix sizes for PMEM namespaces."},{"line_number":348,"context_line":""},{"line_number":349,"context_line":"Developer impact"},{"line_number":350,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_5b786ff7","line":347,"range":{"start_line":347,"start_character":22,"end_line":347,"end_character":33},"in_reply_to":"1f769fc5_70abae88","updated":"2019-01-03 04:46:15.000000000","message":"Will correct in the next patchset","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"e4ad6a9f492ee7c01163d557d409e900192835dc","unresolved":false,"context_lines":[{"line_number":344,"context_line":"Other deployer impact"},{"line_number":345,"context_line":"---------------------"},{"line_number":346,"context_line":""},{"line_number":347,"context_line":"The deployer needs to configurate the desired fix sizes for PMEM namespaces."},{"line_number":348,"context_line":""},{"line_number":349,"context_line":"Developer impact"},{"line_number":350,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_41b63cdc","line":347,"range":{"start_line":347,"start_character":46,"end_line":347,"end_character":49},"in_reply_to":"1f769fc5_90ae8a96","updated":"2019-01-03 08:29:18.000000000","message":"Done","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c27fc1866539b799f11d6d86ebddf1aa806ca8f5","unresolved":false,"context_lines":[{"line_number":344,"context_line":"Other deployer impact"},{"line_number":345,"context_line":"---------------------"},{"line_number":346,"context_line":""},{"line_number":347,"context_line":"The deployer needs to configurate the desired fix sizes for PMEM namespaces."},{"line_number":348,"context_line":""},{"line_number":349,"context_line":"Developer impact"},{"line_number":350,"context_line":"----------------"}],"source_content_type":"text/x-rst","patch_set":5,"id":"ffd0ebdf_7b7d33e6","line":347,"range":{"start_line":347,"start_character":46,"end_line":347,"end_character":49},"in_reply_to":"1f769fc5_90ae8a96","updated":"2019-01-03 04:46:15.000000000","message":"Will correct in the next patchset","commit_id":"5f557adcac948c8bca4e08bc833c22d7605ea35f"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"894ce410cfc6bc56c745ba7280303db0b9a021e0","unresolved":false,"context_lines":[{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource provider once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider.  
This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":173,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":174,"context_line":""},{"line_number":175,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":176,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":6,"id":"ffd0ebdf_92931150","line":173,"range":{"start_line":171,"start_character":42,"end_line":173,"end_character":46},"updated":"2019-01-03 14:15:35.000000000","message":"but this way, we can do NUMA affinity: since we need to store which namespace is allocated in the host numa topology objects anyway, it is then easy to do the NUMA affinity in the legacy numa scheduler filter.\n\nSo we still use placement to do initial filtering and use the legacy numa scheduler filter to do the NUMA affinity.","commit_id":"d67b086270e1ebb2128b778a8363f97b1ca145c6"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"537980a4eacc9a2892e10212aa4737c8fab4f31f","unresolved":false,"context_lines":[{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource provider once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider.  This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":173,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":174,"context_line":""},{"line_number":175,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":176,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":6,"id":"ffd0ebdf_603805d3","line":173,"range":{"start_line":171,"start_character":42,"end_line":173,"end_character":46},"in_reply_to":"ffd0ebdf_323a65e7","updated":"2019-01-04 06:47:16.000000000","message":"Checked again, we needn\u0027t store the host usage; we can populate it from libvirt. So we needn\u0027t implement numa affinity in the first version, waiting for numa to move to placement.","commit_id":"d67b086270e1ebb2128b778a8363f97b1ca145c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"059b4f58d051e55290ebd79a207f6b60eab92c20","unresolved":false,"context_lines":[{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource provider once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider.  
This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":173,"context_line":"virtual PMEM guest NUMA topology is specified."},{"line_number":174,"context_line":""},{"line_number":175,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":176,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":6,"id":"ffd0ebdf_323a65e7","line":173,"range":{"start_line":171,"start_character":42,"end_line":173,"end_character":46},"in_reply_to":"ffd0ebdf_92931150","updated":"2019-01-03 14:37:31.000000000","message":"Oh right. You have mentioned that. I forgot. Will remove this sentence.","commit_id":"d67b086270e1ebb2128b778a8363f97b1ca145c6"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0de5c4f76d69ff7b12f15d49cf3e69c6b7163bcc","unresolved":false,"context_lines":[{"line_number":276,"context_line":""},{"line_number":277,"context_line":"Data model impact"},{"line_number":278,"context_line":"-----------------"},{"line_number":279,"context_line":""},{"line_number":280,"context_line":"A new VirtualPMEM object is introduced to track the virtual PMEM information"},{"line_number":281,"context_line":"of an instance::"},{"line_number":282,"context_line":""}],"source_content_type":"text/x-rst","patch_set":6,"id":"ffd0ebdf_b2c41542","line":279,"updated":"2019-01-03 14:17:20.000000000","message":"we need to update here regarding the host numa topology object, to store which namespace is free and which namespace is allocated.","commit_id":"d67b086270e1ebb2128b778a8363f97b1ca145c6"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":47,"context_line":"in the same way as traditional block devices but with better"},{"line_number":48,"context_line":"performance."},{"line_number":49,"context_line":""},{"line_number":50,"context_line":".. important ::"},{"line_number":51,"context_line":"    This feature only works with libvirt driver."},{"line_number":52,"context_line":""},{"line_number":53,"context_line":"Proposed change"},{"line_number":54,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_d32aff8e","line":51,"range":{"start_line":50,"start_character":1,"end_line":51,"end_character":48},"updated":"2019-01-07 06:15:25.000000000","message":"is this referring to the spec as a whole or to block io mode?\n\nmy understanding is the block io mode would be hypervisor independent but this spec only intends to enable virtual persistent memory for the libvirt driver","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c482d99266dfcfe75c78a48a5a5fbe5a76a399ed","unresolved":false,"context_lines":[{"line_number":47,"context_line":"in the same way as traditional block devices but with better"},{"line_number":48,"context_line":"performance."},{"line_number":49,"context_line":""},{"line_number":50,"context_line":".. 
important ::"},{"line_number":51,"context_line":"    This feature only works with libvirt driver."},{"line_number":52,"context_line":""},{"line_number":53,"context_line":"Proposed change"},{"line_number":54,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_445ba386","line":51,"range":{"start_line":50,"start_character":1,"end_line":51,"end_character":48},"in_reply_to":"dfd5e7cf_d32aff8e","updated":"2019-01-07 08:52:00.000000000","message":"Yes, the block io mode or any of the access modes of persistent memory is hypervisor independent, but sorry, how does that conclude that the spec is \"as a whole\" about block io mode?\n\nWill reword the sentence to \"this spec only intends to enable virtual persistent memory for the libvirt driver\". Is that what you are asking for?","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."},{"line_number":64,"context_line":""},{"line_number":65,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":66,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":67,"context_line":"can guarantee the guest write persistence is \"devdax\" mode PMEM namespaces"},{"line_number":68,"context_line":"while accessed by \"pmem\" method."},{"line_number":69,"context_line":""},{"line_number":70,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":71,"context_line":"support of virtual PMEM backed by physical PMEM \"persistent memory\""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_53184f80","line":68,"range":{"start_line":66,"start_character":43,"end_line":68,"end_character":32},"updated":"2019-01-07 06:15:25.000000000","message":"nit:\n\"currently the only one that\ncan guarantee the guest write persistence is \"devdax\" mode PMEM namespaces\nwhile accessed by \"pmem\" method.\"\n\n-\u003e\n\"currently the only one that\ncan guarantee the guest write persistence, when accessed by the ``pmem`` method is ``devdax`` mode.\"","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c482d99266dfcfe75c78a48a5a5fbe5a76a399ed","unresolved":false,"context_lines":[{"line_number":63,"context_line":"\"fsdax\" or \"devdax\" mode exclusively."},{"line_number":64,"context_line":""},{"line_number":65,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":66,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":67,"context_line":"can guarantee the guest write persistence is \"devdax\" mode PMEM namespaces"},{"line_number":68,"context_line":"while accessed by \"pmem\" method."},{"line_number":69,"context_line":""},{"line_number":70,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":71,"context_line":"support of virtual PMEM backed by physical PMEM \"persistent 
memory\""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_04035b44","line":68,"range":{"start_line":66,"start_character":43,"end_line":68,"end_character":32},"in_reply_to":"dfd5e7cf_53184f80","updated":"2019-01-07 08:52:00.000000000","message":"Will change accordingly in the next patchset.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":67,"context_line":"can guarantee the guest write persistence is \"devdax\" mode PMEM namespaces"},{"line_number":68,"context_line":"while accessed by \"pmem\" method."},{"line_number":69,"context_line":""},{"line_number":70,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":71,"context_line":"support of virtual PMEM backed by physical PMEM \"persistent memory\""},{"line_number":72,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":73,"context_line":"(e.g. /dev/daxX.Y). Please refer to the `NVDIMM Linux kernel document`_"},{"line_number":74,"context_line":"for the terms and details."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The \"pmem\" type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. This introduces"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_6e0d6636","line":74,"range":{"start_line":70,"start_character":0,"end_line":74,"end_character":26},"updated":"2019-01-07 06:15:25.000000000","message":"nit: qemu is not a person and therefor cannot have a preference. \n\nthe docs team would prefer we did not do this \n\nhttps://docs.openstack.org/doc-contrib-guide/writing-style/general-writing-guidelines.html#do-not-humanize-inanimate-objects\n\nhttps://docs.openstack.org/doc-contrib-guide/writing-style/general-writing-guidelines.html#avoid-personification\n\nwhat i think you meant to convey is as follows.\n\nDue to the limitations write persistence, partitioning and qemu support, this spec proposes to support virtual PMEM backed by physical PMEM \"persistent memory\", partionined into ``pmem`` type namespaces in ``devdax`` mode. Please refer to the `NVDIMM Linux kernel document`_\nfor the terms and details.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c482d99266dfcfe75c78a48a5a5fbe5a76a399ed","unresolved":false,"context_lines":[{"line_number":67,"context_line":"can guarantee the guest write persistence is \"devdax\" mode PMEM namespaces"},{"line_number":68,"context_line":"while accessed by \"pmem\" method."},{"line_number":69,"context_line":""},{"line_number":70,"context_line":"Due to QEMU\u0027s preference on virtual PMEM backends, this spec only seeks the"},{"line_number":71,"context_line":"support of virtual PMEM backed by physical PMEM \"persistent memory\""},{"line_number":72,"context_line":"(\"pmem\") type namespaces and partitioned into \"devdax\" mode namespaces"},{"line_number":73,"context_line":"(e.g. /dev/daxX.Y). 
Please refer to the `NVDIMM Linux kernel document`_"},{"line_number":74,"context_line":"for the terms and details."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The \"pmem\" type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. This introduces"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_2400df34","line":74,"range":{"start_line":70,"start_character":0,"end_line":74,"end_character":26},"in_reply_to":"dfd5e7cf_6e0d6636","updated":"2019-01-07 08:52:00.000000000","message":"Thanks! Will change accordingly in the next patchset.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":73,"context_line":"(e.g. /dev/daxX.Y). Please refer to the `NVDIMM Linux kernel document`_"},{"line_number":74,"context_line":"for the terms and details."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The \"pmem\" type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. This introduces"},{"line_number":78,"context_line":"a fragmentation issue with regard to multiple namespaces are created"},{"line_number":79,"context_line":"and used by multiple applications. As shown in below diagram,"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_2e0f5e2f","line":76,"range":{"start_line":76,"start_character":4,"end_line":76,"end_character":10},"updated":"2019-01-07 06:15:25.000000000","message":"``pmem``","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"},{"line_number":122,"context_line":"----------------------"},{"line_number":123,"context_line":"The PMEM resource will be tracked by placement service."},{"line_number":124,"context_line":""},{"line_number":125,"context_line":"Compute agent will report PMEM namespace resource to placement service."},{"line_number":126,"context_line":"Custom Resource Classes are used to represent PMEM namespace resource."}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_ee187676","line":123,"range":{"start_line":123,"start_character":1,"end_line":123,"end_character":55},"updated":"2019-01-07 06:15:25.000000000","message":"you will also need to track it in the resource tracker in the compute node so that you can do assignment of the precreated namespaces to the vm. 
placement will not be able to provide that functionality.\n\nin particular when an instance is spawned the resource tracker will have to select a set of pmem namespaces and record their assignment to the vm and the order in which they need to be mapped to each virtual pmem device in the guest so that if the guest is rebooted the same topology can be recreated.\n\nplacement will not know this assignment or topology and we cannot use it to track it so the resource tracker will have to be extended to do so as we do for cpu pinning or pci devices today.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"936edc4ecffaaf873498be3cf8a5fae2d230696e","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"},{"line_number":122,"context_line":"----------------------"},{"line_number":123,"context_line":"The PMEM resource will be tracked by placement service."},{"line_number":124,"context_line":""},{"line_number":125,"context_line":"Compute agent will report PMEM namespace resource to placement service."},{"line_number":126,"context_line":"Custom Resource Classes are used to represent PMEM namespace resource."}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_543a6ce7","line":123,"range":{"start_line":123,"start_character":1,"end_line":123,"end_character":55},"in_reply_to":"dfd5e7cf_ee187676","updated":"2019-01-07 16:26:40.000000000","message":"The overall set of PMEM namespaces is in the configuration file. And the allocation information for VM instances is in the InstanceNUMACell object, as in Line287. So actually these resources are already properly tracked.\nWill enrich this ``resource tracking`` section to include both placement and namespace assignment tracking.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c482d99266dfcfe75c78a48a5a5fbe5a76a399ed","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"},{"line_number":122,"context_line":"----------------------"},{"line_number":123,"context_line":"The PMEM resource will be tracked by placement service."},{"line_number":124,"context_line":""},{"line_number":125,"context_line":"Compute agent will report PMEM namespace resource to placement service."},{"line_number":126,"context_line":"Custom Resource Classes are used to represent PMEM namespace resource."}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_240a1f26","line":123,"range":{"start_line":123,"start_character":1,"end_line":123,"end_character":55},"in_reply_to":"dfd5e7cf_ee187676","updated":"2019-01-07 08:52:00.000000000","message":"Yes, in this case, the libvirt driver maintains such information -- which namespace is assigned and which is not, and which namespace maps to which virtual pmem of a VM instance. 
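A sketch of the per-host bookkeeping sean asks for and Rui says the libvirt driver keeps; the structures and names are illustrative assumptions, not nova's actual objects::

    # instance UUID -> ordered list of host devdax namespaces backing its
    # vPMEM devices; the order must be stable so that a reboot recreates
    # the same guest topology.
    pmem_assignments = {"instance-uuid-1": ["/dev/dax0.0", "/dev/dax0.1"]}

    def free_namespaces(configured, assignments):
        # A configured namespace is free if no instance currently claims it.
        used = {ns for devs in assignments.values() for ns in devs}
        return [ns for ns in configured if ns not in used]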
\nWill be more explicit and add it to the libvirt driver spec.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":165,"context_line":"         ..."},{"line_number":166,"context_line":" }"},{"line_number":167,"context_line":""},{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_ee3156ee","line":169,"range":{"start_line":168,"start_character":0,"end_line":169,"end_character":54},"updated":"2019-01-07 06:15:25.000000000","message":"we will also need to extend the numa topology filter\nto validate that if multiple pmem regions are requested by a guest they are 1.) located on the same numa node and 2.) contiguous.\n\nplacement could address the first point once PMEM is modeled under NUMA nodes however the second point will have to always be done in a nova filter.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":23598,"name":"Zhong Luyao","email":"luyao.zhong@intel.com","username":"ZhongLuyao"},"change_message_id":"6d0bf2a53812f91852a5a4f018a537245412e22f","unresolved":false,"context_lines":[{"line_number":165,"context_line":"         ..."},{"line_number":166,"context_line":" }"},{"line_number":167,"context_line":""},{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_a505a87c","line":169,"range":{"start_line":168,"start_character":0,"end_line":169,"end_character":54},"in_reply_to":"dfd5e7cf_4983bc05","updated":"2019-01-07 13:25:11.000000000","message":"@Alex I guess \"contiguous\" means 2 NUMA nodes with one or more pmem on each.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"433411a737bb7f83e09a4cf84b6bc469cca87554","unresolved":false,"context_lines":[{"line_number":165,"context_line":"         ..."},{"line_number":166,"context_line":" }"},{"line_number":167,"context_line":""},{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. 
For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_134abe26","line":169,"range":{"start_line":168,"start_character":0,"end_line":169,"end_character":54},"in_reply_to":"dfd5e7cf_a505a87c","updated":"2019-01-07 13:48:53.000000000","message":"emm...thanks luyao, I think that should work also.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0f830fe1f169cac68365d985ff3848aeb68e780a","unresolved":false,"context_lines":[{"line_number":165,"context_line":"         ..."},{"line_number":166,"context_line":" }"},{"line_number":167,"context_line":""},{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_4f3ba0d0","line":169,"range":{"start_line":168,"start_character":0,"end_line":169,"end_character":54},"in_reply_to":"dfd5e7cf_ee3156ee","updated":"2019-01-08 14:40:21.000000000","message":"We do not support (or do not want to support) users specifying whether the multiple PMEM namespaces they require are contiguous or not. Do you see a strong use case demanding that? Qemu does not treat contiguous PMEM namespaces specially either, whether on the host or in their reflection in the guest.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":165,"context_line":"         ..."},{"line_number":166,"context_line":" }"},{"line_number":167,"context_line":""},{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_4983bc05","line":169,"range":{"start_line":168,"start_character":0,"end_line":169,"end_character":54},"in_reply_to":"dfd5e7cf_ee3156ee","updated":"2019-01-07 07:33:55.000000000","message":"yes for the first point. 
Sorry, I didn\u0027t get your second point, is that related to your next comment?","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":173,"context_line":"virtual PMEM guest NUMA topology is specified. In future, the movement"},{"line_number":174,"context_line":"to NUMA resource providers shall be done by the methodology exposed by"},{"line_number":175,"context_line":"the placement \u0027/reshaper\u0027 API."},{"line_number":176,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_6e5b262e","line":173,"range":{"start_line":171,"start_character":42,"end_line":173,"end_character":46},"updated":"2019-01-07 06:15:25.000000000","message":"we do not need to address it in placement but we will have to address it in the nova filter scheduler.\n\nif the guest requests a 512GB pmem region\nwe cannot provide it with two disjoint 256GB regions from two different numa nodes or even 2 distinct regions from the same numa node unless qemu can combine multiple host namespaces into a single guest namespace transparently.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0f830fe1f169cac68365d985ff3848aeb68e780a","unresolved":false,"context_lines":[{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":173,"context_line":"virtual PMEM guest NUMA topology is specified. In future, the movement"},{"line_number":174,"context_line":"to NUMA resource providers shall be done by the methodology exposed by"},{"line_number":175,"context_line":"the placement \u0027/reshaper\u0027 API."},{"line_number":176,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_4c240ec7","line":173,"range":{"start_line":171,"start_character":42,"end_line":173,"end_character":46},"in_reply_to":"dfd5e7cf_6e5b262e","updated":"2019-01-08 14:40:21.000000000","message":"To echo Alex\u0027s reply. Hey Sean, placement will do an exact match. 
If the guest requests 512GB, placement won\u0027t give it two 256GB namespaces even if they are contiguous.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":168,"context_line":"The PMEM resource inventories will belong to NUMA resource providers once"},{"line_number":169,"context_line":"`NUMA topology with resource providers`_ work is done. For the initial"},{"line_number":170,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":171,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":172,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":173,"context_line":"virtual PMEM guest NUMA topology is specified. In future, the movement"},{"line_number":174,"context_line":"to NUMA resource providers shall be done by the methodology exposed by"},{"line_number":175,"context_line":"the placement \u0027/reshaper\u0027 API."},{"line_number":176,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_c9ab2c73","line":173,"range":{"start_line":171,"start_character":42,"end_line":173,"end_character":46},"in_reply_to":"dfd5e7cf_6e5b262e","updated":"2019-01-07 07:33:55.000000000","message":"yes, agree with you on the expected behavior. But I don\u0027t see why Placement can\u0027t do it.\n\nIf the user requests 512GB, then placement will tell me which 512GB namespace located on the same numa node as the VM is available. Then, while we don\u0027t have NUMA topo in placement, we just choose any free 512GB namespace for the guest. 
After we have NUMA topo in placement, the virt driver will choose a free 512GB namespace on a specific numa node for the guest.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":187,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":188,"context_line":"     hw:numa_pmem \u003d 512"},{"line_number":189,"context_line":""},{"line_number":190,"context_line":" One NUMA node, two 512GB vPMEM:"},{"line_number":191,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":192,"context_line":"     hw:numa_pmem.0.0 \u003d 512"},{"line_number":193,"context_line":"     hw:numa_pmem.0.1 \u003d 512"},{"line_number":194,"context_line":""},{"line_number":195,"context_line":" Two NUMA nodes, each node has one 512GB vPMEM:"},{"line_number":196,"context_line":"     hw:numa_nodes \u003d 2"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_ae882e71","line":193,"range":{"start_line":190,"start_character":1,"end_line":193,"end_character":27},"updated":"2019-01-07 06:15:25.000000000","message":"note that this is not the same as \n     hw:numa_nodes\u003d1\n     hw:numa_pmem\u003d1024\n\non line 200 below you state that \n\n     hw:numa_nodes\u003d2\n     hw:numa_pmem \u003d 1024\nhas the same semantics as\n     hw:numa_nodes \u003d 2\n     hw:numa_pmem.0.0 \u003d 512\n     hw:numa_pmem.1.0 \u003d 512\n\nthat means the semantics are: you divide the PMEM resource evenly by the number of numa nodes.\n\ntherefore\n     hw:numa_nodes\u003d1\n     hw:numa_pmem\u003d1024\n\nis the equivalent of\n     hw:numa_nodes \u003d 1\n     hw:numa_pmem.0.0 \u003d 1024\n\na request for a single pmem in the guest of 1024GB in size.\nthis could be provided by two contiguous","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":187,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":188,"context_line":"     hw:numa_pmem \u003d 512"},{"line_number":189,"context_line":""},{"line_number":190,"context_line":" One NUMA node, two 512GB vPMEM:"},{"line_number":191,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":192,"context_line":"     hw:numa_pmem.0.0 \u003d 512"},{"line_number":193,"context_line":"     hw:numa_pmem.0.1 \u003d 512"},{"line_number":194,"context_line":""},{"line_number":195,"context_line":" Two NUMA nodes, each node has one 512GB vPMEM:"},{"line_number":196,"context_line":"     hw:numa_nodes \u003d 2"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_69b6c0dc","line":193,"range":{"start_line":190,"start_character":1,"end_line":193,"end_character":27},"in_reply_to":"dfd5e7cf_ae882e71","updated":"2019-01-07 07:33:55.000000000","message":"I only thought we would just provide one 1024GB namespace for the guest. 
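A sketch of the selection Alex describes: the driver picks any free namespace whose size exactly matches the request, since (as Rui notes just below) qemu cannot combine several host namespaces into one guest vPMEM. The function and field names are assumptions, not nova code::

    def pick_namespace(free_namespaces, size_gb):
        # Exact-size match only; no splitting or combining of namespaces.
        for ns in free_namespaces:
            if ns["size_gb"] == size_gb:
                return ns
        raise LookupError("no free %dGB PMEM namespace on this host" % size_gb)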
I didn\u0027t consider giving it two 512GB namespaces, if that is what your \u0027two contiguous\u0027 means.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"2ba162dcc420320b9de303116c272253350ec665","unresolved":false,"context_lines":[{"line_number":187,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":188,"context_line":"     hw:numa_pmem \u003d 512"},{"line_number":189,"context_line":""},{"line_number":190,"context_line":" One NUMA node, two 512GB vPMEM:"},{"line_number":191,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":192,"context_line":"     hw:numa_pmem.0.0 \u003d 512"},{"line_number":193,"context_line":"     hw:numa_pmem.0.1 \u003d 512"},{"line_number":194,"context_line":""},{"line_number":195,"context_line":" Two NUMA nodes, each node has one 512GB vPMEM:"},{"line_number":196,"context_line":"     hw:numa_nodes \u003d 2"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_414ffebc","line":193,"range":{"start_line":190,"start_character":1,"end_line":193,"end_character":27},"in_reply_to":"dfd5e7cf_ae882e71","updated":"2019-01-08 09:00:09.000000000","message":"Qemu does not support using two contiguous 512G physical PMEM namespaces to back one 1024G virtual PMEM in the guest.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":208,"context_line":"     hw:numa_pmem.1.0 \u003d 512"},{"line_number":209,"context_line":"     hw:numa_pmem.1.1 \u003d 512"},{"line_number":210,"context_line":""},{"line_number":211,"context_line":"The extra specs will be translated to placement API requests accordlingly."},{"line_number":212,"context_line":"The translation is done in the `utils.resources_from_request_spec` method in"},{"line_number":213,"context_line":"which guest NUMA topologies are converted to placement resource requests."},{"line_number":214,"context_line":"By matching guest PMEM NUMA topologies to host NUMA topologies, best"},{"line_number":215,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":216,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_6e456617","line":213,"range":{"start_line":211,"start_character":0,"end_line":213,"end_character":73},"updated":"2019-01-07 06:15:25.000000000","message":"im not sure how that would work.\n\nat the time of the placement request we do not know what hypervisor is running on the host in question.\nit could be hyperv, libvirt, vmware.\nit would not be correct to add libvirt specific info to this code.\n\nthe hw:numa_pmem* extra specs can be used by the libvirt driver to assign namespaces to the guest however the placement queries will need to be built from resource extra specs e.g. resource:CUSTOM_PMEM_256GB\u003d4\n\nif you want to express multiple numa nodes then the numbered resource groups syntax will need to be used in the flavor.\n\nsaid another way we cannot generate resource requests from hypervisor specific extra specs. 
if this spec was defining this feature in a libvirt/hypervisor independent way and we believed the same extra specs could be reused verbatim across all hypervisors i would be ok with the extension of the resources_from_request_spec function, as i have advocated for extending it for cpu pinning in the past to generate a request for vcpus or pcpus based on the presence of hw:cpu_policy\u003dshared|dedicated, but hw:cpu_policy is supported by multiple hypervisors.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":208,"context_line":"     hw:numa_pmem.1.0 \u003d 512"},{"line_number":209,"context_line":"     hw:numa_pmem.1.1 \u003d 512"},{"line_number":210,"context_line":""},{"line_number":211,"context_line":"The extra specs will be translated to placement API requests accordlingly."},{"line_number":212,"context_line":"The translation is done in the `utils.resources_from_request_spec` method in"},{"line_number":213,"context_line":"which guest NUMA topologies are converted to placement resource requests."},{"line_number":214,"context_line":"By matching guest PMEM NUMA topologies to host NUMA topologies, best"},{"line_number":215,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":216,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_8905247d","line":213,"range":{"start_line":211,"start_character":0,"end_line":213,"end_character":73},"in_reply_to":"dfd5e7cf_6e456617","updated":"2019-01-07 07:33:55.000000000","message":"Those extra specs aren\u0027t hypervisor specific, just like the other numa related extra specs.\n\nAnd the user needn\u0027t request pmem by `resource:CUSTOM_PMEM_256GB\u003d4`; it will be requested by the extra specs, the request can be as below:\nhw:numa_nodes\u003d2\n     hw:numa_pmem.0.0 \u003d 256\n     hw:numa_pmem.0.1 \u003d 256\n     hw:numa_pmem.1.0 \u003d 256\n     hw:numa_pmem.1.1 \u003d 256\n\nor any other topology the user prefers. Then it will be translated to \u0027resources:CUSTOM_PMEM_256GB\u003d4\u0027. 
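For concreteness, the request strings implied by this exchange, with assumed sizes; the unnumbered form is what the flat translation produces before NUMA is modeled in placement, and the numbered request groups (placement microversion 1.25+) are what a NUMA-aware translation could emit, matching Alex's follow-up below::

    # Flavor-side translation result (no NUMA modeling in placement yet):
    #   resources:CUSTOM_PMEM_256GB=4
    # Corresponding allocation-candidates query:
    #   GET /allocation_candidates?resources=CUSTOM_PMEM_256GB:4
    # With NUMA in placement, one numbered group per guest NUMA node:
    #   GET /allocation_candidates?resources1=CUSTOM_PMEM_256GB:2
    #                             &resources2=CUSTOM_PMEM_256GB:2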
When we have numa topo in placement, the output will be more complex, matching the numa request.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"433411a737bb7f83e09a4cf84b6bc469cca87554","unresolved":false,"context_lines":[{"line_number":208,"context_line":"     hw:numa_pmem.1.0 \u003d 512"},{"line_number":209,"context_line":"     hw:numa_pmem.1.1 \u003d 512"},{"line_number":210,"context_line":""},{"line_number":211,"context_line":"The extra specs will be translated to placement API requests accordlingly."},{"line_number":212,"context_line":"The translation is done in the `utils.resources_from_request_spec` method in"},{"line_number":213,"context_line":"which guest NUMA topologies are converted to placement resource requests."},{"line_number":214,"context_line":"By matching guest PMEM NUMA topologies to host NUMA topologies, best"},{"line_number":215,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":216,"context_line":""}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_d36c369e","line":213,"range":{"start_line":211,"start_character":0,"end_line":213,"end_character":73},"in_reply_to":"dfd5e7cf_8905247d","updated":"2019-01-07 13:48:53.000000000","message":"If we depend on placement to do the numa affinity, the translation output should be: resources1:CUSTOM_PMEM_256GB\u003d2\u0026resources2:CUSTOM_PMEM_256GB\u003d2.\n\nI think we should update the spec to describe the output of the translation.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":220,"context_line":"Virtual PMEM Disposal"},{"line_number":221,"context_line":"---------------------"},{"line_number":222,"context_line":"Due to the persistent nature of host PMEM namespaces, the content"},{"line_number":223,"context_line":"of vPMEM in guest will be erased immediately once the vPMEM is no longer"},{"line_number":224,"context_line":"associated with a VM instance. 
Otherwise there will be security concerns."},{"line_number":225,"context_line":"Since PMEM devices are typically of large size, this may introduce"},{"line_number":226,"context_line":"a performance penalty to guest deletion or any other actions involving"},{"line_number":227,"context_line":"erasing PMEM namespaces."}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_6e5ec6e7","line":224,"range":{"start_line":223,"start_character":45,"end_line":224,"end_character":29},"updated":"2019-01-07 06:15:25.000000000","message":"this is referring to instance delete or when the vm is migrated or shelved, correct?\n\nthe pmem region should obviously not be erased on instance reboot or when an instance is stopped but not deleted.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":220,"context_line":"Virtual PMEM Disposal"},{"line_number":221,"context_line":"---------------------"},{"line_number":222,"context_line":"Due to the persistent nature of host PMEM namespaces, the content"},{"line_number":223,"context_line":"of vPMEM in guest will be erased immediately once the vPMEM is no longer"},{"line_number":224,"context_line":"associated with a VM instance. Otherwise there will be security concerns."},{"line_number":225,"context_line":"Since PMEM devices are typically of large size, this may introduce"},{"line_number":226,"context_line":"a performance penalty to guest deletion or any other actions involving"},{"line_number":227,"context_line":"erasing PMEM namespaces."}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_29f67891","line":224,"range":{"start_line":223,"start_character":45,"end_line":224,"end_character":29},"in_reply_to":"dfd5e7cf_6e5ec6e7","updated":"2019-01-07 07:33:55.000000000","message":"yup","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":234,"context_line":"size is supported in order to avoid data loss."},{"line_number":235,"context_line":""},{"line_number":236,"context_line":"By default the content of the original virtual PMEM is copied to the"},{"line_number":237,"context_line":"new virtual PMEM (if there is). This could be time consuming,"},{"line_number":238,"context_line":"so a config option \"pmem_copy_namespace\" (\u0027True\u0027 by default) is"},{"line_number":239,"context_line":"introduced to change the default behavior. 
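One way the erase step discussed here could look for a devdax namespace; purely an illustrative sketch, assuming the namespace appears as a device-dax node such as /dev/dax0.0 whose size is known, and ignoring the chunking a real implementation would want for large devices::

    import mmap
    import os

    def wipe_devdax(path, size_bytes):
        # devdax character devices are accessed via mmap rather than
        # read()/write(), so map the whole device and zero it.
        fd = os.open(path, os.O_RDWR)
        try:
            buf = mmap.mmap(fd, size_bytes)
            try:
                buf[:] = b"\x00" * size_bytes
            finally:
                buf.close()
        finally:
            os.close(fd)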
When set to \u0027False\u0027,"},{"line_number":240,"context_line":"the data in namespaces will not be copied."},{"line_number":241,"context_line":""},{"line_number":242,"context_line":"Live Migration"},{"line_number":243,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_ceefb2e7","line":240,"range":{"start_line":237,"start_character":32,"end_line":240,"end_character":42},"updated":"2019-01-07 06:15:25.000000000","message":"this introduces an interoperability problem as the value of the config option is not discoverable via the api.\n\na flavor extra spec and image metadata value would be a more friendly choice for end users as they could discover the behavior of the cloud programmatically.\n\nideally the image would state if it expected PMEM namespaces to be copied and the flavor would state if it was allowed, which could default to true if unset in both cases.\n\nso that would translate to the following\nflavor:\nhw:allow_pmem_copy\u003dtrue|false (default true)\nimage: hw_copy_pmem\u003dtrue|false (default true)\n\non a resize if either the source or dest flavor had\nhw:allow_pmem_copy\u003dtrue and the image had hw_copy_pmem\u003dtrue\nthe pmem namespace data would be copied.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0f830fe1f169cac68365d985ff3848aeb68e780a","unresolved":false,"context_lines":[{"line_number":234,"context_line":"size is supported in order to avoid data loss."},{"line_number":235,"context_line":""},{"line_number":236,"context_line":"By default the content of the original virtual PMEM is copied to the"},{"line_number":237,"context_line":"new virtual PMEM (if there is). This could be time consuming,"},{"line_number":238,"context_line":"so a config option \"pmem_copy_namespace\" (\u0027True\u0027 by default) is"},{"line_number":239,"context_line":"introduced to change the default behavior. When set to \u0027False\u0027,"},{"line_number":240,"context_line":"the data in namespaces will not be copied."},{"line_number":241,"context_line":""},{"line_number":242,"context_line":"Live Migration"},{"line_number":243,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_afeecc85","line":240,"range":{"start_line":237,"start_character":32,"end_line":240,"end_character":42},"in_reply_to":"dfd5e7cf_ceefb2e7","updated":"2019-01-08 14:40:21.000000000","message":"Added flavor extra spec. This iteration we do not want to support image metadata, to stay aligned with line 217.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":234,"context_line":"size is supported in order to avoid data loss."},{"line_number":235,"context_line":""},{"line_number":236,"context_line":"By default the content of the original virtual PMEM is copied to the"},{"line_number":237,"context_line":"new virtual PMEM (if there is). This could be time consuming,"},{"line_number":238,"context_line":"so a config option \"pmem_copy_namespace\" (\u0027True\u0027 by default) is"},{"line_number":239,"context_line":"introduced to change the default behavior. 
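A small sketch of the copy policy sean proposes above; ``hw:allow_pmem_copy`` and ``hw_copy_pmem`` are his suggested names from this thread rather than a merged API, and both default to true when unset::

    # Sketch of the proposed resize-time decision: copy the namespace
    # data only if a flavor allows it and the image wants it.
    def should_copy_pmem(src_flavor_specs, dst_flavor_specs, image_props):
        def _bool(props, key):
            # unset means "true" in sean's proposal
            return str(props.get(key, 'true')).lower() == 'true'
        flavor_allows = (_bool(src_flavor_specs, 'hw:allow_pmem_copy') or
                         _bool(dst_flavor_specs, 'hw:allow_pmem_copy'))
        image_wants = _bool(image_props, 'hw_copy_pmem')
        return flavor_allows and image_wants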
When set to \u0027False\u0027,"},{"line_number":240,"context_line":"the data in namespaces will not be copied."},{"line_number":241,"context_line":""},{"line_number":242,"context_line":"Live Migration"},{"line_number":243,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_490dfc8e","line":240,"range":{"start_line":237,"start_character":32,"end_line":240,"end_character":42},"in_reply_to":"dfd5e7cf_ceefb2e7","updated":"2019-01-07 07:33:55.000000000","message":"I\u0027m ok with extraspec/metadata.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":252,"context_line":""},{"line_number":253,"context_line":"VM snapshot"},{"line_number":254,"context_line":"-----------"},{"line_number":255,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":256,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"VM shelve/unshelve"},{"line_number":259,"context_line":"------------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_8e2d2a48","line":256,"range":{"start_line":255,"start_character":0,"end_line":256,"end_character":71},"updated":"2019-01-07 06:15:25.000000000","message":"i would assume if it was included at a later date it would be stored as a separate image in glance from the main image.\n\nas this could be quite large this may take a lot of time and \nbe expensive to store which means we may want a way to explicitly state if we want to include pmem in snapshots when it is eventually supported.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0f830fe1f169cac68365d985ff3848aeb68e780a","unresolved":false,"context_lines":[{"line_number":252,"context_line":""},{"line_number":253,"context_line":"VM snapshot"},{"line_number":254,"context_line":"-----------"},{"line_number":255,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":256,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"VM shelve/unshelve"},{"line_number":259,"context_line":"------------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_cf623017","line":256,"range":{"start_line":255,"start_character":0,"end_line":256,"end_character":71},"in_reply_to":"dfd5e7cf_8e2d2a48","updated":"2019-01-08 14:40:21.000000000","message":"Done","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":252,"context_line":""},{"line_number":253,"context_line":"VM snapshot"},{"line_number":254,"context_line":"-----------"},{"line_number":255,"context_line":"The current VM snapshots do not include memory images. 
For the current"},{"line_number":256,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"VM shelve/unshelve"},{"line_number":259,"context_line":"------------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_c9756cfc","line":256,"range":{"start_line":255,"start_character":0,"end_line":256,"end_character":71},"in_reply_to":"dfd5e7cf_8e2d2a48","updated":"2019-01-07 07:33:55.000000000","message":"yes, it also can be an extraspec or image metadata","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":255,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":256,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"VM shelve/unshelve"},{"line_number":259,"context_line":"------------------"},{"line_number":260,"context_line":"Shelving a VM is to upload the VM snapshot to Glance service. Since the"},{"line_number":261,"context_line":"virtual PMEM image will not be included in the VM snapshot,"},{"line_number":262,"context_line":"VM shelve/unshelve will not automatically save/restore the virtual PMEM"},{"line_number":263,"context_line":"for the current iteration."},{"line_number":264,"context_line":""},{"line_number":265,"context_line":"Alternatives"},{"line_number":266,"context_line":"------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_6eafa694","line":263,"range":{"start_line":258,"start_character":1,"end_line":263,"end_character":26},"updated":"2019-01-07 06:15:25.000000000","message":"personally i think we should explicitly not support shelve/unshelve with pmem resources until such a point as we can store the pmem region in glance.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"936edc4ecffaaf873498be3cf8a5fae2d230696e","unresolved":false,"context_lines":[{"line_number":255,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":256,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"VM shelve/unshelve"},{"line_number":259,"context_line":"------------------"},{"line_number":260,"context_line":"Shelving a VM is to upload the VM snapshot to Glance service. 
Since the"},{"line_number":261,"context_line":"virtual PMEM image will not be included in the VM snapshot,"},{"line_number":262,"context_line":"VM shelve/unshelve will not automatically save/restore the virtual PMEM"},{"line_number":263,"context_line":"for the current iteration."},{"line_number":264,"context_line":""},{"line_number":265,"context_line":"Alternatives"},{"line_number":266,"context_line":"------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_54134c67","line":263,"range":{"start_line":258,"start_character":1,"end_line":263,"end_character":26},"in_reply_to":"dfd5e7cf_6eafa694","updated":"2019-01-07 16:26:40.000000000","message":"Agree","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0f830fe1f169cac68365d985ff3848aeb68e780a","unresolved":false,"context_lines":[{"line_number":255,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":256,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"VM shelve/unshelve"},{"line_number":259,"context_line":"------------------"},{"line_number":260,"context_line":"Shelving a VM is to upload the VM snapshot to Glance service. Since the"},{"line_number":261,"context_line":"virtual PMEM image will not be included in the VM snapshot,"},{"line_number":262,"context_line":"VM shelve/unshelve will not automatically save/restore the virtual PMEM"},{"line_number":263,"context_line":"for the current iteration."},{"line_number":264,"context_line":""},{"line_number":265,"context_line":"Alternatives"},{"line_number":266,"context_line":"------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_af180c81","line":263,"range":{"start_line":258,"start_character":1,"end_line":263,"end_character":26},"in_reply_to":"dfd5e7cf_6eafa694","updated":"2019-01-08 14:40:21.000000000","message":"Done","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"16602bd69a5bdb562612efecc2ea6d44743ebca3","unresolved":false,"context_lines":[{"line_number":255,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":256,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"VM shelve/unshelve"},{"line_number":259,"context_line":"------------------"},{"line_number":260,"context_line":"Shelving a VM is to upload the VM snapshot to Glance service. 
Since the"},{"line_number":261,"context_line":"virtual PMEM image will not be included in the VM snapshot,"},{"line_number":262,"context_line":"VM shelve/unshelve will not automatically save/restore the virtual PMEM"},{"line_number":263,"context_line":"for the current iteration."},{"line_number":264,"context_line":""},{"line_number":265,"context_line":"Alternatives"},{"line_number":266,"context_line":"------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_e972b011","line":263,"range":{"start_line":258,"start_character":1,"end_line":263,"end_character":26},"in_reply_to":"dfd5e7cf_6eafa694","updated":"2019-01-07 07:33:55.000000000","message":"ok","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":271,"context_line":" This ways is more flexible than the fix sized approach, however will"},{"line_number":272,"context_line":" result in fragmentation as detailed in the \"Background\" section."},{"line_number":273,"context_line":""},{"line_number":274,"context_line":"Resource tracking::"},{"line_number":275,"context_line":""},{"line_number":276,"context_line":" PMEM resources can be tracked in a separate DB table or embed into the"},{"line_number":277,"context_line":" current NUMA topology DB tables."},{"line_number":278,"context_line":" The trend is to move resource tracking to placement. So it will"},{"line_number":279,"context_line":" eventually go to placement anyway."},{"line_number":280,"context_line":""},{"line_number":281,"context_line":"Data model impact"},{"line_number":282,"context_line":"-----------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_4e98a269","line":279,"range":{"start_line":274,"start_character":0,"end_line":279,"end_character":35},"updated":"2019-01-07 06:15:25.000000000","message":"this is not an alternitve. you have miss understood the trend. placement maintains a tally count of resource x in inentory y provided by provider z.\n\nit does not model assingment of the second and third pmem namespace to a vm . it only recored that the vm is allocated 2 pmem namespaces or rather 2 units of custom_pmem_256GB resource form inventory y on resouce provider z.\n\nas such using placement allow there is no way to track which pmem namespaces are assigned to a specific vm or to do that assignment in the first place which mean we need to extend the resource track to do that.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"936edc4ecffaaf873498be3cf8a5fae2d230696e","unresolved":false,"context_lines":[{"line_number":271,"context_line":" This ways is more flexible than the fix sized approach, however will"},{"line_number":272,"context_line":" result in fragmentation as detailed in the \"Background\" section."},{"line_number":273,"context_line":""},{"line_number":274,"context_line":"Resource tracking::"},{"line_number":275,"context_line":""},{"line_number":276,"context_line":" PMEM resources can be tracked in a separate DB table or embed into the"},{"line_number":277,"context_line":" current NUMA topology DB tables."},{"line_number":278,"context_line":" The trend is to move resource tracking to placement. 
So it will"},{"line_number":279,"context_line":" eventually go to placement anyway."},{"line_number":280,"context_line":""},{"line_number":281,"context_line":"Data model impact"},{"line_number":282,"context_line":"-----------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_a80d7838","line":279,"range":{"start_line":274,"start_character":0,"end_line":279,"end_character":35},"in_reply_to":"dfd5e7cf_4e98a269","updated":"2019-01-07 16:26:40.000000000","message":"OK, will remove this. And will mention it in the ``resource tracking`` section.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0f830fe1f169cac68365d985ff3848aeb68e780a","unresolved":false,"context_lines":[{"line_number":271,"context_line":" This ways is more flexible than the fix sized approach, however will"},{"line_number":272,"context_line":" result in fragmentation as detailed in the \"Background\" section."},{"line_number":273,"context_line":""},{"line_number":274,"context_line":"Resource tracking::"},{"line_number":275,"context_line":""},{"line_number":276,"context_line":" PMEM resources can be tracked in a separate DB table or embed into the"},{"line_number":277,"context_line":" current NUMA topology DB tables."},{"line_number":278,"context_line":" The trend is to move resource tracking to placement. So it will"},{"line_number":279,"context_line":" eventually go to placement anyway."},{"line_number":280,"context_line":""},{"line_number":281,"context_line":"Data model impact"},{"line_number":282,"context_line":"-----------------"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_ca0d3e38","line":279,"range":{"start_line":274,"start_character":0,"end_line":279,"end_character":35},"in_reply_to":"dfd5e7cf_a80d7838","updated":"2019-01-08 14:40:21.000000000","message":"Done","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":278,"context_line":" The trend is to move resource tracking to placement. So it will"},{"line_number":279,"context_line":" eventually go to placement anyway."},{"line_number":280,"context_line":""},{"line_number":281,"context_line":"Data model impact"},{"line_number":282,"context_line":"-----------------"},{"line_number":283,"context_line":""},{"line_number":284,"context_line":"A new VirtualPMEM object is introduced to track the virtual PMEM information"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_8eb76ad9","line":281,"range":{"start_line":281,"start_character":0,"end_line":281,"end_character":17},"updated":"2019-01-07 06:15:25.000000000","message":"note the data model changes below are extentions to instance numa toployg object which is by the hardware module to caluate the assignemnt to host numa node when doing cpu pinning and hugepage assignment. 
we will also need to extend the host numa toplogy blob that is used by the numa reosuce tracker and numa topolgy filter to account for the pmem namespaces to provide the infomation need so that the numa reouces tracker code can preform the assignment of pmem namespaces to the guest correctly.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"936edc4ecffaaf873498be3cf8a5fae2d230696e","unresolved":false,"context_lines":[{"line_number":278,"context_line":" The trend is to move resource tracking to placement. So it will"},{"line_number":279,"context_line":" eventually go to placement anyway."},{"line_number":280,"context_line":""},{"line_number":281,"context_line":"Data model impact"},{"line_number":282,"context_line":"-----------------"},{"line_number":283,"context_line":""},{"line_number":284,"context_line":"A new VirtualPMEM object is introduced to track the virtual PMEM information"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_a82c9847","line":281,"range":{"start_line":281,"start_character":0,"end_line":281,"end_character":17},"in_reply_to":"dfd5e7cf_8eb76ad9","updated":"2019-01-07 16:26:40.000000000","message":"I agree that the host physical PMEM NUMA affinity information should be tracked together with other NUMA affinity stuff in NUMA topology. However isn\u0027t it that part (exiting NUMA topology and filter) being moved to placement? We just do not want to jump in the middle of that. We want to wait for it to be moved to placement. And after that, I think the NUMA topology tracking and filtering will be done automatically by placement. So it is sufficient for us to just track the instance + PMEM assignment association in the below data model change.","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"6996a69485dd80db6569f5dd1440b4c89192c4fb","unresolved":false,"context_lines":[{"line_number":284,"context_line":"A new VirtualPMEM object is introduced to track the virtual PMEM information"},{"line_number":285,"context_line":"of an instance::"},{"line_number":286,"context_line":""},{"line_number":287,"context_line":"+@base.NovaObjectRegistry.register"},{"line_number":288,"context_line":"+class VirtualPMEM(base.NovaObject,"},{"line_number":289,"context_line":"+                    base.NovaObjectDictCompat):"},{"line_number":290,"context_line":"+    # Version 1.0: Initial version"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_ceab3273","line":287,"range":{"start_line":287,"start_character":0,"end_line":287,"end_character":1},"updated":"2019-01-07 06:15:25.000000000","message":"the + is not required","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0f830fe1f169cac68365d985ff3848aeb68e780a","unresolved":false,"context_lines":[{"line_number":284,"context_line":"A new VirtualPMEM object is introduced to track the virtual PMEM information"},{"line_number":285,"context_line":"of an instance::"},{"line_number":286,"context_line":""},{"line_number":287,"context_line":"+@base.NovaObjectRegistry.register"},{"line_number":288,"context_line":"+class VirtualPMEM(base.NovaObject,"},{"line_number":289,"context_line":"+                    
base.NovaObjectDictCompat):"},{"line_number":290,"context_line":"+    # Version 1.0: Initial version"}],"source_content_type":"text/x-rst","patch_set":8,"id":"dfd5e7cf_8a0f362d","line":287,"range":{"start_line":287,"start_character":0,"end_line":287,"end_character":1},"in_reply_to":"dfd5e7cf_ceab3273","updated":"2019-01-08 14:40:21.000000000","message":"Done","commit_id":"cc7221ea0f13b06c7dcfd78715810b01107302fa"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":33,"context_line":" * How does the user specify the desired amount of virtual PMEM and"},{"line_number":34,"context_line":"   other attributes"},{"line_number":35,"context_line":" * What is the life cycle of virtual PMEM"},{"line_number":36,"context_line":""},{"line_number":37,"context_line":"Use Cases"},{"line_number":38,"context_line":"---------"},{"line_number":39,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_8fa4f6ba","line":36,"updated":"2019-01-10 15:27:28.000000000","message":"Just for my knowledge, does it require some specific instructions like IOMMU to be enabled at boot?","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":33,"context_line":" * How does the user specify the desired amount of virtual PMEM and"},{"line_number":34,"context_line":"   other attributes"},{"line_number":35,"context_line":" * What is the life cycle of virtual PMEM"},{"line_number":36,"context_line":""},{"line_number":37,"context_line":"Use Cases"},{"line_number":38,"context_line":"---------"},{"line_number":39,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_2ae9937f","line":36,"in_reply_to":"dfd5e7cf_8fa4f6ba","updated":"2019-01-11 03:46:03.000000000","message":"The spec per se does not mention IOMMU specifically, but to my understanding IOMMU may be required to prevent DMA to persistent memory for best IO performance.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7900,"name":"Tomasz Paszkowski","email":"ss7pro@gmail.com","username":"tpaszkowski"},"change_message_id":"c7380e08cca75b60e0bfcc85f53a453c85f96734","unresolved":false,"context_lines":[{"line_number":38,"context_line":"---------"},{"line_number":39,"context_line":""},{"line_number":40,"context_line":"Applications in the VMs can memory map the virtual PMEM to its memory"},{"line_number":41,"context_line":"address space and directly access the PMEM. This is the primary use case"},{"line_number":42,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":43,"context_line":"devices such as in memory databases. 
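A fleshed-out reading of the VirtualPMEM object quoted in the context lines above; everything past the quoted header (the VERSION constant and the field set) is an assumed sketch, not the spec's final schema::

    from nova.objects import base
    from nova.objects import fields

    @base.NovaObjectRegistry.register
    class VirtualPMEM(base.NovaObject, base.NovaObjectDictCompat):
        # Version 1.0: Initial version
        VERSION = '1.0'

        fields = {
            # Assumed fields: enough to tie a host namespace to the
            # instance it is assigned to, per the tracking discussion.
            'label': fields.StringField(),    # host namespace label
            'devpath': fields.StringField(),  # e.g. /dev/dax0.0
            'size': fields.IntegerField(),    # namespace size in bytes
            'instance_uuid': fields.UUIDField(nullable=True),
        }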
To name a few: redis_, rocksdb_ and"},{"line_number":44,"context_line":"oracle_."}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_648ef228","line":41,"updated":"2019-01-10 23:15:27.000000000","message":"I would highlight that this is called app direct mode.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":38,"context_line":"---------"},{"line_number":39,"context_line":""},{"line_number":40,"context_line":"Applications in the VMs can memory map the virtual PMEM to its memory"},{"line_number":41,"context_line":"address space and directly access the PMEM. This is the primary use case"},{"line_number":42,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":43,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":44,"context_line":"oracle_."}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_aafca33e","line":41,"in_reply_to":"bfdaf3ff_648ef228","updated":"2019-01-11 03:46:03.000000000","message":"App direct is a vendor (Intel) specific term. We are proposing a neutral solution.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7900,"name":"Tomasz Paszkowski","email":"ss7pro@gmail.com","username":"tpaszkowski"},"change_message_id":"c7380e08cca75b60e0bfcc85f53a453c85f96734","unresolved":false,"context_lines":[{"line_number":41,"context_line":"address space and directly access the PMEM. This is the primary use case"},{"line_number":42,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":43,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":44,"context_line":"oracle_."},{"line_number":45,"context_line":""},{"line_number":46,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":47,"context_line":"in the same way as traditional block devices but with better"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_047a8623","line":44,"updated":"2019-01-10 23:15:27.000000000","message":"I would also add SAP HANA and Aerospike here.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":41,"context_line":"address space and directly access the PMEM. This is the primary use case"},{"line_number":42,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":43,"context_line":"devices such as in memory databases. 
To name a few: redis_, rocksdb_ and"},{"line_number":44,"context_line":"oracle_."},{"line_number":45,"context_line":""},{"line_number":46,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":47,"context_line":"in the same way as traditional block devices but with better"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_05ab60d6","line":44,"in_reply_to":"bfdaf3ff_047a8623","updated":"2019-01-11 03:46:03.000000000","message":"Done","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7900,"name":"Tomasz Paszkowski","email":"ss7pro@gmail.com","username":"tpaszkowski"},"change_message_id":"c7380e08cca75b60e0bfcc85f53a453c85f96734","unresolved":false,"context_lines":[{"line_number":43,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":44,"context_line":"oracle_."},{"line_number":45,"context_line":""},{"line_number":46,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":47,"context_line":"in the same way as traditional block devices but with better"},{"line_number":48,"context_line":"performance."},{"line_number":49,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_24b1cae2","line":46,"updated":"2019-01-10 23:15:27.000000000","message":"VM users can also configure PMEM into block devices and use them\nin the same way as traditional block devices. With a dax filesystem, mmapping a file into memory sets up a direct path to memory (without page cache). This gives the best possible performance for accessing memory with a file backend.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":43,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_ and"},{"line_number":44,"context_line":"oracle_."},{"line_number":45,"context_line":""},{"line_number":46,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":47,"context_line":"in the same way as traditional block devices but with better"},{"line_number":48,"context_line":"performance."},{"line_number":49,"context_line":""}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_05704072","line":46,"in_reply_to":"bfdaf3ff_24b1cae2","updated":"2019-01-11 03:46:03.000000000","message":"Done","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":45,"context_line":""},{"line_number":46,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":47,"context_line":"in the same way as traditional block devices but with better"},{"line_number":48,"context_line":"performance."},{"line_number":49,"context_line":""},{"line_number":50,"context_line":".. important ::"},{"line_number":51,"context_line":"    This spec only intends to enable virtual persistent memory"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_aac9a878","line":48,"updated":"2019-01-10 15:27:28.000000000","message":"hmmm... 
ok \"better performance\" is a buzz word, but I understand you","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":45,"context_line":""},{"line_number":46,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":47,"context_line":"in the same way as traditional block devices but with better"},{"line_number":48,"context_line":"performance."},{"line_number":49,"context_line":""},{"line_number":50,"context_line":".. important ::"},{"line_number":51,"context_line":"    This spec only intends to enable virtual persistent memory"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_853bd09a","line":48,"in_reply_to":"dfd5e7cf_aac9a878","updated":"2019-01-11 03:46:03.000000000","message":"Changed to the wording that Tomasz has suggested right above","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":49,"context_line":""},{"line_number":50,"context_line":".. important ::"},{"line_number":51,"context_line":"    This spec only intends to enable virtual persistent memory"},{"line_number":52,"context_line":"    for the libvirt driver."},{"line_number":53,"context_line":""},{"line_number":54,"context_line":"Proposed change"},{"line_number":55,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_4fae0e99","line":52,"updated":"2019-01-10 15:27:28.000000000","message":"tbc, libvirt/QEMU and libvirt/KVM.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":49,"context_line":""},{"line_number":50,"context_line":".. 
important ::"},{"line_number":51,"context_line":"    This spec only intends to enable virtual persistent memory"},{"line_number":52,"context_line":"    for the libvirt driver."},{"line_number":53,"context_line":""},{"line_number":54,"context_line":"Proposed change"},{"line_number":55,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_05cda05c","line":52,"in_reply_to":"dfd5e7cf_4fae0e99","updated":"2019-01-11 03:46:03.000000000","message":"libvirt KVM","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":65,"context_line":""},{"line_number":66,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":67,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":68,"context_line":"can guarantee the guest write persistence when accessed by \"pmem\" method"},{"line_number":69,"context_line":"is \"devdax\" mode."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to the limitations on write persistence, partitioning and qemu support,"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_657693a7","line":68,"range":{"start_line":68,"start_character":0,"end_line":68,"end_character":41},"updated":"2019-01-10 15:27:28.000000000","message":"you mean, about syncing I/Os ?","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":65,"context_line":""},{"line_number":66,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":67,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":68,"context_line":"can guarantee the guest write persistence when accessed by \"pmem\" method"},{"line_number":69,"context_line":"is \"devdax\" mode."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to the limitations on write persistence, partitioning and qemu support,"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_e5265c17","line":68,"range":{"start_line":68,"start_character":0,"end_line":68,"end_character":41},"in_reply_to":"dfd5e7cf_657693a7","updated":"2019-01-11 03:46:03.000000000","message":"Oh, this write persistence is in the circumstances of power failure.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":66,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":67,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":68,"context_line":"can guarantee the guest write persistence when accessed by \"pmem\" method"},{"line_number":69,"context_line":"is \"devdax\" mode."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to the limitations on 
write persistence, partitioning and qemu support,"},{"line_number":72,"context_line":"this spec proposes to support virtual PMEM backed by physical PMEM,"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_c55e5f11","line":69,"range":{"start_line":69,"start_character":3,"end_line":69,"end_character":11},"updated":"2019-01-10 15:27:28.000000000","message":"nit (c) stephenfin : ``devdax``","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":66,"context_line":"Though QEMU supports multiple types of virtual PMEM backends (in QEMU\u0027s"},{"line_number":67,"context_line":"word `virtual NVDIMM backends`_) on Linux, currently the only one that"},{"line_number":68,"context_line":"can guarantee the guest write persistence when accessed by \"pmem\" method"},{"line_number":69,"context_line":"is \"devdax\" mode."},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"Due to the limitations on write persistence, partitioning and qemu support,"},{"line_number":72,"context_line":"this spec proposes to support virtual PMEM backed by physical PMEM,"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_a52054f9","line":69,"range":{"start_line":69,"start_character":3,"end_line":69,"end_character":11},"in_reply_to":"dfd5e7cf_c55e5f11","updated":"2019-01-11 03:46:03.000000000","message":"Done","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":71,"context_line":"Due to the limitations on write persistence, partitioning and qemu support,"},{"line_number":72,"context_line":"this spec proposes to support virtual PMEM backed by physical PMEM,"},{"line_number":73,"context_line":"partitioned to ``pmem`` type namespaces in ``devdax`` mode. Please refer"},{"line_number":74,"context_line":"to the `NVDIMM Linux kernel document`_ for the terms and details."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The ``pmem`` type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. This introduces"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_e5ae03e6","line":74,"updated":"2019-01-10 15:27:28.000000000","message":"If you don\u0027t want to support the blk method, then please remove the 2nd use-case above (L46)","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":71,"context_line":"Due to the limitations on write persistence, partitioning and qemu support,"},{"line_number":72,"context_line":"this spec proposes to support virtual PMEM backed by physical PMEM,"},{"line_number":73,"context_line":"partitioned to ``pmem`` type namespaces in ``devdax`` mode. 
Please refer"},{"line_number":74,"context_line":"to the `NVDIMM Linux kernel document`_ for the terms and details."},{"line_number":75,"context_line":""},{"line_number":76,"context_line":"The ``pmem`` type namespaces require contiguous physical space and"},{"line_number":77,"context_line":"are not managed in pages as ordinary system memory. This introduces"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_a59134f4","line":74,"in_reply_to":"dfd5e7cf_e5ae03e6","updated":"2019-01-11 03:46:03.000000000","message":"Ah, I can see where comes the confusion. A block device (and thus filesystems ontop) can be created from a ``pmem`` type namespace (not necessarily accessed by blk method). So inside the VM, the user can still have such a use case as L46.\nHerein, we are talking about the backend of the virtual PMEM, which we only want to support the physical ``pmem`` access method.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":114,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":115,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":116,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":117,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":118,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":119,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_65e9b318","line":118,"range":{"start_line":117,"start_character":39,"end_line":118,"end_character":50},"updated":"2019-01-10 15:27:28.000000000","message":"what if the operator wants to modify the list of sizes ?","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7900,"name":"Tomasz Paszkowski","email":"ss7pro@gmail.com","username":"tpaszkowski"},"change_message_id":"c7380e08cca75b60e0bfcc85f53a453c85f96734","unresolved":false,"context_lines":[{"line_number":114,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":115,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":116,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":117,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":118,"context_line":"the namespaces of the specified sizes accordingly. 
Please refer to the"},{"line_number":119,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_c41d5ebe","line":118,"range":{"start_line":117,"start_character":39,"end_line":118,"end_character":50},"in_reply_to":"dfd5e7cf_65e9b318","updated":"2019-01-10 23:15:27.000000000","message":"He will need migrate all VMs from a node and than reconfigure it.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":114,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":115,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":116,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":117,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":118,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":119,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_e5b09c8e","line":118,"range":{"start_line":117,"start_character":39,"end_line":118,"end_character":50},"in_reply_to":"dfd5e7cf_65e9b318","updated":"2019-01-11 03:46:03.000000000","message":"Yes, Tomasz gave a good answer.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":180,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":181,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":182,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":183,"context_line":"virtual PMEM guest NUMA topology is specified. In future, the movement"},{"line_number":184,"context_line":"to NUMA resource providers shall be done by the methodology exposed by"},{"line_number":185,"context_line":"the placement \u0027/reshaper\u0027 API."},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Besides tracking PMEM resources in placement. Each compute agent needs"},{"line_number":188,"context_line":"to know the local host PMEM namespaces assignment, i.e. which PMEM"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_45f6effb","line":185,"range":{"start_line":183,"start_character":47,"end_line":185,"end_character":30},"updated":"2019-01-10 15:27:28.000000000","message":"Huh. 
I think you should still make sure that if there are children, you shouldn\u0027t create the PMEM inventories into the root RP.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":180,"context_line":"iteration without NUMA resource providers, they will be put directly"},{"line_number":181,"context_line":"into the compute node resource provider. This also means that for now,"},{"line_number":182,"context_line":"host NUMA topology is not considered during scheduling even though"},{"line_number":183,"context_line":"virtual PMEM guest NUMA topology is specified. In future, the movement"},{"line_number":184,"context_line":"to NUMA resource providers shall be done by the methodology exposed by"},{"line_number":185,"context_line":"the placement \u0027/reshaper\u0027 API."},{"line_number":186,"context_line":""},{"line_number":187,"context_line":"Besides tracking PMEM resources in placement. Each compute agent needs"},{"line_number":188,"context_line":"to know the local host PMEM namespaces assignment, i.e. which PMEM"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_c52a388f","line":185,"range":{"start_line":183,"start_character":47,"end_line":185,"end_character":30},"in_reply_to":"dfd5e7cf_45f6effb","updated":"2019-01-11 03:46:03.000000000","message":"Do you mean after NUMA topology moved to placement? The PMEM resource will be embedded inside of the NUMA inventories. So they won\u0027t be in root RP in that case.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":7166,"name":"Sylvain Bauza","email":"sbauza@redhat.com","username":"sbauza"},"change_message_id":"657fb1a93b5def4bfd5ac1b9f4a5d6b0d39d2ec5","unresolved":false,"context_lines":[{"line_number":192,"context_line":"Please refer to `Data model impact`_ for details. The total PMEM"},{"line_number":193,"context_line":"namespaces are in config and the assigned ones are in DB, so the available"},{"line_number":194,"context_line":"ones can be calculated."},{"line_number":195,"context_line":""},{"line_number":196,"context_line":""},{"line_number":197,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":198,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":10,"id":"dfd5e7cf_45cfcfaf","line":195,"updated":"2019-01-10 15:27:28.000000000","message":"You should also verify that if the config value is modified (for example by changing the value of the PMEM size), then the compute service will get an exception if there are already some allocations.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c5ec5ba6538c24efbb643071d71abcb69c0f2d32","unresolved":false,"context_lines":[{"line_number":192,"context_line":"Please refer to `Data model impact`_ for details. 
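While the PMEM inventories stay on the compute node's root resource provider (as discussed above), the placement inventory record for one fixed namespace size would look roughly like this; the custom resource class name and the totals are illustrative::

    # PUT /resource_providers/{uuid}/inventories/CUSTOM_PMEM_128GB
    inventory = {
        'resource_provider_generation': 1,
        'total': 4,        # four fixed-size 128G namespaces on this host
        'reserved': 0,
        'min_unit': 1,
        'max_unit': 4,
        'step_size': 1,
        'allocation_ratio': 1.0,
    }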
The total PMEM"},{"line_number":193,"context_line":"namespaces are in config and the assigned ones are in DB, so the available"},{"line_number":194,"context_line":"ones can be calculated."},{"line_number":195,"context_line":""},{"line_number":196,"context_line":""},{"line_number":197,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":198,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":10,"id":"bfdaf3ff_376fb7c6","line":195,"in_reply_to":"dfd5e7cf_45cfcfaf","updated":"2019-01-11 03:46:03.000000000","message":"Good point! In general, any inconsistency among the DB records, the config values and actual namespace allocation will give the compute service an exception.","commit_id":"435da64dc10d6e8209e42c393baed3a9b0f1b4c6"}],"specs/train/approved/virtual-persistent-memory.rst":[{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":25,"context_line":""},{"line_number":26,"context_line":"Virtualization layer has already supported virtual PMEM which means virtual"},{"line_number":27,"context_line":"machines now can have physical PMEM as the backend of virtual PMEM."},{"line_number":28,"context_line":"As far as Nova is concerned, several problems needs"},{"line_number":29,"context_line":"to be addressed:"},{"line_number":30,"context_line":""},{"line_number":31,"context_line":" * How is the physical PMEM managed and presented as virtual PMEM"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_1c2d258e","line":28,"range":{"start_line":28,"start_character":46,"end_line":28,"end_character":51},"updated":"2019-04-10 21:17:15.000000000","message":"need","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":25,"context_line":""},{"line_number":26,"context_line":"Virtualization layer has already supported virtual PMEM which means virtual"},{"line_number":27,"context_line":"machines now can have physical PMEM as the backend of virtual PMEM."},{"line_number":28,"context_line":"As far as Nova is concerned, several problems needs"},{"line_number":29,"context_line":"to be addressed:"},{"line_number":30,"context_line":""},{"line_number":31,"context_line":" * How is the physical PMEM managed and presented as virtual PMEM"}],"source_content_type":"text/x-rst","patch_set":12,"id":"dfbec78f_f2cb2350","line":28,"range":{"start_line":28,"start_character":46,"end_line":28,"end_character":51},"in_reply_to":"5fc1f717_1c2d258e","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":37,"context_line":"Use Cases"},{"line_number":38,"context_line":"---------"},{"line_number":39,"context_line":"Applications in the VMs can memory map the virtual PMEM to its memory"},{"line_number":40,"context_line":"address space and directly access the PMEM. 
This is the primary use case"},{"line_number":41,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":42,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_,"},{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_1c5be521","line":40,"range":{"start_line":40,"start_character":44,"end_line":40,"end_character":72},"updated":"2019-04-10 21:17:15.000000000","message":"this isn\u0027t a use case. I think the use case is \"Provide applications with the ability to load large contiguous segments of memory that retain their data across power cycles\"","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":37,"context_line":"Use Cases"},{"line_number":38,"context_line":"---------"},{"line_number":39,"context_line":"Applications in the VMs can memory map the virtual PMEM to its memory"},{"line_number":40,"context_line":"address space and directly access the PMEM. This is the primary use case"},{"line_number":41,"context_line":"of PMEM and has been explored by applications who heavily use memory"},{"line_number":42,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_,"},{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."}],"source_content_type":"text/x-rst","patch_set":12,"id":"dfbec78f_adfd4a3b","line":40,"range":{"start_line":40,"start_character":44,"end_line":40,"end_character":72},"in_reply_to":"5fc1f717_1c5be521","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_1c42053c","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"updated":"2019-04-10 21:17:15.000000000","message":"this sentence doesn\u0027t make sense to me. 
could you rewrite it please?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"dfbec78f_cda67e54","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"in_reply_to":"5fc1f717_1c42053c","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_72ae812f","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"in_reply_to":"5fc1f717_1c42053c","updated":"2019-04-23 13:39:26.000000000","message":"i think they meant to say \n\nif the filesystem supports direct io\nthen you can mmap the file into memory and bypass the\nguest kernel\u0027s page cache.\n\nthat said im guessing a bit more than i would like with that translation.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"9d3c505604e0bcadca3f3f1346c440d1e8fc1894","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). 
This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"dfbec78f_cfad71eb","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"in_reply_to":"dfbec78f_cf3a51cc","updated":"2019-05-02 16:50:12.000000000","message":"John also pointed out (at the PTG) that HPC people want this because PMEM is cheaper than RAM.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"41d9d8f06513b35cb6fed71e69d44e64e21e8491","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"dfbec78f_a970f0dd","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"in_reply_to":"dfbec78f_cf3a51cc","updated":"2019-05-08 02:49:35.000000000","message":"This section must be re-written and I am doing so :).\nWhat it tried to describe here is inside VM instances. Meaning that applications inside VMs can mmap files (residing on ``direct access`` capable file systems, with that file system on top of persistent memory) into their address space and thus get ``direct access`` to the underlying persistent memory hardware.\n\nOf course applications on the host can also do the same mmap thing. Once QEMU does that, it is \"file-backed memory\". And if the underlying filesystem supports ``direct access`` then the mapping bypasses the kernel page cache. And it should be much faster on top of physical persistent memory than on SSDs or hard drives.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"41d9d8f06513b35cb6fed71e69d44e64e21e8491","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. 
important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"dfbec78f_4977f4d5","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"in_reply_to":"dfbec78f_cfad71eb","updated":"2019-05-08 02:49:35.000000000","message":"Yes, thanks, I will add it in the new patchset.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"0cc6267bca4df8a192098d663d11876606073e48","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"dfbec78f_cf3a51cc","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"in_reply_to":"ffb9cba7_20f50158","updated":"2019-05-02 16:43:14.000000000","message":"How does this compare/contrast to the file-backed memory feature we added in Rocky?\n\nhttps://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/libvirt-file-backed-memory.html","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"fae3d66e71f587eaa3f87b61b6b644aca6dfd032","unresolved":false,"context_lines":[{"line_number":43,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":44,"context_line":""},{"line_number":45,"context_line":"VM users can also configure PMEM into block devices and use"},{"line_number":46,"context_line":"in the same way as traditional block devices. With ``direct access``"},{"line_number":47,"context_line":"capable filesystems, all mmap file into memory is setting direct path"},{"line_number":48,"context_line":"to memory (without page cache). This gives the best possible performance"},{"line_number":49,"context_line":"for accessing memory with file backend."},{"line_number":50,"context_line":""},{"line_number":51,"context_line":".. important ::"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_20f50158","line":48,"range":{"start_line":46,"start_character":46,"end_line":48,"end_character":31},"in_reply_to":"ffb9cba7_72ae812f","updated":"2019-04-24 04:39:57.000000000","message":"Basically, it is talking about the ways you can use those PMEM inside a guest. And yes, just as Sean said.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":76,"context_line":""},{"line_number":77,"context_line":"The ``pmem`` type namespaces require contiguous physical space and"},{"line_number":78,"context_line":"are not managed in pages as ordinary system memory. 
This introduces"},{"line_number":79,"context_line":"a fragmentation issue with regard to multiple namespaces are created"},{"line_number":80,"context_line":"and used by multiple applications. As shown in below diagram,"},{"line_number":81,"context_line":"four applications are using four namespaces each of size 100GB::"},{"line_number":82,"context_line":""}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_dc631db0","line":79,"range":{"start_line":79,"start_character":57,"end_line":79,"end_character":68},"updated":"2019-04-10 21:17:15.000000000","message":"that are created\n\nor:\n\nwhen they are created","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":76,"context_line":""},{"line_number":77,"context_line":"The ``pmem`` type namespaces require contiguous physical space and"},{"line_number":78,"context_line":"are not managed in pages as ordinary system memory. This introduces"},{"line_number":79,"context_line":"a fragmentation issue with regard to multiple namespaces are created"},{"line_number":80,"context_line":"and used by multiple applications. As shown in below diagram,"},{"line_number":81,"context_line":"four applications are using four namespaces each of size 100GB::"},{"line_number":82,"context_line":""}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_5276ddda","line":79,"range":{"start_line":79,"start_character":57,"end_line":79,"end_character":68},"in_reply_to":"5fc1f717_dc631db0","updated":"2019-04-23 13:39:26.000000000","message":"or \n\n\"... with regard to how multiple namespaces are created ...\"","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":115,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":116,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_fcfe19ad","line":119,"range":{"start_line":118,"start_character":39,"end_line":119,"end_character":50},"updated":"2019-04-10 21:17:15.000000000","message":"why? why not have an external agent or something else set this up and just have the nova-compute do the *discovery* of the namespaces instead of the programming/creation of them?\n\nI view this kind of thing in a similar fashion to how we handle SR-IOV devices. We don\u0027t rely on the nova-compute to create devices or bonded interfaces or anything like that. 
Instead, we rely on the operator to configure their compute host appropriately and then tell nova-compute (via the nova.conf file and options like CONF.pci_passthrough_whitelist) which devices are consumable by guests.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"60c9050e3ab00ccf21e78523378e1d9ef0c64f2b","unresolved":false,"context_lines":[{"line_number":115,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":116,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_06891674","line":119,"range":{"start_line":118,"start_character":39,"end_line":119,"end_character":50},"in_reply_to":"5fc1f717_fcfe19ad","updated":"2019-04-22 21:55:08.000000000","message":"It concerns me that the PCI passthrough_whitelist is being held up as a precedent for... anything.\n\nThe analogous behavior for pmem would be for the admin to carve out the namespaces and then also list them in conf. You\u0027re suggesting instead that the admin carve out the namespaces and then have nova discover them, which is better.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"3a67cf83cf35c41039a97c1d3f2521daf612aa53","unresolved":false,"context_lines":[{"line_number":115,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":116,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_bcf7f2f5","line":119,"range":{"start_line":118,"start_character":39,"end_line":119,"end_character":50},"in_reply_to":"ffb9cba7_06891674","updated":"2019-04-23 11:45:00.000000000","message":"Please actually read what I wrote.\n\nI am saying that for all other resources/devices we do NOT have the compute manager, virt driver or resource tracker *create* the devices. Instead, for all devices on the compute node, we only *discover* devices that have been created by the deployer or administrator outside of the scope of Nova and the hypervisor.\n\nThe CONF.pci_passthrough_whitelist is a terrible CONF option, true. 
But the *point* of the configuration option is to inform the PCI device manager which *pre-created* devices are available for guests to consume.\n\nWhat I\u0027m suggesting here is that we do not change the fundamental logic and assumptions in nova-compute that assume devices have *already* been created and just need to be discovered and not \"created or programmed\" as this spec proposes.\n\nDo that creation and programming *outside of Nova*. Same with Cyborg, frankly. The whole \"I\u0027m going to change Nova to dynamically program free regions of an FPGA\" use case is similar to what is being proposed here, and I am strongly pushing back on that.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"df196c89f1598f1c5e038a45ddcb692583c0e985","unresolved":false,"context_lines":[{"line_number":115,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":116,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_f28d80d3","line":119,"range":{"start_line":118,"start_character":39,"end_line":119,"end_character":50},"in_reply_to":"ffb9cba7_20c3c1ef","updated":"2019-04-24 05:46:32.000000000","message":"To all: OK, the namespaces will be pre-created by the admin and discovered by the nova compute agent. \nWhat concerns me now is how to properly and flexibly report the sizes of the namespaces. If a global conf option for the namespace naming pattern is going to be adopted (something like CONF.pmem_namespace_prefix), nova compute needs to discover not only the namespaces but also the sizes of these namespaces. However, the current `ndctl` tool can only report the namespace size in \u0027bytes\u0027 or in \u0027human readable\u0027 format (which is the largest convenient unit to choose). For example, \"size\":68717379584 is converted to 64GiB in human readable format.\n\nSo to allow more flexible Resource Classes, I am suggesting to have a pmem config on each host in the format of a list of:\n    PMEM_NAMESPACE_NAME:$STRING\n\nWhile $STRING is normally the size in the admin\u0027s preferred unit, be it xGB, yMB or zB, it could also be a descriptive string like \"SMALL\", \"MEDIUM\" or \"LARGE\". 
Resource Class names are created as CUSTOM_PMEM_NAMESPACE_$STRING.\nAnd flavors would be created accordingly, for example:\n    hw.numa_pmem0.0\u003d64G\n    or\n    hw.numa_pmem0.1\u003dLARGE\nIt is all up to the admin to properly design the granularity.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":115,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":116,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_d2940deb","line":119,"range":{"start_line":118,"start_character":39,"end_line":119,"end_character":50},"in_reply_to":"ffb9cba7_bcf7f2f5","updated":"2019-04-23 13:39:26.000000000","message":"nit technically for vGPUs we create the mdevs when the agent starts but i agree with jay that it would be better to have the operator precreate the namespaces and have nova just report what it discovered.\n\nwe will still need some kind of whitelist to allow some namespaces to be used on the host and not report them to placement but that should be purely filtering with no other implications.\n\non the cyborg front dynamic allocation of Pmem namespaces could be delegated to cyborg at some point in the future if the static management is not enough.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"fae3d66e71f587eaa3f87b61b6b644aca6dfd032","unresolved":false,"context_lines":[{"line_number":115,"context_line":"fragmentation and also a challenge to track PMEM resource."},{"line_number":116,"context_line":"The proposed approach is to use fix sized namespaces. In other words,"},{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_20c3c1ef","line":119,"range":{"start_line":118,"start_character":39,"end_line":119,"end_character":50},"in_reply_to":"ffb9cba7_d2940deb","updated":"2019-04-24 04:39:57.000000000","message":"My colleague has concerns about adding to the burden on the operator; they hope the operator can get a working Nova out of the box. I totally understand that concern. 
But I am still on the side of moving that hardware configuration out of nova and pushing it to the deployment stage, where we can do more flexible configuration. In nova, it is really hard to handle a lot of the corner cases of configuring those pmem.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"bfbc7c1120153b37475c0b3ffc2478a277902d38","unresolved":false,"context_lines":[{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"},{"line_number":123,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_6497be3c","line":120,"updated":"2019-03-19 17:08:04.000000000","message":"It\u0027s clear to me that the virt driver will be responsible for processing the config to inform the resource provider inventories; but does anything actually need to be done on the system to carve out the namespaces at the same time?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"},{"line_number":123,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_3479d6fb","line":120,"in_reply_to":"5fc1f717_1c13e502","updated":"2019-04-23 13:39:26.000000000","message":"i think the only thing the virt driver needs to process is a listing of what namespaces may be provided to the vms.\n\nwe can do that as a blacklist or whitelist model but\nanything beyond that is complexity we don\u0027t need.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. 
Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"},{"line_number":123,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_1c13e502","line":120,"in_reply_to":"5fc1f717_33f7b5d7","updated":"2019-04-10 21:17:15.000000000","message":"-1 to this, see my note above. I\u0027d much rather the operator be responsible for carving out the namespaces themselves outside the context of the virt driver and then just let libvirt discover those namespaces.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c78bd589e59aebe49dbb9a65c6fd6053752ba8fa","unresolved":false,"context_lines":[{"line_number":117,"context_line":"cloud admin can define a list of sizes for the namespaces to be created"},{"line_number":118,"context_line":"on a certain host in nova config file. Nova compute agent will create"},{"line_number":119,"context_line":"the namespaces of the specified sizes accordingly. Please refer to the"},{"line_number":120,"context_line":"`libvirt driver implementation spec`_ for implementation details."},{"line_number":121,"context_line":""},{"line_number":122,"context_line":"PMEM Resource Tracking"},{"line_number":123,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_33f7b5d7","line":120,"in_reply_to":"5fc1f717_6497be3c","updated":"2019-03-20 02:55:23.000000000","message":"Yes, the namespaces need to be created by the `ndctl` command. The admin just has to specify the sizes of the namespaces, and the libvirt driver will create the namespaces accordingly.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":131,"context_line":" `size` being the PMEM namespace size of this Resource Class."},{"line_number":132,"context_line":""},{"line_number":133,"context_line":"The reason of not adopting measurement units other than Megabytes is"},{"line_number":134,"context_line":"that different units do not convert freely from one to another while embeded"},{"line_number":135,"context_line":"in Custom Resource Classe names. Meaning a request for a 128GB PMEM"},{"line_number":136,"context_line":"namespace can be fulfilled by a CUSTOM_PMEM_NAMESPACE_128GB resource, but"},{"line_number":137,"context_line":"can not be fulfilled by a CUSTOM_PMEM_NAMESPACE_131072MB resource even"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_bcbc11dc","line":134,"range":{"start_line":134,"start_character":69,"end_line":134,"end_character":76},"updated":"2019-04-10 21:17:15.000000000","message":"embedded","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":135,"context_line":"in Custom Resource Classe names. 
Meaning a request for a 128GB PMEM"},{"line_number":136,"context_line":"namespace can be fulfilled by a CUSTOM_PMEM_NAMESPACE_128GB resource, but"},{"line_number":137,"context_line":"can not be fulfilled by a CUSTOM_PMEM_NAMESPACE_131072MB resource even"},{"line_number":138,"context_line":"though they are of the same quantity."},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"Each compute node has the fixed sizes for the PMEM namespaces in its"},{"line_number":141,"context_line":"config file. The sizes are in the format of [size]MB. The Custom Resource"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_5c9e4d54","line":138,"updated":"2019-04-10 21:17:15.000000000","message":"I support the use of custom resource classes for the differently-sized PMEM namespaces.\n\nA slightly different approach, though, would be to have a nova CONF option that mapped a custom resource class *name* to a size, in bytes, of the namespace.\n\nFor example, consider an operator who creates two resource classes:\n\n CUSTOM_PMEM_SMALL\n CUSTOM_PMEM_LARGE\n\nand a CONF option that looks like this:\n\n pmem_namespace_resource_class_size_map \u003d CUSTOM_PMEM_SMALL:10737418240,CUSTOM_PMEM_LARGE:53687091200\n\nsmall \u003d\u003d 10G and large \u003d\u003d 50G\n\nJust a suggestion, of course. This kind of aligns with the PCI device aliasing thing we support in the CONF file.\n\nYou could also have resource classes CUSTOM_PMEM_NAMESPACE_256MB and CUSTOM_PMEM_NAMESPACE_512GB if you wanted and not worry about the whole issue with mismatched units in the resource class name. :)","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"60c9050e3ab00ccf21e78523378e1d9ef0c64f2b","unresolved":false,"context_lines":[{"line_number":135,"context_line":"in Custom Resource Classe names. Meaning a request for a 128GB PMEM"},{"line_number":136,"context_line":"namespace can be fulfilled by a CUSTOM_PMEM_NAMESPACE_128GB resource, but"},{"line_number":137,"context_line":"can not be fulfilled by a CUSTOM_PMEM_NAMESPACE_131072MB resource even"},{"line_number":138,"context_line":"though they are of the same quantity."},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"Each compute node has the fixed sizes for the PMEM namespaces in its"},{"line_number":141,"context_line":"config file. The sizes are in the format of [size]MB. The Custom Resource"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_c62c5e49","line":138,"in_reply_to":"5fc1f717_5c9e4d54","updated":"2019-04-22 21:55:08.000000000","message":"\u003e pmem_namespace_resource_class_size_map \u003d CUSTOM_PMEM_SMALL:10737418240,CUSTOM_PMEM_LARGE:53687091200\n\nEw\n\n \u003e This kind of aligns with the PCI\n \u003e device aliasing thing we support in the CONF file.\n\nOy, again with this. PCI alias should not be used as an example to be followed.\n\n \u003e You could also have resource classes CUSTOM_PMEM_NAMESPACE_256MB\n \u003e and CUSTOM_PMEM_NAMESPACE_512GB if you wanted and not worry about\n \u003e the whole issue with mismatched units in the resource class name.\n \u003e :)\n\nHow would you figure out which is which? 
Maintain a hardcoded mapping?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"fae3d66e71f587eaa3f87b61b6b644aca6dfd032","unresolved":false,"context_lines":[{"line_number":135,"context_line":"in Custom Resource Classe names. Meaning a request for a 128GB PMEM"},{"line_number":136,"context_line":"namespace can be fulfilled by a CUSTOM_PMEM_NAMESPACE_128GB resource, but"},{"line_number":137,"context_line":"can not be fulfilled by a CUSTOM_PMEM_NAMESPACE_131072MB resource even"},{"line_number":138,"context_line":"though they are of the same quantity."},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"Each compute node has the fixed sizes for the PMEM namespaces in its"},{"line_number":141,"context_line":"config file. The sizes are in the format of [size]MB. The Custom Resource"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_607d39b0","line":138,"in_reply_to":"ffb9cba7_1cab9e1d","updated":"2019-04-24 04:39:57.000000000","message":"I leave this to Rui, we have gone through this. I think he can summarize this better. :)","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"3a67cf83cf35c41039a97c1d3f2521daf612aa53","unresolved":false,"context_lines":[{"line_number":135,"context_line":"in Custom Resource Classe names. Meaning a request for a 128GB PMEM"},{"line_number":136,"context_line":"namespace can be fulfilled by a CUSTOM_PMEM_NAMESPACE_128GB resource, but"},{"line_number":137,"context_line":"can not be fulfilled by a CUSTOM_PMEM_NAMESPACE_131072MB resource even"},{"line_number":138,"context_line":"though they are of the same quantity."},{"line_number":139,"context_line":""},{"line_number":140,"context_line":"Each compute node has the fixed sizes for the PMEM namespaces in its"},{"line_number":141,"context_line":"config file. The sizes are in the format of [size]MB. The Custom Resource"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_1cab9e1d","line":138,"in_reply_to":"ffb9cba7_c62c5e49","updated":"2019-04-23 11:45:00.000000000","message":"\u003e \u003e pmem_namespace_resource_class_size_map \u003d CUSTOM_PMEM_SMALL:10737418240,CUSTOM_PMEM_LARGE:53687091200\n \u003e \n \u003e Ew\n\nWell, given Nova has one way of configuring things, it may be eww, but until a YAML-based configuration system is in place, it is what we have...\n\n \u003e \u003e This kind of aligns with the PCI\n \u003e \u003e device aliasing thing we support in the CONF file.\n \u003e \n \u003e Oy, again with this. PCI alias should not be used as an example to\n \u003e be followed.\n\nOy yourself. It\u0027s the pci_passthrough_whitelist CONF option that is primarily awful. The alias\u0027ing thing just allows a simple string to be used to refer to the complicated set of device tags, PCI address regexes and vendor identifiers.\n\nNone of those things are relevant here, though. 
It\u0027s just saying \"here\u0027s an option that you can use to map a custom resource class name to a specific size in bytes for a PMEM namespace.\n\n \u003e \u003e You could also have resource classes CUSTOM_PMEM_NAMESPACE_256MB\n \u003e \u003e and CUSTOM_PMEM_NAMESPACE_512GB if you wanted and not worry about\n \u003e \u003e the whole issue with mismatched units in the resource class name.\n \u003e \u003e :)\n \u003e \n \u003e How would you figure out which is which? Maintain a hardcoded\n \u003e mapping?\n\nBy name? \n\npmem_namespace_resource_class_size_map \u003d CUSTOM_PMEM_NAMESPACE_256MB:268435456,CUSTOM_PMEM_NAMESPACE_512GB:549755813888\n\nThe point is to not encode multiple bits of information (size, unit size and name) in a single string. Instead, just use a string name for the resource class and have a map that points to the size, in bytes, of the PMEM namespace that would be created by that custom resource class.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":145,"context_line":"size configuration."},{"line_number":146,"context_line":""},{"line_number":147,"context_line":"The \u0027total\u0027 value of each Custom Resource Class is the *number* of the"},{"line_number":148,"context_line":"PMEM namespaces belong to this Resource Class."},{"line_number":149,"context_line":""},{"line_number":150,"context_line":"The \u0027max_unit\u0027 would be set to the same value as \u0027total\u0027 since it is"},{"line_number":151,"context_line":"possible to attach all of the PMEM namespaces in a certain Resource"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_8ce5473b","line":148,"updated":"2019-04-10 21:17:15.000000000","message":"✔","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":149,"context_line":""},{"line_number":150,"context_line":"The \u0027max_unit\u0027 would be set to the same value as \u0027total\u0027 since it is"},{"line_number":151,"context_line":"possible to attach all of the PMEM namespaces in a certain Resource"},{"line_number":152,"context_line":"class to one instance."},{"line_number":153,"context_line":""},{"line_number":154,"context_line":"The values of \u0027min_unit\u0027 and \u0027step_size\u0027 would be 1."},{"line_number":155,"context_line":""}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_2cd0d350","line":152,"updated":"2019-04-10 21:17:15.000000000","message":"✔","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":153,"context_line":""},{"line_number":154,"context_line":"The values of \u0027min_unit\u0027 and \u0027step_size\u0027 would be 1."},{"line_number":155,"context_line":""},{"line_number":156,"context_line":"The value of \u0027allocation_ratio\u0027 would be 1.0."},{"line_number":157,"context_line":""},{"line_number":158,"context_line":"To give an example, say there are 8 PMEM namespaces of size 128GB,"},{"line_number":159,"context_line":"4 PMEM namespaces of size 
256GB in a system. The response to a `GET`"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_ecc16b82","line":156,"updated":"2019-04-10 21:17:15.000000000","message":"✔","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":185,"context_line":"iteration without NUMA resource providers, they are put directly into"},{"line_number":186,"context_line":"the compute node ROOT resource provider. In future, the movement to NUMA"},{"line_number":187,"context_line":"resource providers shall be done by the methodology exposed by the"},{"line_number":188,"context_line":"placement \u0027/reshaper\u0027 API."},{"line_number":189,"context_line":""},{"line_number":190,"context_line":"PMEM NUMA affinity information is added to the `NUMACell` object to be"},{"line_number":191,"context_line":"used by the numa_topology filter. Please refer to the `Data model impact`_"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_8c6c679d","line":188,"updated":"2019-04-10 21:17:15.000000000","message":"✔","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":193,"context_line":""},{"line_number":194,"context_line":"The existing `InstanceNUMACell` object is also extended to carry information"},{"line_number":195,"context_line":"on PMEM namespace assignment. Please refer to `Data model impact`_"},{"line_number":196,"context_line":"for details."},{"line_number":197,"context_line":""},{"line_number":198,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":199,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_4cd2ff36","line":196,"updated":"2019-04-10 21:17:15.000000000","message":"what about non-NUMA systems or systems that don\u0027t care about NUMA?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":193,"context_line":""},{"line_number":194,"context_line":"The existing `InstanceNUMACell` object is also extended to carry information"},{"line_number":195,"context_line":"on PMEM namespace assignment. Please refer to `Data model impact`_"},{"line_number":196,"context_line":"for details."},{"line_number":197,"context_line":""},{"line_number":198,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":199,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_b45f0640","line":196,"in_reply_to":"3fce034c_4cd2ff36","updated":"2019-04-23 13:39:26.000000000","message":"from a resource tracking point of view all systems will have at least one host numa cell, but for guests, 
given the\nguests will be accessing the pmem via memory mapped io,\ni think we would want this to be numa affined.\n\nthat said i hate the fact we invent a numa topology of 1 when you enable hugepages or cpu pinning so i\u0027m not really a fan of making it implicit.\n\nso i personally would be fine with saying this will only be supported with numa guests.\n\ni can live with \nhw:numa_pmem implying hw:numa_node\u003d1 since numa is in the name so it\u0027s kind of obvious but i would be happier requiring that you specify hw:numa_node\u003d1 explicitly.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"fae3d66e71f587eaa3f87b61b6b644aca6dfd032","unresolved":false,"context_lines":[{"line_number":193,"context_line":""},{"line_number":194,"context_line":"The existing `InstanceNUMACell` object is also extended to carry information"},{"line_number":195,"context_line":"on PMEM namespace assignment. Please refer to `Data model impact`_"},{"line_number":196,"context_line":"for details."},{"line_number":197,"context_line":""},{"line_number":198,"context_line":"Virtual PMEM guest NUMA topology"},{"line_number":199,"context_line":"-----------------------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_805f0d40","line":196,"in_reply_to":"ffb9cba7_b45f0640","updated":"2019-04-24 04:39:57.000000000","message":"Actually, a 1 numa node guest already means you don\u0027t care about the NUMA. \n\nSpecifically, a 1 numa node guest on a 1 numa node host is actually no different from a VM without any NUMA topo defined in the flavor.\n\nIf you are concerned about the complexity for the end user, we can allow the user to write only one \u0027hw:numa_pmem\u003d512GB\u0027 in the extra spec. Then we will create a 1 numa node guest underneath for him. This is just the same as when we only write \u0027hw:cpu_policy\u003ddedicated\u0027 in the extra spec today; it will also create 1 numa node for the guest underneath.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"bfbc7c1120153b37475c0b3ffc2478a277902d38","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_c4ec722b","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"updated":"2019-03-19 17:08:04.000000000","message":"I don\u0027t think this is a good idea. 
The complexity of having to calculate the distribution outweighs the minor convenience to the operator.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"2fbfbecfcb8d6b0b6b195840e0ca046f79887f92","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_61e81c16","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"3fce034c_216f74c6","updated":"2019-04-11 03:09:51.000000000","message":"One more point: libvirt asks us to specify which guest numa node the virtual pmem should be in, so we have to specify which numa node the vpmem is in from the beginning.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"3a67cf83cf35c41039a97c1d3f2521daf612aa53","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_5c5fd640","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"3fce034c_61e81c16","updated":"2019-04-23 11:45:00.000000000","message":"Why are we requiring the use of the NUMATopology and InstanceNUMATopology objects to store this PMEM information? Why not put it in its own object? 
Is this just so we don\u0027t need to add another database field to the instance_extra table?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e93a8772cfcf5e03ce806f49e0d904115b4a7a6e","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_216f74c6","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"3fce034c_ac73634d","updated":"2019-04-11 02:58:42.000000000","message":"if you just add the one extra spec \u0027hw:numa_pmem\u003d1024GB\u0027, then underneath we will create an instance with one numa node, and in that numa node we have 1024GB pmem.\n\nThis follows the pattern we have today: when you only have the one extra spec \u0027hw:cpu_policy\u003ddedicated\u0027, then underneath we create an instance with one numa node.\n\nI don\u0027t think we meant to bind the pmem to NUMA; it is how nova works today, the NUMA topo objs are the only place to track the instance cpu topo stuff.\n\nFrom the use-case side, when people want dedicated CPU or pmem in their instance, they must be looking for performance, so the NUMA affinity should be there; otherwise what is the point of giving them those performance features without NUMA affinity.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"27f445195a2c6d580256ffc5a3b4785dc26e85fc","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_549a666e","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"5fc1f717_5390414f","updated":"2019-03-20 08:07:15.000000000","message":"i agree with rui zang.\nwe should keep this.\nfrom my past experience most people that use\nor attempt to use the hw:numa_mem.X or hw:numa_cpu.X extra specs don\u0027t reason about them correctly when planning their available capacity; as a result it almost always results in memory or cpu fragmentation and leads to under-utilization of the cloud.\n\nsimply setting hw:numa_pmem \u003d 1024GB and dividing the pmem evenly across the numa nodes will almost always be the right thing to do as it has been for memory and cpus to date.\n\nif we were to remove anything i would drop the more explicit 
\nhw:numa_pmem.X.Y syntax as i think people will continue to mess up their capacity planning if we provide it; that said it enables limited usecases that might otherwise not work.\n\neffectively you should only use the hw:numa_pmem.X.Y syntax\nif you have already used the hw:numa_mem.X and/or hw:numa_cpu.X syntax to create an imbalanced numa topology, in which case you may need to allocate more PMEM to the node with the larger allocation of cpus or the smaller/larger allocation of memory depending on your workload","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e61fc182a22891c820f35fc13d4ccb775c216e2c","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_cb58edfb","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"5fc1f717_549a666e","updated":"2019-03-20 11:49:38.000000000","message":"Okay, I didn\u0027t have that context, thanks for the education.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c78bd589e59aebe49dbb9a65c6fd6053752ba8fa","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_5390414f","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"5fc1f717_c4ec722b","updated":"2019-03-20 02:55:23.000000000","message":"Well, I agree it is complex. However, it just follows the existing guest numa topology conventions.\n\nhttps://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/virt-driver-numa-placement.html\n\nQuote: \"The most common case will be that the admin only sets hw:numa_nodes and then the flavor vCPUs and memory will be divided equally across the NUMA nodes. 
\"","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_ac73634d","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"5fc1f717_cb58edfb","updated":"2019-04-10 21:17:15.000000000","message":"What about systems that don\u0027t care about NUMA at all? How would the flavor request some amount of PMEM namespaces? Normally that would be by creating a flavor extra spec like so:\n\n resources:CUSTOM_PMEM_NAMESPACE_20000MB\u003d2\n\nbut it seems that with this setup, there\u0027s no way of using PMEM unless the user also specifies NUMA stuff. :(","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"fae3d66e71f587eaa3f87b61b6b644aca6dfd032","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_602bf993","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"ffb9cba7_3728f884","updated":"2019-04-24 04:39:57.000000000","message":"For the VM, yes, we can put those in another object and in instance_extra table. We also can have NUMA affinity info in that new obj. But I\u0027m not clear why we need a new object? Let me know, I\u0027m happy to change that if that is better for us.\n\nFor the host, yes, we also can have own object instead of NUMATopology. Just like pci stuff probably, also have a field about the numa node mapping in new object. But yes, the same question I\u0027m not clear why we do that. 
Let me know also.\n\nAlso another point, when people want the pmem, I guess they are purchasing high performance, so...I\u0027m confused about whether we should care about the case of no numa affinity.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":221,"context_line":"     hw:numa_pmem.1.0 \u003d 512GB"},{"line_number":222,"context_line":""},{"line_number":223,"context_line":"     Below notation has the same semantics as the above:"},{"line_number":224,"context_line":"     hw:numa_nodes\u003d2"},{"line_number":225,"context_line":"     hw:numa_pmem \u003d 1024GB"},{"line_number":226,"context_line":""},{"line_number":227,"context_line":" Two NUMA nodes, each node has two 512GB vPMEM:"},{"line_number":228,"context_line":"     hw:numa_nodes\u003d2"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_3728f884","line":225,"range":{"start_line":224,"start_character":0,"end_line":225,"end_character":26},"in_reply_to":"ffb9cba7_5c5fd640","updated":"2019-04-23 13:39:26.000000000","message":"we could put it in its own object but i would be a bit concerned by the latency and bandwidth implications of pmem from a remote numa node.\n\nif we assume/require that only numa guests can use pmem then i think it\u0027s ok to reuse the object. if we want to ever support non numa affined guests then a separate object for the instance might be best.\n\nfor the host i would still probably put it in the hostCell object.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":256,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Specifying virtual PMEM guest NUMA topology in image metadata will not"},{"line_number":259,"context_line":"be supported for this iteration."},{"line_number":260,"context_line":""},{"line_number":261,"context_line":"Virtual PMEM Disposal"},{"line_number":262,"context_line":"---------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_0cb157e2","line":259,"updated":"2019-04-10 21:17:15.000000000","message":"I have to say, I was actually supporting this spec pretty much 100% up until the NUMA stuff. 
:( NUMA support has just added so much complexity and weirdness to Nova that to this day, I still question whether it was all worth it...","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e93a8772cfcf5e03ce806f49e0d904115b4a7a6e","unresolved":false,"context_lines":[{"line_number":256,"context_line":"scheduling decisions can be made to ensure least host cross NUMA accesses."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Specifying virtual PMEM guest NUMA topology in image metadata will not"},{"line_number":259,"context_line":"be supported for this iteration."},{"line_number":260,"context_line":""},{"line_number":261,"context_line":"Virtual PMEM Disposal"},{"line_number":262,"context_line":"---------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_a16284d7","line":259,"in_reply_to":"3fce034c_0cb157e2","updated":"2019-04-11 02:58:42.000000000","message":"reply at above","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":262,"context_line":"---------------------"},{"line_number":263,"context_line":"Due to the persistent nature of host PMEM namespaces, the content"},{"line_number":264,"context_line":"of vPMEM in guest will be zeroed out immediately once the vPMEM is no longer"},{"line_number":265,"context_line":"associated with a VM instance. Otherwise there will be security concerns."},{"line_number":266,"context_line":"Since PMEM devices are typically of large size, this may introduce"},{"line_number":267,"context_line":"a performance penalty to guest deletion or any other actions involving"},{"line_number":268,"context_line":"erasing PMEM namespaces."}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_4cdc3f18","line":265,"updated":"2019-04-10 21:17:15.000000000","message":"✔","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":265,"context_line":"associated with a VM instance. 
Otherwise there will be security concerns."},{"line_number":266,"context_line":"Since PMEM devices are typically of large size, this may introduce"},{"line_number":267,"context_line":"a performance penalty to guest deletion or any other actions involving"},{"line_number":268,"context_line":"erasing PMEM namespaces."},{"line_number":269,"context_line":""},{"line_number":270,"context_line":"VM resize"},{"line_number":271,"context_line":"---------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_cccfcf55","line":268,"updated":"2019-04-10 21:17:15.000000000","message":"well, deletion is done asynchronously so it\u0027s not like the user is sitting there waiting on a blocking call for the VM to terminate :) so, I think we\u0027re ok on this one.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"bfbc7c1120153b37475c0b3ffc2478a277902d38","unresolved":false,"context_lines":[{"line_number":281,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":282,"context_line":""},{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_44c16280","line":284,"updated":"2019-03-19 17:08:04.000000000","message":"If false, what happens? I come up and my \"persistent\" storage is now blank?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"5bde632c645fe2f6e030cbb1dd16e318e176a643","unresolved":false,"context_lines":[{"line_number":281,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":282,"context_line":""},{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_e92f7985","line":284,"in_reply_to":"3fce034c_b2ef38af","updated":"2019-04-19 04:09:17.000000000","message":"\u003e A few points...\n \u003e \n \u003e 1) For ANY resize operation, if the new flavor has *fewer* amount\n \u003e of PMEM namespace resources associated with it, I think the API\n \u003e should return an error. Because otherwise we\u0027re going to need to\n \u003e pick which of the namespaces to destroy/zero out, and that\u0027s not\n \u003e possible to do.\n\nYes, that is what L274 and L275 wanted to say, your wording is more clear.\n\n \u003e \n \u003e 2) For resize to same host, I don\u0027t see any use in copying anything\n \u003e at all. If the new flavor has a greater amount of PMEM namespace\n \u003e resources associated with it, then one or more new blank PMEM\n \u003e namespaces should be connected to the resized VM.\n\nIt could not happen. There are basically two ways to make what you described happen:\n1) dynamically grow the existing namespace to the desired size -- blocked by using fixed sized namespaces. 
Even with dynamically allocated namespaces, there are cases where a namespace cannot grow because there is no adjacent free space.\n2) maintain a page-table or LVM-like mapping/grouping of scattered namespaces to a logical namespace. This is not how \"pmem\" type namespaces work. \"pmem\" type namespaces are contiguous.\n\n \u003e \n \u003e 3) For resize to another host, I don\u0027t see any use case for *not*\n \u003e copying all of the PMEM namespace contents to the other host. I\n \u003e mean, this is data after all. We don\u0027t give the user the option to\n \u003e throw away their data on block devices during a resize/migrate\n \u003e operation. I see no reason to allow something like this for this\n \u003e \"special\" type of block device data. If the user doesn\u0027t care about\n \u003e their data being lost, then they shouldn\u0027t resize/migrate. Just\n \u003e start up a new VM of the same flavor.\n\nOne reason to use persistent memory is to persist data across reboots -- just like disk drives but faster. This reason justifies what you said: the data needs to be copied anyway.\nAnother reason is to persist data in case of accidental power failures, so that the data can be recovered.\nSome applications fall into the second category. For example, data caches may live in persistent memory. Upon power failure, the data in the cache that was not flushed out to volumes can be recovered. One instance is the Ceph RBD write cache: https://github.com/ceph/ceph/pull/24066\nFor such applications, a graceful shutdown will flush the data into volumes. There is no need to copy the pmem namespaces, since the applications will reconstruct what they had in the new VM.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":281,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":282,"context_line":""},{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_b2ef38af","line":284,"in_reply_to":"5fc1f717_0bd02250","updated":"2019-04-10 21:17:15.000000000","message":"A few points...\n\n1) For ANY resize operation, if the new flavor has *fewer* amount of PMEM namespace resources associated with it, I think the API should return an error. Because otherwise we\u0027re going to need to pick which of the namespaces to destroy/zero out, and that\u0027s not possible to do.\n\n2) For resize to same host, I don\u0027t see any use in copying anything at all. If the new flavor has a greater amount of PMEM namespace resources associated with it, then one or more new blank PMEM namespaces should be connected to the resized VM.\n\n3) For resize to another host, I don\u0027t see any use case for *not* copying all of the PMEM namespace contents to the other host. I mean, this is data after all. We don\u0027t give the user the option to throw away their data on block devices during a resize/migrate operation. I see no reason to allow something like this for this \"special\" type of block device data. If the user doesn\u0027t care about their data being lost, then they shouldn\u0027t resize/migrate. Just start up a new VM of the same flavor.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"63f49ac479a22035dc97ef9d907f814293d790f7","unresolved":false,"context_lines":[{"line_number":281,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":282,"context_line":""},{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_0bd02250","line":284,"in_reply_to":"5fc1f717_14093edc","updated":"2019-03-21 08:32:18.000000000","message":"I meant that a reboot is an included step of a resize :) If the application will treat the persistent memory as blank anyway upon startup, then the copy can be avoided.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c78bd589e59aebe49dbb9a65c6fd6053752ba8fa","unresolved":false,"context_lines":[{"line_number":281,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":282,"context_line":""},{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_d3e0b1cf","line":284,"in_reply_to":"5fc1f717_44c16280","updated":"2019-03-20 02:55:23.000000000","message":"If both the source and target set this flag to \u0027false\u0027, then the data in the source PMEM is not copied to the target PMEM, which means the target PMEM is in its initial state (zeroed out). Yes, blank.\nIt is the choice of the user. If the content of the PMEM is useless after the resize (reboot), then the time-consuming copy can be avoided by setting the flags.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},
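Jay's first point reduces to an admission check before any resize is accepted. A minimal sketch of that rule follows; the function name, argument shapes, and size labels are hypothetical illustrations, not Nova code::

    # Hypothetical sketch of the "never resize to fewer PMEM namespaces"
    # rule. old_pmem/new_pmem map a namespace size label to the count the
    # flavor requests, e.g. {'PMEM_NAMESPACE_20000MB': 2}.
    def check_pmem_resize(old_pmem, new_pmem):
        for label, old_count in old_pmem.items():
            missing = old_count - new_pmem.get(label, 0)
            if missing > 0:
                # Nova cannot choose which namespace's data to destroy,
                # so the API should refuse the resize outright.
                raise ValueError('resize would drop %d %s namespace(s)'
                                 % (missing, label))

    # Growing is fine; shrinking raises ValueError.
    check_pmem_resize({'PMEM_NAMESPACE_20000MB': 1},
                      {'PMEM_NAMESPACE_20000MB': 2})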
{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"27f445195a2c6d580256ffc5a3b4785dc26e85fc","unresolved":false,"context_lines":[{"line_number":281,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":282,"context_line":""},{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_14093edc","line":284,"in_reply_to":"5fc1f717_d3e0b1cf","updated":"2019-03-20 08:07:15.000000000","message":"A reboot and a resize are very different things.\n\nA reboot with hw:allow_pmem_copy\u003dfalse should not result in the data being erased.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"28b8914f155af4fb7da82859c371fae87e156b93","unresolved":false,"context_lines":[{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"},{"line_number":288,"context_line":"Live migration with PMEM is supported by QEMU."},{"line_number":289,"context_line":"The live migration of instances with virtual PMEMs should work as is."},{"line_number":290,"context_line":"However since virtual PMEM is bound to instance NUMA topology, it should"},{"line_number":291,"context_line":"work after numa-aware-live-migration_ being done."},{"line_number":292,"context_line":""},{"line_number":293,"context_line":"Virtual PMEM Hotplug"},{"line_number":294,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_a66fd890","line":291,"range":{"start_line":286,"start_character":0,"end_line":291,"end_character":49},"updated":"2019-03-20 13:08:15.000000000","message":"This is somewhat tangential, but I\u0027m just curious.\n\nOther than the fact that it does not exist, which admittedly is a significant hurdle :) is there anything in principle that would prevent pmem being used remotely via an NVMeoF-style RDMA-over-ethernet protocol in the future?\n\nIt occurred to me as I re-read this section that, just like the NVMe-over-fabric cinder driver is a thing, remote PMEM devices exposed via cinder or cyborg could conceivably be a thing in the future, even though there would be a latency cost.\n\nOne of the overheads of live migration with pmem devices, as with resize, is copying the pmem data. Unlike resize, we can\u0027t disable that copying with hw:allow_pmem_copy\u003dtrue|false, as it would break the application using the pmem resource if it was wiped while it was running.\n\nSo if live migration is to be supported with large pmem data sets, ideally you would want to avoid moving the data, which prompted the previous question: does a remote pmem device make sense, or does the latency and protocol overhead that even an RDMA-based protocol would add mean it is no longer useful in a 3-tiered storage architecture?\n\nIf remote pmem had similar performance to NVMeoF then it would not make much sense.\n\nAnyway, this is not really germane to the spec, but I was just wondering, since transferring a multi-TB PMEM instance as part of a live migration is probably going to fail, or fail to meet SLAs, just because of how long it will take and the performance degradation the guest will experience.\n\nA 2TB pmem guest would take over 26 minutes to copy the pmem data over a 10Gb link, or four and a half hours over a 1Gb link, so large pmem guests are not going to live migrate quickly.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},
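The transfer times quoted above check out with simple line-rate arithmetic. The sketch below assumes decimal units and a fully saturated link with zero protocol overhead::

    # Back-of-envelope check of the live migration copy times quoted above.
    def transfer_time_seconds(size_tb, link_gbps):
        bits = size_tb * 1e12 * 8              # TB -> bits (decimal units)
        return bits / (link_gbps * 1e9)        # seconds at full line rate

    print(transfer_time_seconds(2, 10) / 60)    # ~26.7 minutes on a 10Gb link
    print(transfer_time_seconds(2, 1) / 3600)   # ~4.4 hours on a 1Gb link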
{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"1e3fb2f2f8b96d547786a59ba68b3e8eaf35755c","unresolved":false,"context_lines":[{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"},{"line_number":288,"context_line":"Live migration with PMEM is supported by QEMU."},{"line_number":289,"context_line":"The live migration of instances with virtual PMEMs should work as is."},{"line_number":290,"context_line":"However since virtual PMEM is bound to instance NUMA topology, it should"},{"line_number":291,"context_line":"work after numa-aware-live-migration_ being done."},{"line_number":292,"context_line":""},{"line_number":293,"context_line":"Virtual PMEM Hotplug"},{"line_number":294,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_d21cfc6b","line":291,"range":{"start_line":286,"start_character":0,"end_line":291,"end_character":49},"in_reply_to":"3fce034c_d9f5c4ee","updated":"2019-04-24 04:43:51.000000000","message":"\u003e If node01\u0027s namespaces have \"region0\" and \"region1\", and node02\n \u003e has \"region0\" and \"region2\", can this server be migrated?\n \u003e What should the conditions for live migration be? Does the host\n \u003e where the server is located have the same configuration? For\n \u003e example, the same NUMA topology, the same PMEM namespace setting\n \u003e (size or name)?\n\nIt will be OK: since we don\u0027t track the region inside Nova, we only track the size of the namespace. If we can find a namespace of the same size on the destination host, everything will be OK.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"},{"line_number":288,"context_line":"Live migration with PMEM is supported by QEMU."},{"line_number":289,"context_line":"The live migration of instances with virtual PMEMs should work as is."},{"line_number":290,"context_line":"However since virtual PMEM is bound to instance NUMA topology, it should"},{"line_number":291,"context_line":"work after numa-aware-live-migration_ being done."},{"line_number":292,"context_line":""},{"line_number":293,"context_line":"Virtual PMEM Hotplug"},{"line_number":294,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_d73ddcbb","line":291,"range":{"start_line":286,"start_character":0,"end_line":291,"end_character":49},"in_reply_to":"3fce034c_d9f5c4ee","updated":"2019-04-23 13:39:26.000000000","message":"You would have to update the libvirt XML on migration, but we do this in other cases.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":26458,"name":"Brin Zhang","email":"zhangbailin@inspur.com","username":"zhangbailin"},"change_message_id":"2d334bf04f861deeec42ec27fca98f35bce57751","unresolved":false,"context_lines":[{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"},{"line_number":288,"context_line":"Live migration with PMEM is supported by QEMU."},{"line_number":289,"context_line":"The live migration of instances with virtual PMEMs should work as is."},{"line_number":290,"context_line":"However since virtual PMEM is bound to instance NUMA topology, it should"},{"line_number":291,"context_line":"work after numa-aware-live-migration_ being done."},{"line_number":292,"context_line":""},{"line_number":293,"context_line":"Virtual PMEM Hotplug"},{"line_number":294,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_d9f5c4ee","line":291,"range":{"start_line":286,"start_character":0,"end_line":291,"end_character":49},"in_reply_to":"5fc1f717_3cd68651","updated":"2019-04-18 06:10:12.000000000","message":"If node01\u0027s namespaces have \"region0\" and \"region1\", and node02 has \"region0\" and \"region2\", can this server be migrated?\nWhat should the conditions for live migration be? Does the host where the server is located have the same configuration? For example, the same NUMA topology, the same PMEM namespace setting (size or name)?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"63f49ac479a22035dc97ef9d907f814293d790f7","unresolved":false,"context_lines":[{"line_number":283,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":284,"context_line":"data in virtual PMEM will be copied."},{"line_number":285,"context_line":""},{"line_number":286,"context_line":"Live Migration"},{"line_number":287,"context_line":"--------------"},{"line_number":288,"context_line":"Live migration with PMEM is supported by QEMU."},{"line_number":289,"context_line":"The live migration of instances with virtual PMEMs should work as is."},{"line_number":290,"context_line":"However since virtual PMEM is bound to instance NUMA topology, it should"},{"line_number":291,"context_line":"work after numa-aware-live-migration_ being done."},{"line_number":292,"context_line":""},{"line_number":293,"context_line":"Virtual PMEM Hotplug"},{"line_number":294,"context_line":"----------------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_3cd68651","line":291,"range":{"start_line":286,"start_character":0,"end_line":291,"end_character":49},"in_reply_to":"5fc1f717_a66fd890","updated":"2019-03-21 08:32:18.000000000","message":"This is an interesting topic :). There are some explorations of persistent memory RDMA (or Persistent Memory over Fabric - PMoF) by various vendors. I believe the standard and productization are still up in the air. There are 3 major use cases of PMoF.\n1st, remote replication, meaning replicating local PMEM to a remote host. This can be helpful for the current live migration.\n2nd, remote persistent memory access. I believe it is also acceptable for the live-migrated VM to access its original PMEM (which is remote after migration).\n3rd, shared persistent memory.\n\nI believe when their day comes, there can be lots of improvements to the current software model.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":298,"context_line":"-----------"},{"line_number":299,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":300,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":301,"context_line":"In future, virtual PMEM images would be stored in glance as a separate"},{"line_number":302,"context_line":"image format.
And flavor extra specs can be used to specify whether"},{"line_number":303,"context_line":"to save virtual PMEM image during VM snapshot."},{"line_number":304,"context_line":""},{"line_number":305,"context_line":"VM shelve/unshelve"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_527854f7","line":302,"range":{"start_line":301,"start_character":0,"end_line":302,"end_character":13},"updated":"2019-04-10 21:17:15.000000000","message":"out of curiosity, what separate image format would this be?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":298,"context_line":"-----------"},{"line_number":299,"context_line":"The current VM snapshots do not include memory images. For the current"},{"line_number":300,"context_line":"phase the virtual PMEM images will not be included in the VM snapshots."},{"line_number":301,"context_line":"In future, virtual PMEM images would be stored in glance as a separate"},{"line_number":302,"context_line":"image format. And flavor extra specs can be used to specify whether"},{"line_number":303,"context_line":"to save virtual PMEM image during VM snapshot."},{"line_number":304,"context_line":""},{"line_number":305,"context_line":"VM shelve/unshelve"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_f76fc0c2","line":302,"range":{"start_line":301,"start_character":0,"end_line":302,"end_character":13},"in_reply_to":"3fce034c_527854f7","updated":"2019-04-23 13:39:26.000000000","message":"Raw?\n\nIf you can map it on the host as a block device, I\u0027m sure you could dd it to a raw image or use qemu-img to create a qcow2.\n\nThere might be a more efficient way, but I don\u0027t see why either of the above would not work in principle.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"bfbc7c1120153b37475c0b3ffc2478a277902d38","unresolved":false,"context_lines":[{"line_number":318,"context_line":" PMEM namespaces can be created/destroyed on the fly as VM"},{"line_number":319,"context_line":" creation/deletion."},{"line_number":320,"context_line":" This ways is more flexible than the fix sized approach, however will"},{"line_number":321,"context_line":" result in fragmentation as detailed in the \"Background\" section."},{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_c4d6b228","line":321,"updated":"2019-03-19 17:08:04.000000000","message":"The impact would be the potential for \"late fail\" of a spawn request. E.g. in the example in the Background section, a request for 150GB would be satisfied by Placement, but then the virt driver\u0027s spawn() method would raise a (reschedulable) exception.\n\nThinking through the ramifications, I grudgingly agree that the fragmentation would eventually become a problem, and solving it (defrag!)
would be pretty hairy.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c78bd589e59aebe49dbb9a65c6fd6053752ba8fa","unresolved":false,"context_lines":[{"line_number":318,"context_line":" PMEM namespaces can be created/destroyed on the fly as VM"},{"line_number":319,"context_line":" creation/deletion."},{"line_number":320,"context_line":" This ways is more flexible than the fix sized approach, however will"},{"line_number":321,"context_line":" result in fragmentation as detailed in the \"Background\" section."},{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_335ed545","line":321,"in_reply_to":"5fc1f717_c4d6b228","updated":"2019-03-20 02:55:23.000000000","message":"Thank you!","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"bfbc7c1120153b37475c0b3ffc2478a277902d38","unresolved":false,"context_lines":[{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"},{"line_number":325,"context_line":"A new PMENNamespace object is introduced to track each physical PMEM"},{"line_number":326,"context_line":"namespace::"},{"line_number":327,"context_line":""},{"line_number":328,"context_line":" +class PMEMNamespace(base.NovaObject,"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_a45e06e7","line":325,"range":{"start_line":325,"start_character":2,"end_line":325,"end_character":19},"updated":"2019-03-19 17:08:04.000000000","message":"Kind of wondering whether we really need this. Isn\u0027t it duplicating information we\u0027re putting in Placement?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"19ee805087bc149bcd54c8cfb90a323dcb1f87da","unresolved":false,"context_lines":[{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"},{"line_number":325,"context_line":"A new PMENNamespace object is introduced to track each physical PMEM"},{"line_number":326,"context_line":"namespace::"},{"line_number":327,"context_line":""},{"line_number":328,"context_line":" +class PMEMNamespace(base.NovaObject,"}],"source_content_type":"text/x-rst","patch_set":12,"id":"ffb9cba7_b7dce874","line":325,"range":{"start_line":325,"start_character":2,"end_line":325,"end_character":19},"in_reply_to":"3fce034c_7266b054","updated":"2019-04-23 13:39:26.000000000","message":"Well, PMEM is memory, and in the NVDIMM form factor it literally plugs into the same DIMM slots as your RAM and is accessed via memory-mapped IO.\n\nSo from a NUMA perspective it\u0027s identical to RAM.\n\nYou can get Optane memory in other form factors, but I do not believe you can use it as pmem storage when connected over PCIe.\n\nSo I think of pmem as RAM that does not lose its contents when you reboot. In that context, why would it not have NUMA effects like all the other DIMMs beside it on the same bus?\n\nIf we wanted to drop the NUMA stuff in v1 of this we could, but if people deploy this we will need to add NUMA affinity in U, because the performance will suck without it.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"28b8914f155af4fb7da82859c371fae87e156b93","unresolved":false,"context_lines":[{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"},{"line_number":325,"context_line":"A new PMENNamespace object is introduced to track each physical PMEM"},{"line_number":326,"context_line":"namespace::"},{"line_number":327,"context_line":""},{"line_number":328,"context_line":" +class PMEMNamespace(base.NovaObject,"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_8b02e53d","line":325,"range":{"start_line":325,"start_character":2,"end_line":325,"end_character":19},"in_reply_to":"5fc1f717_332335c1","updated":"2019-03-20 13:08:15.000000000","message":"Moving NUMA to placement is exceedingly non-trivial.\n\nIt affects the modeling of almost every resource provider on a compute node, requires a reshape, and will effectively require all instances going forward to have an implicit NUMA topology of 1 when an explicit NUMA topology is not specified.\n\nThis is a behavior change that prevents some use cases while fixing some long-standing sharp edges.\n\nAll that is to say, I would not depend on NUMA in placement in Train and would instead extend the NUMA resource tracker to understand pmem. Long term, placement may be the answer, but I\u0027m increasingly thinking that NUMA in placement should be addressed in U unless we decide it is a very high priority for the Train cycle. There is still a lot of ground work to do to make NUMA modeling in placement possible.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":7,"name":"Jay Pipes","email":"jaypipes@gmail.com","username":"jaypipes"},"change_message_id":"f2979f0c2452a3c98aea28e6fef632e03070bc7b","unresolved":false,"context_lines":[{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"},{"line_number":325,"context_line":"A new PMENNamespace object is introduced to track each physical PMEM"},{"line_number":326,"context_line":"namespace::"},{"line_number":327,"context_line":""},{"line_number":328,"context_line":" +class PMEMNamespace(base.NovaObject,"}],"source_content_type":"text/x-rst","patch_set":12,"id":"3fce034c_7266b054","line":325,"range":{"start_line":325,"start_character":2,"end_line":325,"end_character":19},"in_reply_to":"5fc1f717_8b02e53d","updated":"2019-04-10 21:17:15.000000000","message":"I\u0027m still not sure why we\u0027re coupling PMEM namespaces with NUMA at all.
We seem to be making the same coupling mistake that was made with CPU pinning and NUMA.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"be4235ce0f621c371239611b9677de041376e62e","unresolved":false,"context_lines":[{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"},{"line_number":325,"context_line":"A new PMENNamespace object is introduced to track each physical PMEM"},{"line_number":326,"context_line":"namespace::"},{"line_number":327,"context_line":""},{"line_number":328,"context_line":" +class PMEMNamespace(base.NovaObject,"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_bcc81a74","line":325,"range":{"start_line":325,"start_character":2,"end_line":325,"end_character":19},"in_reply_to":"5fc1f717_8b02e53d","updated":"2019-03-22 08:06:42.000000000","message":"Thank you for the insights.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"c78bd589e59aebe49dbb9a65c6fd6053752ba8fa","unresolved":false,"context_lines":[{"line_number":322,"context_line":""},{"line_number":323,"context_line":"Data model impact"},{"line_number":324,"context_line":"-----------------"},{"line_number":325,"context_line":"A new PMENNamespace object is introduced to track each physical PMEM"},{"line_number":326,"context_line":"namespace::"},{"line_number":327,"context_line":""},{"line_number":328,"context_line":" +class PMEMNamespace(base.NovaObject,"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_332335c1","line":325,"range":{"start_line":325,"start_character":2,"end_line":325,"end_character":19},"in_reply_to":"5fc1f717_a45e06e7","updated":"2019-03-20 02:55:23.000000000","message":"NUMA topology hasn\u0027t been moved to placement yet. You are right that this information is not needed once numa topology made placement. 
Perhaps the removal of this should happen in the same patch that moves NUMA topology to placement.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"28b8914f155af4fb7da82859c371fae87e156b93","unresolved":false,"context_lines":[{"line_number":331,"context_line":" +    VERSION \u003d \"1.0\""},{"line_number":332,"context_line":" +"},{"line_number":333,"context_line":" +    fields \u003d {"},{"line_number":334,"context_line":" +        \u0027uuid\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":335,"context_line":" +        \u0027name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":336,"context_line":" +        \u0027region\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":337,"context_line":" +        \u0027dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":338,"context_line":" +        \u0027size_mb\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":339,"context_line":" +        \u0027alignment\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":340,"context_line":" +        \u0027assigned\u0027: fields.BooleanField(default\u003dFalse),"},{"line_number":341,"context_line":" +    }"},{"line_number":342,"context_line":""}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_e6eac0aa","line":339,"range":{"start_line":334,"start_character":9,"end_line":339,"end_character":58},"updated":"2019-03-20 13:08:15.000000000","message":"Nit: I\u0027m not sure these all need to be nullable.\n\nDoes it ever make sense to have a pmem device without a size or a uuid?\n\nWe could tweak this in the implementation, so I don\u0027t want to rat-hole on it too much, but this will presumably be stored in the existing NUMA topology blob, so we will not be using this object to generate new db tables. The nullable argument is only used by oslo.db, as you can have non-nullable fields and still have them unset.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"be4235ce0f621c371239611b9677de041376e62e","unresolved":false,"context_lines":[{"line_number":331,"context_line":" +    VERSION \u003d \"1.0\""},{"line_number":332,"context_line":" +"},{"line_number":333,"context_line":" +    fields \u003d {"},{"line_number":334,"context_line":" +        \u0027uuid\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":335,"context_line":" +        \u0027name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":336,"context_line":" +        \u0027region\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":337,"context_line":" +        \u0027dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":338,"context_line":" +        \u0027size_mb\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":339,"context_line":" +        \u0027alignment\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":340,"context_line":" +        \u0027assigned\u0027: fields.BooleanField(default\u003dFalse),"},{"line_number":341,"context_line":" +    }"},{"line_number":342,"context_line":""}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_1ce12efd","line":339,"range":{"start_line":334,"start_character":9,"end_line":339,"end_character":58},"in_reply_to":"5fc1f717_e6eac0aa","updated":"2019-03-22 08:06:42.000000000","message":"Hmm, after revisiting the code, these values should not be nullable. Will be corrected in the next patchset.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"28b8914f155af4fb7da82859c371fae87e156b93","unresolved":false,"context_lines":[{"line_number":357,"context_line":" +    VERSION \u003d \"1.0\""},{"line_number":358,"context_line":" +"},{"line_number":359,"context_line":" +    fields \u003d {"},{"line_number":360,"context_line":" +        \u0027id\u0027: obj_fields.IntegerField(),"},{"line_number":361,"context_line":" +        \u0027size_mb\u0027: obj_fields.IntegerField(),"},{"line_number":362,"context_line":" +        \u0027backend_dev\u0027: obj_fields.StringField(default\u003dNone),"},{"line_number":363,"context_line":" +        \u0027backend_ns_uuid\u0027: obj_fields.StringField(default\u003dNone),"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_66efb0b0","line":360,"range":{"start_line":360,"start_character":10,"end_line":360,"end_character":42},"updated":"2019-03-20 13:08:15.000000000","message":"What does this id field map to?","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"be4235ce0f621c371239611b9677de041376e62e","unresolved":false,"context_lines":[{"line_number":357,"context_line":" +    VERSION \u003d \"1.0\""},{"line_number":358,"context_line":" +"},{"line_number":359,"context_line":" +    fields \u003d {"},{"line_number":360,"context_line":" +        \u0027id\u0027: obj_fields.IntegerField(),"},{"line_number":361,"context_line":" +        \u0027size_mb\u0027: obj_fields.IntegerField(),"},{"line_number":362,"context_line":" +        \u0027backend_dev\u0027: obj_fields.StringField(default\u003dNone),"},{"line_number":363,"context_line":" +        \u0027backend_ns_uuid\u0027: obj_fields.StringField(default\u003dNone),"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_9cd51e1e","line":360,"range":{"start_line":360,"start_character":10,"end_line":360,"end_character":42},"in_reply_to":"5fc1f717_66efb0b0","updated":"2019-03-22 08:06:42.000000000","message":"Good catch! This will be removed in the next patchset.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"28b8914f155af4fb7da82859c371fae87e156b93","unresolved":false,"context_lines":[{"line_number":360,"context_line":" +        \u0027id\u0027: obj_fields.IntegerField(),"},{"line_number":361,"context_line":" +        \u0027size_mb\u0027: obj_fields.IntegerField(),"},{"line_number":362,"context_line":" +        \u0027backend_dev\u0027: obj_fields.StringField(default\u003dNone),"},{"line_number":363,"context_line":" +        \u0027backend_ns_uuid\u0027: obj_fields.StringField(default\u003dNone),"},{"line_number":364,"context_line":" +    }"},{"line_number":365,"context_line":""},{"line_number":366,"context_line":""}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_66d6d0f4","line":363,"range":{"start_line":363,"start_character":40,"end_line":363,"end_character":51},"updated":"2019-03-20 13:08:15.000000000","message":"We have a uuid field that we should use for uuids...\n\nThe same comment applies to line 334 above, actually.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"be4235ce0f621c371239611b9677de041376e62e","unresolved":false,"context_lines":[{"line_number":360,"context_line":" +        \u0027id\u0027: obj_fields.IntegerField(),"},{"line_number":361,"context_line":" +        \u0027size_mb\u0027: obj_fields.IntegerField(),"},{"line_number":362,"context_line":" +        \u0027backend_dev\u0027: obj_fields.StringField(default\u003dNone),"},{"line_number":363,"context_line":" +        \u0027backend_ns_uuid\u0027: obj_fields.StringField(default\u003dNone),"},{"line_number":364,"context_line":" +    }"},{"line_number":365,"context_line":""},{"line_number":366,"context_line":""}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_3cdc2a45","line":363,"range":{"start_line":363,"start_character":40,"end_line":363,"end_character":51},"in_reply_to":"5fc1f717_66d6d0f4","updated":"2019-03-22 08:06:42.000000000","message":"Yes, will change both to UUIDField","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},
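Folding this feedback together, the object would presumably end up looking something like the sketch below: required fields no longer nullable, the UUID held in a ``UUIDField``, and the redundant ``id`` dropped. This is an illustration based on the spec draft and the comments above, not the merged implementation::

    # Illustrative sketch only: PMEMNamespace with the review feedback
    # applied (non-nullable fields, fields.UUIDField, no separate 'id').
    from nova.objects import base
    from nova.objects import fields


    @base.NovaObjectRegistry.register
    class PMEMNamespace(base.NovaObject):
        VERSION = '1.0'

        fields = {
            'uuid': fields.UUIDField(),
            'name': fields.StringField(),
            'region': fields.IntegerField(),
            'dev': fields.StringField(),
            'size_mb': fields.IntegerField(),
            'alignment': fields.IntegerField(),
            'assigned': fields.BooleanField(default=False),
        }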
{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"28b8914f155af4fb7da82859c371fae87e156b93","unresolved":false,"context_lines":[{"line_number":367,"context_line":"InstanceNUMACell object is updated with::"},{"line_number":368,"context_line":""},{"line_number":369,"context_line":" InstanceNUMACell"},{"line_number":370,"context_line":" +        \u0027virtual_pmems\u0027: obj_fields.ListOfObjectsField(\u0027VirtualPMEM\u0027,"},{"line_number":371,"context_line":" +            nullable\u003dTrue),"},{"line_number":372,"context_line":""},{"line_number":373,"context_line":""},{"line_number":374,"context_line":"REST API impact"}],"source_content_type":"text/x-rst","patch_set":12,"id":"5fc1f717_e6d400c0","line":371,"range":{"start_line":370,"start_character":11,"end_line":371,"end_character":29},"updated":"2019-03-20 13:08:15.000000000","message":"This is stored in the instance_extra table as a JSON blob as well, so I\u0027m not sure nullable\u003dTrue is needed here either. I could be wrong, but I think I have been told in the past that this is not needed for these objects.\n\nMaybe Dan or Jay can confirm, or anyone that knows the ovo/oslo.db interaction for that matter.","commit_id":"9b9d8895418ca408f12a1211e2ecfde35c27cb3c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":28,"context_line":"as the backend of virtual persistent memory.
As far as Nova is concerned,"},{"line_number":29,"context_line":"several problems need to be addressed:"},{"line_number":30,"context_line":""},{"line_number":31,"context_line":" * How is the physical persistent memory managed and presented as"},{"line_number":32,"context_line":"   virtual persistent memory"},{"line_number":33,"context_line":" * The discovery and resource tracking of persistent memory"},{"line_number":34,"context_line":" * How does the user specify the desired amount of virtual persistent"},{"line_number":35,"context_line":"   memory"},{"line_number":36,"context_line":" * What is the life cycle of virtual persistent memory"},{"line_number":37,"context_line":""},{"line_number":38,"context_line":"Use Cases"},{"line_number":39,"context_line":"---------"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b8a1e948","line":36,"range":{"start_line":31,"start_character":0,"end_line":36,"end_character":54},"in_reply_to":"bfb3d3c7_ba45b219","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Besides data persistence, persistent memory is less expensive than DRAM"},{"line_number":44,"context_line":"and comes with much larger capacities. This is an appealing feature for"},{"line_number":45,"context_line":"scenarios that request huge amount of memory such as high performance"},{"line_number":46,"context_line":"computing (HPC)."},{"line_number":47,"context_line":""},{"line_number":48,"context_line":"There has been some exploriation by applications who heavily use memory"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_fa648abb","line":45,"range":{"start_line":45,"start_character":23,"end_line":45,"end_character":34},"updated":"2019-05-20 18:39:40.000000000","message":"\u0027huge amounts\u0027 or \u0027a huge amount\u0027","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":42,"context_line":""},{"line_number":43,"context_line":"Besides data persistence, persistent memory is less expensive than DRAM"},{"line_number":44,"context_line":"and comes with much larger capacities. 
This is an appealing feature for"},{"line_number":45,"context_line":"scenarios that request huge amount of memory such as high performance"},{"line_number":46,"context_line":"computing (HPC)."},{"line_number":47,"context_line":""},{"line_number":48,"context_line":"There has been some exploriation by applications who heavily use memory"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_588a2dbf","line":45,"range":{"start_line":45,"start_character":23,"end_line":45,"end_character":34},"in_reply_to":"bfb3d3c7_fa648abb","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":45,"context_line":"scenarios that request huge amount of memory such as high performance"},{"line_number":46,"context_line":"computing (HPC)."},{"line_number":47,"context_line":""},{"line_number":48,"context_line":"There has been some exploriation by applications who heavily use memory"},{"line_number":49,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_,"},{"line_number":50,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":51,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_9a59ceff","line":48,"range":{"start_line":48,"start_character":20,"end_line":48,"end_character":32},"updated":"2019-05-20 18:39:40.000000000","message":"exploration","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":45,"context_line":"scenarios that request huge amount of memory such as high performance"},{"line_number":46,"context_line":"computing (HPC)."},{"line_number":47,"context_line":""},{"line_number":48,"context_line":"There has been some exploriation by applications who heavily use memory"},{"line_number":49,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_,"},{"line_number":50,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":51,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_5a735681","line":48,"range":{"start_line":48,"start_character":49,"end_line":48,"end_character":52},"updated":"2019-05-20 18:39:40.000000000","message":"which","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":45,"context_line":"scenarios that request huge amount of memory such as high performance"},{"line_number":46,"context_line":"computing (HPC)."},{"line_number":47,"context_line":""},{"line_number":48,"context_line":"There has been some exploriation by applications who heavily use memory"},{"line_number":49,"context_line":"devices such as in memory databases. 
To name a few: redis_, rocksdb_,"},{"line_number":50,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":51,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_388df9d2","line":48,"range":{"start_line":48,"start_character":49,"end_line":48,"end_character":52},"in_reply_to":"bfb3d3c7_5a735681","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":45,"context_line":"scenarios that request huge amount of memory such as high performance"},{"line_number":46,"context_line":"computing (HPC)."},{"line_number":47,"context_line":""},{"line_number":48,"context_line":"There has been some exploriation by applications who heavily use memory"},{"line_number":49,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_,"},{"line_number":50,"context_line":"oracle_, `SAP HANA`_ and Aerospike_."},{"line_number":51,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_d8bffd5e","line":48,"range":{"start_line":48,"start_character":20,"end_line":48,"end_character":32},"in_reply_to":"bfb3d3c7_9a59ceff","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":73,"context_line":"Persistent memory must be partitioned into `pmem namespaces`_ for"},{"line_number":74,"context_line":"applications to use. There are several modes of pmem namespaces for"},{"line_number":75,"context_line":"different use scenarios. Mode ``devdax`` and mode ``fsdax`` both"},{"line_number":76,"context_line":"support ``direct access``. Mode ``devdax`` gives out a charactor"},{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_baaff292","line":76,"range":{"start_line":76,"start_character":55,"end_line":76,"end_character":64},"updated":"2019-05-20 18:39:40.000000000","message":"character","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":73,"context_line":"Persistent memory must be partitioned into `pmem namespaces`_ for"},{"line_number":74,"context_line":"applications to use. There are several modes of pmem namespaces for"},{"line_number":75,"context_line":"different use scenarios. Mode ``devdax`` and mode ``fsdax`` both"},{"line_number":76,"context_line":"support ``direct access``. Mode ``devdax`` gives out a charactor"},{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. 
Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b8ba496b","line":76,"range":{"start_line":76,"start_character":55,"end_line":76,"end_character":64},"in_reply_to":"bfb3d3c7_baaff292","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":74,"context_line":"applications to use. There are several modes of pmem namespaces for"},{"line_number":75,"context_line":"different use scenarios. Mode ``devdax`` and mode ``fsdax`` both"},{"line_number":76,"context_line":"support ``direct access``. Mode ``devdax`` gives out a charactor"},{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"},{"line_number":80,"context_line":"can be formated. Applications can mmap() discrete files from such"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_1a9e3e63","line":77,"range":{"start_line":77,"start_character":11,"end_line":77,"end_character":13},"updated":"2019-05-20 18:39:40.000000000","message":"a","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":74,"context_line":"applications to use. There are several modes of pmem namespaces for"},{"line_number":75,"context_line":"different use scenarios. Mode ``devdax`` and mode ``fsdax`` both"},{"line_number":76,"context_line":"support ``direct access``. Mode ``devdax`` gives out a charactor"},{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"},{"line_number":80,"context_line":"can be formated. Applications can mmap() discrete files from such"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_58b30d99","line":77,"range":{"start_line":77,"start_character":11,"end_line":77,"end_character":13},"in_reply_to":"bfb3d3c7_1a9e3e63","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":76,"context_line":"support ``direct access``. Mode ``devdax`` gives out a charactor"},{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"},{"line_number":80,"context_line":"can be formated. 
Applications can mmap() discrete files from such"},{"line_number":81,"context_line":"file systems into their address spaces. Mode ``fsdax`` is known"},{"line_number":82,"context_line":"to have issues with VMs that have physical devices passed through."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_fa8eca2c","line":79,"range":{"start_line":79,"start_character":4,"end_line":79,"end_character":6},"updated":"2019-05-20 18:39:40.000000000","message":"a","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":76,"context_line":"support ``direct access``. Mode ``devdax`` gives out a charactor"},{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"},{"line_number":80,"context_line":"can be formated. Applications can mmap() discrete files from such"},{"line_number":81,"context_line":"file systems into their address spaces. Mode ``fsdax`` is known"},{"line_number":82,"context_line":"to have issues with VMs that have physical devices passed through."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_38a65955","line":79,"range":{"start_line":79,"start_character":4,"end_line":79,"end_character":6},"in_reply_to":"bfb3d3c7_fa8eca2c","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"},{"line_number":80,"context_line":"can be formated. Applications can mmap() discrete files from such"},{"line_number":81,"context_line":"file systems into their address spaces. Mode ``fsdax`` is known"},{"line_number":82,"context_line":"to have issues with VMs that have physical devices passed through."},{"line_number":83,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_5a7d961a","line":80,"range":{"start_line":80,"start_character":7,"end_line":80,"end_character":15},"updated":"2019-05-20 18:39:40.000000000","message":"formatted","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":77,"context_line":"device for an namespace, thus applications can mmap() the entire"},{"line_number":78,"context_line":"namespace into their address spaces. Whereas mode ``fsdax`` gives"},{"line_number":79,"context_line":"out an block device, on top of which a ``DAX`` capable file system"},{"line_number":80,"context_line":"can be formated. 
Applications can mmap() discrete files from such"},{"line_number":81,"context_line":"file systems into their address spaces. Mode ``fsdax`` is known"},{"line_number":82,"context_line":"to have issues with VMs that have physical devices passed through."},{"line_number":83,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_7875b1ca","line":80,"range":{"start_line":80,"start_character":7,"end_line":80,"end_character":15},"in_reply_to":"bfb3d3c7_5a7d961a","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":141,"context_line":""},{"line_number":142,"context_line":"Nova compute agent reports persistent memory namespace resource to"},{"line_number":143,"context_line":"placement service. Custom Resource Classes are used to represent"},{"line_number":144,"context_line":"persistent memory namespace resource. The naming conversion of the"},{"line_number":145,"context_line":"Custom Resource Classes being used is::"},{"line_number":146,"context_line":""},{"line_number":147,"context_line":" CUSTOM_PMEM_NAMESPACE_$CAPACITY"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_dadc261a","line":144,"range":{"start_line":144,"start_character":49,"end_line":144,"end_character":59},"updated":"2019-05-20 18:39:40.000000000","message":"convention","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":141,"context_line":""},{"line_number":142,"context_line":"Nova compute agent reports persistent memory namespace resource to"},{"line_number":143,"context_line":"placement service. Custom Resource Classes are used to represent"},{"line_number":144,"context_line":"persistent memory namespace resource. 
The naming conversion of the"},{"line_number":145,"context_line":"Custom Resource Classes being used is::"},{"line_number":146,"context_line":""},{"line_number":147,"context_line":" CUSTOM_PMEM_NAMESPACE_$CAPACITY"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_d1234155","line":144,"range":{"start_line":144,"start_character":49,"end_line":144,"end_character":59},"in_reply_to":"bfb3d3c7_dadc261a","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":219,"context_line":"Virtual persistent memory information is added to guest hardware flavor"},{"line_number":220,"context_line":"extra specs in the form of::"},{"line_number":221,"context_line":""},{"line_number":222,"context_line":" hw:pmem.[pmem index] \u003d capacity"},{"line_number":223,"context_line":" `capacity` is in the same set of values configured by admin for"},{"line_number":224,"context_line":" the capacities of physical persistent memory namespaces."},{"line_number":225,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_5a55964f","line":222,"range":{"start_line":222,"start_character":9,"end_line":222,"end_character":21},"updated":"2019-05-20 18:39:40.000000000","message":"this is pretty confusing, since \".$N\" is elsewhere associated with a NUMA node index. Could we instead consider something like:\n\n hw:pmem \u003d $capacity,$capacity,...","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"b2582d517aabbb7b5da271bbe0cd65522f557230","unresolved":false,"context_lines":[{"line_number":219,"context_line":"Virtual persistent memory information is added to guest hardware flavor"},{"line_number":220,"context_line":"extra specs in the form of::"},{"line_number":221,"context_line":""},{"line_number":222,"context_line":" hw:pmem.[pmem index] \u003d capacity"},{"line_number":223,"context_line":" `capacity` is in the same set of values configured by admin for"},{"line_number":224,"context_line":" the capacities of physical persistent memory namespaces."},{"line_number":225,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_660ea733","line":222,"range":{"start_line":222,"start_character":9,"end_line":222,"end_character":21},"in_reply_to":"bfb3d3c7_17d6fbf2","updated":"2019-05-22 01:56:39.000000000","message":"\u003e \u003e The plan is when we have numa support in the future, we can\n \u003e extend\n \u003e \u003e this to hw:pmem.[pmem index] \u003d [list of capacity]\n \u003e \n \u003e You mean hw:pmem.[NUMA index] \u003d [list of capacity], right? 
The\n \u003e point being not to have .$N meaning two different things.\n\nSorry, I didn\u0027t get what you mean about two different things.\n\nSo the summary is:\n\nFor a guest that has a NUMA topology, the \"hw:pmem \u003d [list of capacity]\" is actually \"hw:pmem.0 \u003d [list of capacity]\".\n\nFor a guest that has no NUMA topology, \"hw:pmem \u003d [list of capacity]\" is used to support the guest without NUMA.\n\n \u003e \n \u003e (I see an earlier revision using two numbers representing\n \u003e ${NUMA}.${PMEM}; but I\u0027m still in favor of leaving the ${PMEM} part\n \u003e off and instead enumerating the namespaces in list form in the\n \u003e value.)\n\nYes, we changed that, since we need a way which is extendable to support NUMA in the next step.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e3c4634c564d15e7085328e1b6678a411bfbca9c","unresolved":false,"context_lines":[{"line_number":219,"context_line":"Virtual persistent memory information is added to guest hardware flavor"},{"line_number":220,"context_line":"extra specs in the form of::"},{"line_number":221,"context_line":""},{"line_number":222,"context_line":" hw:pmem.[pmem index] \u003d capacity"},{"line_number":223,"context_line":" `capacity` is in the same set of values configured by admin for"},{"line_number":224,"context_line":" the capacities of physical persistent memory namespaces."},{"line_number":225,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_17d6fbf2","line":222,"range":{"start_line":222,"start_character":9,"end_line":222,"end_character":21},"in_reply_to":"bfb3d3c7_5a55964f","updated":"2019-05-21 19:35:45.000000000","message":"\u003e The plan is when we have numa support in the future, we can extend\n \u003e this to hw:pmem.[pmem index] \u003d [list of capacity]\n\nYou mean hw:pmem.[NUMA index] \u003d [list of capacity], right? 
The point being not to have .$N meaning two different things.\n\n(I see an earlier revision using two numbers representing ${NUMA}.${PMEM}; but I\u0027m still in favor of leaving the ${PMEM} part off and instead enumerating the namespaces in list form in the value.)","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":219,"context_line":"Virtual persistent memory information is added to guest hardware flavor"},{"line_number":220,"context_line":"extra specs in the form of::"},{"line_number":221,"context_line":""},{"line_number":222,"context_line":" hw:pmem.[pmem index] \u003d capacity"},{"line_number":223,"context_line":" `capacity` is in the same set of values configured by admin for"},{"line_number":224,"context_line":" the capacities of physical persistent memory namespaces."},{"line_number":225,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_9105c916","line":222,"range":{"start_line":222,"start_character":9,"end_line":222,"end_character":21},"in_reply_to":"bfb3d3c7_5a55964f","updated":"2019-05-27 05:56:51.000000000","message":"Changed to hw:pmem \u003d $RCNAME[,$RCNAME]","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e237743b29cee2c5b5b5c14e0880e44f9fa9a62e","unresolved":false,"context_lines":[{"line_number":219,"context_line":"Virtual persistent memory information is added to guest hardware flavor"},{"line_number":220,"context_line":"extra specs in the form of::"},{"line_number":221,"context_line":""},{"line_number":222,"context_line":" hw:pmem.[pmem index] \u003d capacity"},{"line_number":223,"context_line":" `capacity` is in the same set of values configured by admin for"},{"line_number":224,"context_line":" the capacities of physical persistent memory namespaces."},{"line_number":225,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_1d606ea5","line":222,"range":{"start_line":222,"start_character":9,"end_line":222,"end_character":21},"in_reply_to":"bfb3d3c7_5a55964f","updated":"2019-05-21 06:47:08.000000000","message":"Yes, that\u0027s what we thought after the PTG also; I guess Rui forgot to update this.\n\nThe plan is when we have numa support in the future, we can extend this to hw:pmem.[pmem index] \u003d [list of capacity]\n\nIt will be great to explain this roadmap in the spec.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":225,"context_line":""},{"line_number":226,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":227,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"},{"line_number":228,"context_line":"is specified in the flavor, the guest virtual persistent memory"},{"line_number":229,"context_line":"devices are put under guest NUMA node 0. 
If guest NUMA topology is not"},{"line_number":230,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":231,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":232,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_7ad73ac1","line":229,"range":{"start_line":228,"start_character":28,"end_line":229,"end_character":40},"updated":"2019-05-20 18:39:40.000000000","message":"oh, yeah, especially given this, having hw:pmem.$N is going to blow operators\u0027 minds.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e237743b29cee2c5b5b5c14e0880e44f9fa9a62e","unresolved":false,"context_lines":[{"line_number":227,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"},{"line_number":228,"context_line":"is specified in the flavor, the guest virtual persistent memory"},{"line_number":229,"context_line":"devices are put under guest NUMA node 0. If guest NUMA topology is not"},{"line_number":230,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":231,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":232,"context_line":""},{"line_number":233,"context_line":"Examples::"},{"line_number":234,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_bdf5a2cd","line":231,"range":{"start_line":230,"start_character":25,"end_line":231,"end_character":65},"updated":"2019-05-21 06:47:08.000000000","message":"Maybe we should make clear that this is different from a guest which specifies \u0027hw:numa_nodes\u003d1\u0027. This guest won\u0027t be scheduled to a host NUMA node. It is still a guest whose vCPUs are floating on all the pCPUs on the host; we just create a guest NUMA node for the guest.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":227,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"},{"line_number":228,"context_line":"is specified in the flavor, the guest virtual persistent memory"},{"line_number":229,"context_line":"devices are put under guest NUMA node 0. 
If guest NUMA topology is not"},{"line_number":230,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":231,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":232,"context_line":""},{"line_number":233,"context_line":"Examples::"},{"line_number":234,"context_line":""}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b1024d0a","line":231,"range":{"start_line":230,"start_character":25,"end_line":231,"end_character":65},"in_reply_to":"bfb3d3c7_bdf5a2cd","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":236,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":237,"context_line":"     hw:pmem.0 \u003d 512GB"},{"line_number":238,"context_line":""},{"line_number":239,"context_line":" One NUMA node, two 512GB virutal persistent memory:"},{"line_number":240,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":241,"context_line":"     hw:pmem.0 \u003d 512GB"},{"line_number":242,"context_line":"     hw:pmem.1 \u003d 512GB"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_7a009a35","line":239,"range":{"start_line":239,"start_character":26,"end_line":239,"end_character":33},"updated":"2019-05-20 18:39:40.000000000","message":"virtual","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":236,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":237,"context_line":"     hw:pmem.0 \u003d 512GB"},{"line_number":238,"context_line":""},{"line_number":239,"context_line":" One NUMA node, two 512GB virutal persistent memory:"},{"line_number":240,"context_line":"     hw:numa_nodes \u003d 1"},{"line_number":241,"context_line":"     hw:pmem.0 \u003d 512GB"},{"line_number":242,"context_line":"     hw:pmem.1 \u003d 512GB"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_74060320","line":239,"range":{"start_line":239,"start_character":26,"end_line":239,"end_character":33},"in_reply_to":"bfb3d3c7_7a009a35","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":258,"context_line":""},{"line_number":259,"context_line":".. important ::"},{"line_number":260,"context_line":"    Qemu does not support backing one virtual persistent memory device"},{"line_number":261,"context_line":"    by multiple physical persistent memory namespaces, no matter they"},{"line_number":262,"context_line":"    are contiguous or not. 
So any virtual persistent memory device"},{"line_number":263,"context_line":"    requested by guests is backed by one physical persistent memory"},{"line_number":264,"context_line":"    namespace of the exact same capacity."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_7a85faac","line":261,"range":{"start_line":261,"start_character":55,"end_line":261,"end_character":64},"updated":"2019-05-20 18:39:40.000000000","message":"no matter whether","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":258,"context_line":""},{"line_number":259,"context_line":".. important ::"},{"line_number":260,"context_line":"    Qemu does not support backing one virtual persistent memory device"},{"line_number":261,"context_line":"    by multiple physical persistent memory namespaces, no matter they"},{"line_number":262,"context_line":"    are contiguous or not. So any virtual persistent memory device"},{"line_number":263,"context_line":"    requested by guests is backed by one physical persistent memory"},{"line_number":264,"context_line":"    namespace of the exact same capacity."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_1407871a","line":261,"range":{"start_line":261,"start_character":55,"end_line":261,"end_character":64},"in_reply_to":"bfb3d3c7_7a85faac","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":263,"context_line":"    requested by guests is backed by one physical persistent memory"},{"line_number":264,"context_line":"    namespace of the exact same capacity."},{"line_number":265,"context_line":""},{"line_number":266,"context_line":"The extra specs are translated to placement API requests accordlingly."},{"line_number":267,"context_line":""},{"line_number":268,"context_line":"Specifying virtual persistent memory in image metadata is not supported"},{"line_number":269,"context_line":"for this iteration."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_fac24ae9","line":266,"range":{"start_line":266,"start_character":57,"end_line":266,"end_character":69},"updated":"2019-05-20 18:39:40.000000000","message":"accordingly","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":263,"context_line":"    requested by guests is backed by one physical persistent memory"},{"line_number":264,"context_line":"    namespace of the exact same capacity."},{"line_number":265,"context_line":""},{"line_number":266,"context_line":"The extra specs are translated to placement API requests accordlingly."},{"line_number":267,"context_line":""},{"line_number":268,"context_line":"Specifying virtual persistent memory in image metadata is not supported"},{"line_number":269,"context_line":"for this 
iteration."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_941a976c","line":266,"range":{"start_line":266,"start_character":57,"end_line":266,"end_character":69},"in_reply_to":"bfb3d3c7_fac24ae9","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":280,"context_line":""},{"line_number":281,"context_line":"VM resize"},{"line_number":282,"context_line":"---------"},{"line_number":283,"context_line":"Increase or decrease the number of virtual persistent memory devices"},{"line_number":284,"context_line":"of an instance is allowed."},{"line_number":285,"context_line":"As for a certain virtual persistent memory, only resizing to no less"},{"line_number":286,"context_line":"size is supported in order to avoid data loss."},{"line_number":287,"context_line":""},{"line_number":288,"context_line":"By default the content of the original virtual persistent memory is copied"},{"line_number":289,"context_line":"to the new virtual persistent memory (if there is). This could be time"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_d57877f9","line":286,"range":{"start_line":283,"start_character":0,"end_line":286,"end_character":46},"updated":"2019-05-20 18:39:40.000000000","message":"I don\u0027t understand these two sentences. They seem to be contradictory.\n\nIf you reduce the number of namespaces, you\u0027ll suffer data loss.\n\nIf you increase the number of namespaces, or change them so that the overall total capacity is larger regardless of the number of namespaces, how are you going to match up which blocks go where? That seems like a nontrivial repacking algorithm.\n\nHere\u0027s an example of what I\u0027m talking about:\n\n- Old: hw:pmem \u003d 16MB,16MB\n- New: hw:pmem \u003d 8MB,16MB,8MB,8MB\n\nI\u0027m going from 32MB to 40MB, sure, but somebody has to figure out how to split two 16MB namespaces across the new ones.\n\nAnd assuming you can do this in a generic fashion, does the framework magically preserve the original memory addresses somehow?\n\n[Later] Oh, looking at L299-300, we\u0027re not worried about data loss? So disregard this comment - but L285-6 needs to be deleted?","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":280,"context_line":""},{"line_number":281,"context_line":"VM resize"},{"line_number":282,"context_line":"---------"},{"line_number":283,"context_line":"Increase or decrease the number of virtual persistent memory devices"},{"line_number":284,"context_line":"of an instance is allowed."},{"line_number":285,"context_line":"As for a certain virtual persistent memory, only resizing to no less"},{"line_number":286,"context_line":"size is supported in order to avoid data loss."},{"line_number":287,"context_line":""},{"line_number":288,"context_line":"By default the content of the original virtual persistent memory is copied"},{"line_number":289,"context_line":"to the new virtual persistent memory (if there is). 
This could be time"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b4455b45","line":286,"range":{"start_line":283,"start_character":0,"end_line":286,"end_character":46},"updated":"2019-05-27 05:56:51.000000000","message":"Yes, we only support adding new devices.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"9a56deaa0cd6f7201405275533b4e6be5d7757d3","unresolved":false,"context_lines":[{"line_number":280,"context_line":""},{"line_number":281,"context_line":"VM resize"},{"line_number":282,"context_line":"---------"},{"line_number":283,"context_line":"Increase or decrease the number of virtual persistent memory devices"},{"line_number":284,"context_line":"of an instance is allowed."},{"line_number":285,"context_line":"As for a certain virtual persistent memory, only resizing to no less"},{"line_number":286,"context_line":"size is supported in order to avoid data loss."},{"line_number":287,"context_line":""},{"line_number":288,"context_line":"By default the content of the original virtual persistent memory is copied"},{"line_number":289,"context_line":"to the new virtual persistent memory (if there is). This could be time"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_fc1b0618","line":286,"range":{"start_line":283,"start_character":0,"end_line":286,"end_character":46},"in_reply_to":"bfb3d3c7_21189973","updated":"2019-05-22 04:23:27.000000000","message":"\u003e \u003e \u003e hw:pmem\u003d16MB, or\n \u003e \u003e\n \u003e \u003e Sorry, this still doesn\u0027t make sense to me. Why was it okay to\n \u003e \u003e eliminate one of the original namespaces?\n \u003e \n \u003e I, Rui, and Luyao have talked about this again. Yes, that doesn\u0027t\n \u003e make sense. We don\u0027t want the user to choose a wrong flavor (which\n \u003e removed one of the namespaces), and then the user loses his data. So reducing\n \u003e the namespaces is dangerous. We shouldn\u0027t support that.\n \u003e \n \u003e \u003e\n \u003e \u003e \u003e hw:pmem\u003d16MB, 16MB, 8MB\n \u003e \u003e\n \u003e \u003e Okay, and is there determinism to which of the original\n \u003e namespaces\n \u003e \u003e maps to which of the new ones? Like, would it be okay if the new\n \u003e \u003e one was\n \u003e \u003e\n \u003e \u003e hw:pmem \u003d 16MB,8MB,16MB\n \u003e \u003e\n \u003e \u003e So the middle one would be \"empty\"?\n \u003e \u003e\n \u003e \u003e And if you can do that kind of stuff, I don\u0027t understand why you\n \u003e \u003e couldn\u0027t divide a 16MB old namespace between two 8MB new\n \u003e \u003e namespaces.\n \u003e \n \u003e We respect the order of [list of capacity], so we map the new and\n \u003e old lists by order.\n \u003e \n \u003e \u003e\n \u003e \u003e But my point is that the code to do this fitting has the\n \u003e potential\n \u003e \u003e to be either underpowered or overcomplicated. As such, I think it\n \u003e \u003e may be worth enumerating some of these examples and describing\n \u003e \u003e whether/how they are to be supported.\n \u003e \n \u003e Nice point. So we can only support adding more namespaces, and remove the\n \u003e case of reducing the number of namespaces.\n \u003e \n \u003e As for whether we can change an existing namespace to a bigger namespace,\n \u003e I\u0027m not sure. 
We can support it, since either way we need to copy the\n \u003e data after migration; there should be no difference between copying to\n \u003e a same-size namespace and copying to a bigger-size namespace.\n\nLuyao faces a new problem: she can’t compare the sizes by those “labels” (for example, we don’t know which one is bigger between 4GB and 1024MB). I think we can simplify this resize: we only support adding new namespaces.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"b2582d517aabbb7b5da271bbe0cd65522f557230","unresolved":false,"context_lines":[{"line_number":280,"context_line":""},{"line_number":281,"context_line":"VM resize"},{"line_number":282,"context_line":"---------"},{"line_number":283,"context_line":"Increase or decrease the number of virtual persistent memory devices"},{"line_number":284,"context_line":"of an instance is allowed."},{"line_number":285,"context_line":"As for a certain virtual persistent memory, only resizing to no less"},{"line_number":286,"context_line":"size is supported in order to avoid data loss."},{"line_number":287,"context_line":""},{"line_number":288,"context_line":"By default the content of the original virtual persistent memory is copied"},{"line_number":289,"context_line":"to the new virtual persistent memory (if there is). This could be time"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_21189973","line":286,"range":{"start_line":283,"start_character":0,"end_line":286,"end_character":46},"in_reply_to":"bfb3d3c7_b7792ffd","updated":"2019-05-22 01:56:39.000000000","message":"\u003e \u003e hw:pmem\u003d16MB, or\n \u003e \n \u003e Sorry, this still doesn\u0027t make sense to me. Why was it okay to\n \u003e eliminate one of the original namespaces?\n\nI, Rui, and Luyao have talked about this again. Yes, that doesn\u0027t make sense. We don\u0027t want the user to choose a wrong flavor (which removed one of the namespaces), and then the user loses his data. So reducing the namespaces is dangerous. We shouldn\u0027t support that.\n\n \u003e \n \u003e \u003e hw:pmem\u003d16MB, 16MB, 8MB\n \u003e \n \u003e Okay, and is there determinism to which of the original namespaces\n \u003e maps to which of the new ones? Like, would it be okay if the new\n \u003e one was\n \u003e \n \u003e hw:pmem \u003d 16MB,8MB,16MB\n \u003e \n \u003e So the middle one would be \"empty\"?\n \u003e \n \u003e And if you can do that kind of stuff, I don\u0027t understand why you\n \u003e couldn\u0027t divide a 16MB old namespace between two 8MB new\n \u003e namespaces.\n\nWe respect the order of [list of capacity], so we map the new and old lists by order.\n\n \u003e \n \u003e But my point is that the code to do this fitting has the potential\n \u003e to be either underpowered or overcomplicated. As such, I think it\n \u003e may be worth enumerating some of these examples and describing\n \u003e whether/how they are to be supported.\n\nNice point. So we can only support adding more namespaces, and remove the case of reducing the number of namespaces.\n\nAs for whether we can change an existing namespace to a bigger namespace, I\u0027m not sure. 
We can support it, since either way we need to copy the data after migration; there should be no difference between copying to a same-size namespace and copying to a bigger-size namespace.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e237743b29cee2c5b5b5c14e0880e44f9fa9a62e","unresolved":false,"context_lines":[{"line_number":280,"context_line":""},{"line_number":281,"context_line":"VM resize"},{"line_number":282,"context_line":"---------"},{"line_number":283,"context_line":"Increase or decrease the number of virtual persistent memory devices"},{"line_number":284,"context_line":"of an instance is allowed."},{"line_number":285,"context_line":"As for a certain virtual persistent memory, only resizing to no less"},{"line_number":286,"context_line":"size is supported in order to avoid data loss."},{"line_number":287,"context_line":""},{"line_number":288,"context_line":"By default the content of the original virtual persistent memory is copied"},{"line_number":289,"context_line":"to the new virtual persistent memory (if there is). This could be time"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_fdba3adc","line":286,"range":{"start_line":283,"start_character":0,"end_line":286,"end_character":46},"in_reply_to":"bfb3d3c7_d57877f9","updated":"2019-05-21 06:47:08.000000000","message":"For the example you have, the first namespace in the old flavor is 16MB, and in the new flavor it is 8MB; this is the case we rejected. We can\u0027t migrate the 16MB data to the 8MB namespace.\n\nBut you are free to add one more namespace or remove one namespace. So the new flavor can be:\n\nhw:pmem\u003d16MB, or\nhw:pmem\u003d16MB, 16MB, 8MB","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e3c4634c564d15e7085328e1b6678a411bfbca9c","unresolved":false,"context_lines":[{"line_number":280,"context_line":""},{"line_number":281,"context_line":"VM resize"},{"line_number":282,"context_line":"---------"},{"line_number":283,"context_line":"Increase or decrease the number of virtual persistent memory devices"},{"line_number":284,"context_line":"of an instance is allowed."},{"line_number":285,"context_line":"As for a certain virtual persistent memory, only resizing to no less"},{"line_number":286,"context_line":"size is supported in order to avoid data loss."},{"line_number":287,"context_line":""},{"line_number":288,"context_line":"By default the content of the original virtual persistent memory is copied"},{"line_number":289,"context_line":"to the new virtual persistent memory (if there is). This could be time"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b7792ffd","line":286,"range":{"start_line":283,"start_character":0,"end_line":286,"end_character":46},"in_reply_to":"bfb3d3c7_fdba3adc","updated":"2019-05-21 19:35:45.000000000","message":"\u003e hw:pmem\u003d16MB, or\n\nSorry, this still doesn\u0027t make sense to me. Why was it okay to eliminate one of the original namespaces?\n\n \u003e hw:pmem\u003d16MB, 16MB, 8MB\n\nOkay, and is there determinism to which of the original namespaces maps to which of the new ones? 
Like, would it be okay if the new one was\n\n hw:pmem \u003d 16MB,8MB,16MB\n\nSo the middle one would be \"empty\"?\n\nAnd if you can do that kind of stuff, I don\u0027t understand why you couldn\u0027t divide a 16MB old namespace between two 8MB new namespaces.\n\nBut my point is that the code to do this fitting has the potential to be either underpowered or overcomplicated. As such, I think it may be worth enumerating some of these examples and describing whether/how they are to be supported.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":291,"context_line":""},{"line_number":292,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. This is useful"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_1a2e3ea5","line":294,"range":{"start_line":294,"start_character":52,"end_line":294,"end_character":58},"updated":"2019-05-20 18:39:40.000000000","message":"nit: ``double backticks`` (here and below)","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":291,"context_line":""},{"line_number":292,"context_line":" hw:allow_pmem_copy\u003dtrue|false (default true)"},{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. This is useful"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b430fbe9","line":294,"range":{"start_line":294,"start_character":52,"end_line":294,"end_character":58},"in_reply_to":"bfb3d3c7_1a2e3ea5","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e237743b29cee2c5b5b5c14e0880e44f9fa9a62e","unresolved":false,"context_lines":[{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. 
This is useful"},{"line_number":298,"context_line":"in scenarios such as virtual persistent memory is used as cache."},{"line_number":299,"context_line":"For a graceful shutdown (which resize does), the data in the cache"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_1da14eaa","line":296,"range":{"start_line":296,"start_character":0,"end_line":296,"end_character":64},"updated":"2019-05-21 06:47:08.000000000","message":"I\u0027m thinking about this: can we resize a namespace down in this case? Should we support this or not?","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. This is useful"},{"line_number":298,"context_line":"in scenarios such as virtual persistent memory is used as cache."},{"line_number":299,"context_line":"For a graceful shutdown (which resize does), the data in the cache"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_5a24b6c6","line":296,"range":{"start_line":296,"start_character":53,"end_line":296,"end_character":58},"updated":"2019-05-20 18:39:40.000000000","message":"false","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e3c4634c564d15e7085328e1b6678a411bfbca9c","unresolved":false,"context_lines":[{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. 
This is useful"},{"line_number":298,"context_line":"in scenarios such as virtual persistent memory is used as cache."},{"line_number":299,"context_line":"For a graceful shutdown (which resize does), the data in the cache"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_f7e487ae","line":296,"range":{"start_line":296,"start_character":0,"end_line":296,"end_character":64},"in_reply_to":"bfb3d3c7_1da14eaa","updated":"2019-05-21 19:35:45.000000000","message":"Just so. This should be addressed.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. This is useful"},{"line_number":298,"context_line":"in scenarios such as virtual persistent memory is used as cache."},{"line_number":299,"context_line":"For a graceful shutdown (which resize does), the data in the cache"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_7454c3ff","line":296,"range":{"start_line":296,"start_character":53,"end_line":296,"end_character":58},"in_reply_to":"bfb3d3c7_5a24b6c6","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"b2582d517aabbb7b5da271bbe0cd65522f557230","unresolved":false,"context_lines":[{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. This is useful"},{"line_number":298,"context_line":"in scenarios such as virtual persistent memory is used as cache."},{"line_number":299,"context_line":"For a graceful shutdown (which resize does), the data in the cache"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_413cadc7","line":296,"range":{"start_line":296,"start_character":0,"end_line":296,"end_character":64},"in_reply_to":"bfb3d3c7_f7e487ae","updated":"2019-05-22 01:56:39.000000000","message":"After discussing with Rui and Luyao, we only want to support resize up. Let us remove all the cases of resize down.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":293,"context_line":""},{"line_number":294,"context_line":"If either the source or target has this flag set to `true`, the"},{"line_number":295,"context_line":"data in virtual persistent memory is copied."},{"line_number":296,"context_line":"If both the source and target have this flag set to `flase`, the"},{"line_number":297,"context_line":"data in virtual persistent memory is not copied. 
This is useful"},{"line_number":298,"context_line":"in scenarios such as virtual persistent memory is used as cache."},{"line_number":299,"context_line":"For a graceful shutdown (which resize does), the data in the cache"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_145547fa","line":296,"range":{"start_line":296,"start_character":0,"end_line":296,"end_character":64},"in_reply_to":"bfb3d3c7_f7e487ae","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":334,"context_line":" Persisent memory namespaces can be created/destroyed on the fly as VM"},{"line_number":335,"context_line":" creation/deletion."},{"line_number":336,"context_line":" This ways is more flexible than the fix sized approach, however it will"},{"line_number":337,"context_line":" result in fragmentation as detailed in the `Background`_ section."},{"line_number":338,"context_line":""},{"line_number":339,"context_line":"Data model impact"},{"line_number":340,"context_line":"-----------------"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_10da9d16","line":337,"range":{"start_line":337,"start_character":44,"end_line":337,"end_character":57},"updated":"2019-05-20 18:39:40.000000000","message":"This doesn\u0027t link because you\u0027re in a code block. Is the indent intentional?","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":334,"context_line":" Persisent memory namespaces can be created/destroyed on the fly as VM"},{"line_number":335,"context_line":" creation/deletion."},{"line_number":336,"context_line":" This ways is more flexible than the fix sized approach, however it will"},{"line_number":337,"context_line":" result in fragmentation as detailed in the `Background`_ section."},{"line_number":338,"context_line":""},{"line_number":339,"context_line":"Data model impact"},{"line_number":340,"context_line":"-----------------"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_34816b4b","line":337,"range":{"start_line":337,"start_character":44,"end_line":337,"end_character":57},"in_reply_to":"bfb3d3c7_10da9d16","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":348,"context_line":" +"},{"line_number":349,"context_line":" +    fields \u003d {"},{"line_number":350,"context_line":" +        #\u0027capacity\u0027 is what is configured by admin."},{"line_number":351,"context_line":" +        \u0027capacity\u0027: fields.StringField(),"},{"line_number":352,"context_line":" +        #\u0027size\u0027 is the actual size in bytes of a pmem namespace,"},{"line_number":353,"context_line":" +        #it is needed by libvirt domain xml."},{"line_number":354,"context_line":" +        \u0027size\u0027: 
fields.IntegerField(nullable\u003dTrue),"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b4e1db2c","line":351,"range":{"start_line":351,"start_character":11,"end_line":351,"end_character":19},"updated":"2019-05-27 05:56:51.000000000","message":"Changed it to \u0027rc_name\u0027, representing the resource class name","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":348,"context_line":" +"},{"line_number":349,"context_line":" +    fields \u003d {"},{"line_number":350,"context_line":" +        #\u0027capacity\u0027 is what is configured by admin."},{"line_number":351,"context_line":" +        \u0027capacity\u0027: fields.StringField(),"},{"line_number":352,"context_line":" +        #\u0027size\u0027 is the actual size in bytes of a pmem namespace,"},{"line_number":353,"context_line":" +        #it is needed by libvirt domain xml."},{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_d013a5b7","line":351,"range":{"start_line":351,"start_character":10,"end_line":351,"end_character":20},"updated":"2019-05-20 18:39:40.000000000","message":"Perhaps this would be better called \u0027name\u0027 or \u0027label\u0027. The admin can choose what to put in here, and it\u0027s not always a capacity.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"b2582d517aabbb7b5da271bbe0cd65522f557230","unresolved":false,"context_lines":[{"line_number":348,"context_line":" +"},{"line_number":349,"context_line":" +    fields \u003d {"},{"line_number":350,"context_line":" +        #\u0027capacity\u0027 is what is configured by admin."},{"line_number":351,"context_line":" +        \u0027capacity\u0027: fields.StringField(),"},{"line_number":352,"context_line":" +        #\u0027size\u0027 is the actual size in bytes of a pmem namespace,"},{"line_number":353,"context_line":" +        #it is needed by libvirt domain xml."},{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_41410d39","line":351,"range":{"start_line":351,"start_character":10,"end_line":351,"end_character":20},"in_reply_to":"bfb3d3c7_b7628f3d","updated":"2019-05-22 01:56:39.000000000","message":"I\u0027m not sure about this now. The user can write high_performance_large or low_performance_small. In the future, we can also add traits to the RP. For example, allow the operator to configure traits in the config option. (CONF.libvirt.pmem_namespaces\u003d[name]:[capacity][traits]). 
But this will introduce some complexity, as we would need to separate the namespaces into different RPs since they have different traits.\n\nIf there aren\u0027t too many combinations of high/low performance and size, it is OK to use this field as a label.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e237743b29cee2c5b5b5c14e0880e44f9fa9a62e","unresolved":false,"context_lines":[{"line_number":348,"context_line":" +"},{"line_number":349,"context_line":" +    fields \u003d {"},{"line_number":350,"context_line":" +        #\u0027capacity\u0027 is what is configured by admin."},{"line_number":351,"context_line":" +        \u0027capacity\u0027: fields.StringField(),"},{"line_number":352,"context_line":" +        #\u0027size\u0027 is the actual size in bytes of a pmem namespace,"},{"line_number":353,"context_line":" +        #it is needed by libvirt domain xml."},{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_fd88fa1f","line":351,"range":{"start_line":351,"start_character":10,"end_line":351,"end_character":20},"in_reply_to":"bfb3d3c7_d013a5b7","updated":"2019-05-21 06:47:08.000000000","message":"We want them to put the capacity instead of a \u0027high\u0027 or \u0027low\u0027 performance label. It sounds like the discussion of resource class names :)","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e3c4634c564d15e7085328e1b6678a411bfbca9c","unresolved":false,"context_lines":[{"line_number":348,"context_line":" +"},{"line_number":349,"context_line":" +    fields \u003d {"},{"line_number":350,"context_line":" +        #\u0027capacity\u0027 is what is configured by admin."},{"line_number":351,"context_line":" +        \u0027capacity\u0027: fields.StringField(),"},{"line_number":352,"context_line":" +        #\u0027size\u0027 is the actual size in bytes of a pmem namespace,"},{"line_number":353,"context_line":" +        #it is needed by libvirt domain xml."},{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b7628f3d","line":351,"range":{"start_line":351,"start_character":10,"end_line":351,"end_character":20},"in_reply_to":"bfb3d3c7_fd88fa1f","updated":"2019-05-21 19:35:45.000000000","message":"So you\u0027re trying to encourage (but not enforce) a particular usage pattern by giving this field a certain name?","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":355,"context_line":" +        \u0027ns_uuid\u0027: fields.UUIDField(nullable\u003dTrue),"},{"line_number":356,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":357,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":358,"context_line":" +    }"},{"line_number":359,"context_line":""},{"line_number":360,"context_line":"In addition a 
VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b004f17c","line":357,"updated":"2019-05-20 18:39:40.000000000","message":"why are these all nullable?","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e3c4634c564d15e7085328e1b6678a411bfbca9c","unresolved":false,"context_lines":[{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":355,"context_line":" +        \u0027ns_uuid\u0027: fields.UUIDField(nullable\u003dTrue),"},{"line_number":356,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":357,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":358,"context_line":" +    }"},{"line_number":359,"context_line":""},{"line_number":360,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_b7c7ef12","line":357,"in_reply_to":"bfb3d3c7_9ddc5e75","updated":"2019-05-21 19:35:45.000000000","message":"Sorry, I don\u0027t understand this. It should be the same code reading the config, prodding the system, and setting up these objects. That method should be able to have all of these pieces of information ready by the time it initializes the object.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":23598,"name":"Zhong Luyao","email":"luyao.zhong@intel.com","username":"ZhongLuyao"},"change_message_id":"51c6262bcad83fcf0144b22b70fb50cb16343aab","unresolved":false,"context_lines":[{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":355,"context_line":" +        \u0027ns_uuid\u0027: fields.UUIDField(nullable\u003dTrue),"},{"line_number":356,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":357,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":358,"context_line":" +    }"},{"line_number":359,"context_line":""},{"line_number":360,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_9ddc5e75","line":357,"in_reply_to":"bfb3d3c7_b004f17c","updated":"2019-05-21 06:37:34.000000000","message":"I think these fields can be null before a pmem namespace is assigned to a vpmem; after allocation these fields will be filled out according to the pmem namespace info. 
So now these fields are defined as nullable in code.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":354,"context_line":" +        \u0027size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":355,"context_line":" +        \u0027ns_uuid\u0027: fields.UUIDField(nullable\u003dTrue),"},{"line_number":356,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":357,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":358,"context_line":" +    }"},{"line_number":359,"context_line":""},{"line_number":360,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_7435a38e","line":357,"in_reply_to":"bfb3d3c7_b7c7ef12","updated":"2019-05-27 05:56:51.000000000","message":"This is the virtual pmem for an instance. So I think in the initial phase of an instance when the physical namespaces are not yet assigned to the instance, these fields can be null. \n\n\n\u003e Sorry, I don\u0027t understand this. It should be the same code reading\n \u003e the config, prodding the system, and setting up these objects. That\n \u003e method should be able to have all of these pieces of information\n \u003e ready by the time it initializes the object.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"e237743b29cee2c5b5b5c14e0880e44f9fa9a62e","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +    vpmems \u003d orm.deferred(Column(Text))"},{"line_number":380,"context_line":"      instance \u003d orm.relationship(Instance,"},{"line_number":381,"context_line":"      ..."},{"line_number":382,"context_line":""},{"line_number":383,"context_line":"REST API impact"},{"line_number":384,"context_line":"---------------"},{"line_number":385,"context_line":"Flavor extra specs already accept arbitrary data."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_1db86ec6","line":382,"updated":"2019-05-21 06:47:08.000000000","message":"for migration, we need to change the migrationcontext obj.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +    vpmems \u003d orm.deferred(Column(Text))"},{"line_number":380,"context_line":"      instance \u003d orm.relationship(Instance,"},{"line_number":381,"context_line":"      ..."},{"line_number":382,"context_line":""},{"line_number":383,"context_line":"REST API impact"},{"line_number":384,"context_line":"---------------"},{"line_number":385,"context_line":"Flavor extra specs already accept arbitrary data."}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_54a3bfb2","line":382,"in_reply_to":"bfb3d3c7_1db86ec6","updated":"2019-05-27 05:56:51.000000000","message":"Done","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric 
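To make the thread concrete, here is a minimal sketch of the object
under discussion, assuming Nova's usual oslo.versionedobjects
conventions. The field names and types come from the spec excerpt
above; the class name VirtualPMEM is inferred from the
VirtualPMEMList the spec mentions, and the base class, registration
decorator, and list shape are assumptions about how Nova typically
declares such objects, not the spec's literal code.

    from oslo_versionedobjects import base, fields


    @base.VersionedObjectRegistry.register
    class VirtualPMEM(base.VersionedObject):
        VERSION = '1.0'
        fields = {
            # 'capacity' is what the admin configures (the label part
            # of CUSTOM_PMEM_NAMESPACE_$LABEL); always known up front,
            # so it is not nullable.
            'capacity': fields.StringField(),
            # The remaining fields describe the physical namespace
            # backing this vpmem. Per the thread above, they stay None
            # until a namespace is assigned, hence nullable=True.
            'size': fields.IntegerField(nullable=True),      # bytes, for the domain XML
            'ns_uuid': fields.UUIDField(nullable=True),
            'ns_dev': fields.StringField(nullable=True),     # e.g. the devdax chardev
            'ns_align': fields.IntegerField(nullable=True),  # alignment in bytes
        }


    @base.VersionedObjectRegistry.register
    class VirtualPMEMList(base.ObjectListBase, base.VersionedObject):
        VERSION = '1.0'
        fields = {'objects': fields.ListOfObjectsField('VirtualPMEM')}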
Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":402,"context_line":"PMEM namespaces tend to be large. Zeroing out a persistent memory"},{"line_number":403,"context_line":"namespace requires a considerable amount of time. This may introduce"},{"line_number":404,"context_line":"a negative performance impact when deleting a guest with large"},{"line_number":405,"context_line":"virtual persistent memories."},{"line_number":406,"context_line":""},{"line_number":407,"context_line":"Other deployer impact"},{"line_number":408,"context_line":"---------------------"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_30732118","line":405,"updated":"2019-05-20 18:39:40.000000000","message":"It would be nice if we could mark the namespaces as \"unusable\" and kick off the zeroing asynchronously so we can get on with the deletion of the VM without waiting. The thread doing the zeroing can unreserve the namespace when it\u0027s finished.\n\nThis is probably an optimization for the future, as it has nontrivial implications on allocation/inventory management in placement.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"49c603ea181792add0a14ad188b7878979d108d7","unresolved":false,"context_lines":[{"line_number":402,"context_line":"PMEM namespaces tend to be large. Zeroing out a persistent memory"},{"line_number":403,"context_line":"namespace requires a considerable amount of time. This may introduce"},{"line_number":404,"context_line":"a negative performance impact when deleting a guest with large"},{"line_number":405,"context_line":"virtual persistent memories."},{"line_number":406,"context_line":""},{"line_number":407,"context_line":"Other deployer impact"},{"line_number":408,"context_line":"---------------------"}],"source_content_type":"text/x-rst","patch_set":13,"id":"bfb3d3c7_d496cf4c","line":405,"in_reply_to":"bfb3d3c7_30732118","updated":"2019-05-27 05:56:51.000000000","message":"I agree.","commit_id":"fe9ee4a816d90d960d1b9d930650a0ee97c378d9"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"e43d75fa4d9859732bc39b2e1f75d9be22fb079b","unresolved":false,"context_lines":[{"line_number":477,"context_line":"History"},{"line_number":478,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":479,"context_line":""},{"line_number":480,"context_line":"Optional section intended to be used each time the spec is updated to describe"},{"line_number":481,"context_line":"new design, API or any database schema updated. Useful to let reader understand"},{"line_number":482,"context_line":"what\u0027s happened along the time."},{"line_number":483,"context_line":""},{"line_number":484,"context_line":".. 
On the History section (lines 480-482):

Eric Fried (2019-05-20): Delete boilerplate.

On the scope note (patch set 14, line 52, ".. important :: This spec
only intends to enable virtual persistent memory for the libvirt KVM
driver."):

sean mooney (2019-06-13): This could just be a "note" rather than
"important".

Rui Zang (2019-06-18): Sure. (2019-06-24): Done.

On fsdax vs. devdax (lines 81-82, "Mode ``fsdax`` is known to have
issues with VMs that have physical devices passed through."):

Matt Riedemann (2019-06-17): Could you highlight the major known
issue for this mode? Because it sounds like, if we could use it, it
would have been a lot more convenient to model these with block
device mappings in nova, and then the migration flow might have been
easier as well, i.e. treating these resources as block devices (like
volumes we can attach/detach at will) rather than as directly defined
resources in the guest domain XML.

Rui Zang (2019-06-18): Matt, first of all, this sentence (and the
whole paragraph) is talking about the persistent memory mode of the
*host* physical device, not anything in the guest. Even if ``fsdax``
could work (meaning the host pmem device is partitioned into
``fsdax`` namespaces), QEMU still has to mmap() a file from the file
system on top of the ``fsdax`` namespace; it still does not treat the
pmem device as storage, but as a virtual persistent memory device.
The key point is that QEMU models it as virtual persistent memory
(closer to virtual memory), not as a block device, so IMHO BDM is not
a suitable model for it.

Second, the known issue: for VMs with physical devices passed
through, the physical memory required by the VM must be allocated
before the vCPUs are scheduled to run on the host, and that memory
must not be swapped out, because it may be accessed at any time by
DMA from/to the passed-through device. QEMU uses its memory hotplug
framework to support pmem, so the same restriction applies to pmem as
well. This conflicts with the need of a ``DAX`` capable filesystem on
top of an ``fsdax`` mode namespace to rearrange some of its pages for
management purposes. The issue is very hard to resolve, so ``devdax``
mode namespaces are recommended for VMs.

sean mooney (2019-06-19): We allow VMs without "locked" memory to
have passthrough devices. QEMU allocates the MMIO memory for DMA
separately from the guest RAM. It is possible there is a QEMU bug in
the case of pmem, but in general you can swap the memory of a guest
that has passthrough devices, just not the passthrough device memory
region.

Rui Zang (2019-06-20): I got that information from our virtualization
team. After some googling, it seems that for VFIO devices the only
possible way to enable partial locking of guest memory is to enable
vIOMMU, and that has issues also
(https://www.lkml.org/lkml/2018/10/30/221). Anyway, for whatever
reason, to quote the official ndctl create-namespace document
(https://pmem.io/ndctl/ndctl-create-namespace.html): "devdax:
Device-DAX mode enables similar mmap(2) DAX mapping capabilities as
Filesystem-DAX. ... Use this mode to assign persistent memory to a
virtual-machine, register persistent memory for RDMA, ...". I will
reword this to something like "devdax mode is recommended to assign
persistent memory to virtual machines."

sean mooney (2019-06-20): I would actually like to add support for
the vIOMMU for other reasons, so maybe we should just do that. If
adding PMEM to a guest locks the guest RAM in host RAM without it,
that is a problem: PMEM guests would prevent host memory
oversubscription from working correctly, since it would not be
possible to swap out their memory as is done for normal guests. The
only way we have to model memory consumed like that today is
hugepages, so if we don't implement vIOMMU support we should probably
consider requiring hugepages, to prevent the memory accounting issues
we would otherwise have; otherwise the guest is at risk of being
reaped by the OOM killer when the host is configured with a RAM
allocation ratio > 1.0.

Rui Zang (2019-06-21): Hey Sean, adding PMEM won't lock the guest RAM
at all. VFIO-PCI will lock the guest RAM if no vIOMMU is present; it
has nothing to do with PMEM. The only issue is that in that case
(guest RAM locked), a vpmem passed in as "fsdax" will have problems.
I believe that is one of the reasons (if not the only one) that
"devdax" mode namespaces are recommended for VM use. Adding vIOMMU
support is good, however orthogonal to vpmem.

On the reference links (lines 88-89, "Please refer to `virtual NVDIMM
backends`_ and `NVDIMM Linux kernel document`_ for details."):

sean mooney (2019-06-19): Nit: I think you want this to be part of
the previous paragraph, so I would move it to line 83.

Rui Zang (2019-06-20): Sure. (2019-06-24): Done.
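Since the thread lands on devdax, a small sketch of pre-creating the
fixed-size namespaces the spec expects may help. It shells out to
ndctl create-namespace, whose --mode/--size/--name options are the
ones documented at the pmem.io page cited above; verify the exact
spellings against your ndctl version.

    import subprocess


    def create_devdax_namespace(name, size):
        """Pre-create one fixed-size devdax namespace, e.g. ('ns0', '100G').

        The admin would run something like this (or the equivalent
        shell command) for every namespace before deploying
        nova-compute on the host.
        """
        subprocess.check_call([
            'ndctl', 'create-namespace',
            '--mode', 'devdax',   # mmap()-able char device, recommended for VMs
            '--size', size,
            '--name', name,       # the NSNAME later listed in nova's config
        ])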
details."},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"The ``devdax`` persistent memory namespaces require contiguous physical"},{"line_number":92,"context_line":"space and are not managed in pages as ordinary system memory."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_77cf0a7f","line":89,"range":{"start_line":88,"start_character":0,"end_line":89,"end_character":44},"in_reply_to":"9fb8cfa7_8ac17531","updated":"2019-06-24 07:28:20.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"cba61fe562cf08b22207d86497c6b94d54228fa1","unresolved":false,"context_lines":[{"line_number":85,"context_line":"    So this spec only proposes to use persistent memory namespaces in"},{"line_number":86,"context_line":"    ``devdax`` mode as QEMU virtual persistent memory backends."},{"line_number":87,"context_line":""},{"line_number":88,"context_line":"Please refer to `virtual NVDIMM backends`_ and"},{"line_number":89,"context_line":"`NVDIMM Linux kernel document`_ for details."},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"The ``devdax`` persistent memory namespaces require contiguous physical"},{"line_number":92,"context_line":"space and are not managed in pages as ordinary system memory."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_18170c48","line":89,"range":{"start_line":88,"start_character":0,"end_line":89,"end_character":44},"in_reply_to":"9fb8cfa7_8ac17531","updated":"2019-06-20 07:07:08.000000000","message":"Sure","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":118,"context_line":"  |         |         |         |         |"},{"line_number":119,"context_line":"  +---------+---------+---------+---------+"},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"The total size of available space is 200GB. However a ``devdax`` mode"},{"line_number":122,"context_line":"namespace of 200GB size can not be created."},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"Persistent memory namespace management and resource tracking"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_5f25a6aa","line":121,"range":{"start_line":121,"start_character":18,"end_line":121,"end_character":33},"updated":"2019-06-17 16:24:36.000000000","message":"per app, correct?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"cba61fe562cf08b22207d86497c6b94d54228fa1","unresolved":false,"context_lines":[{"line_number":118,"context_line":"  |         |         |         |         |"},{"line_number":119,"context_line":"  +---------+---------+---------+---------+"},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"The total size of available space is 200GB. 
However a ``devdax`` mode"},{"line_number":122,"context_line":"namespace of 200GB size can not be created."},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"Persistent memory namespace management and resource tracking"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_d82ad47c","line":121,"range":{"start_line":121,"start_character":18,"end_line":121,"end_character":33},"in_reply_to":"9fb8cfa7_2a62c917","updated":"2019-06-20 07:07:08.000000000","message":"Will change to \"total free space\".","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"32c2033549c09c1b7f7bf2e08a07ffc44f0501ff","unresolved":false,"context_lines":[{"line_number":118,"context_line":"  |         |         |         |         |"},{"line_number":119,"context_line":"  +---------+---------+---------+---------+"},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"The total size of available space is 200GB. However a ``devdax`` mode"},{"line_number":122,"context_line":"namespace of 200GB size can not be created."},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"Persistent memory namespace management and resource tracking"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_ae2b4d80","line":121,"range":{"start_line":121,"start_character":18,"end_line":121,"end_character":33},"in_reply_to":"9fb8cfa7_5f25a6aa","updated":"2019-06-18 01:54:03.000000000","message":"I mean the total free space of this device is 200GB in the shown case.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":118,"context_line":"  |         |         |         |         |"},{"line_number":119,"context_line":"  +---------+---------+---------+---------+"},{"line_number":120,"context_line":""},{"line_number":121,"context_line":"The total size of available space is 200GB. However a ``devdax`` mode"},{"line_number":122,"context_line":"namespace of 200GB size can not be created."},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"Persistent memory namespace management and resource tracking"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_2a62c917","line":121,"range":{"start_line":121,"start_character":18,"end_line":121,"end_character":33},"in_reply_to":"9fb8cfa7_ae2b4d80","updated":"2019-06-19 16:26:29.000000000","message":"total available size can imply the size of the entire device.\nit might be clear to available with free.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"The total size of available space is 200GB. 
However a ``devdax`` mode"},{"line_number":122,"context_line":"namespace of 200GB size can not be created."},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"Persistent memory namespace management and resource tracking"},{"line_number":125,"context_line":"------------------------------------------------------------"},{"line_number":126,"context_line":"Due to the aforementioned fragmentation issue, persistent memory can not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_ff67bae9","line":123,"updated":"2019-06-17 16:24:36.000000000","message":"What are the solutions for solving the fragmentation issue? Or is this just listed as a known limitation, i.e. the operator slices the available space into 4 chunks of 100GB each and even though only 200GB of the total 400GB is being used, a user can\u0027t resize their app to go from 100GB to 200GB - is that correct?\n\n(later)\n\nOK I see the next section mentions this, i.e. \"The proposed approach is to use pre-created fix sized namespaces.\"","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"cba61fe562cf08b22207d86497c6b94d54228fa1","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"The total size of available space is 200GB. However a ``devdax`` mode"},{"line_number":122,"context_line":"namespace of 200GB size can not be created."},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"Persistent memory namespace management and resource tracking"},{"line_number":125,"context_line":"------------------------------------------------------------"},{"line_number":126,"context_line":"Due to the aforementioned fragmentation issue, persistent memory can not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_b85fe0e3","line":123,"in_reply_to":"9fb8cfa7_0a18257f","updated":"2019-06-20 07:07:08.000000000","message":"OK. Step_size implies a more strict partitioning. Meaning you have a total available 800GB space, and you partition it into 4x200GB namespaces. The step_size is 200GB. Also you could have other varieties like 8x100GB or 2x400GB with different step_size values. The current way is relative more flexible.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":120,"context_line":""},{"line_number":121,"context_line":"The total size of available space is 200GB. 
However a ``devdax`` mode"},{"line_number":122,"context_line":"namespace of 200GB size can not be created."},{"line_number":123,"context_line":""},{"line_number":124,"context_line":"Persistent memory namespace management and resource tracking"},{"line_number":125,"context_line":"------------------------------------------------------------"},{"line_number":126,"context_line":"Due to the aforementioned fragmentation issue, persistent memory can not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_0a18257f","line":123,"in_reply_to":"9fb8cfa7_ff67bae9","updated":"2019-06-19 16:26:29.000000000","message":"yes so app1 could not resize to flavor with a singel 200G allocation but i could resize to a flaovr with 2 100G allocations and  be schedulevd to this host.\n\nwe had discussed possible using step_size in placment to model this but i think we had settled on using a different custom resource class per namespace size.\n\nwe proably shoudl add the step_size version to the alternitive section.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":131,"context_line":"The proposed approach is to use pre-created fix sized namespaces."},{"line_number":132,"context_line":"In other words, the cloud admin creates persistent memory of the desired"},{"line_number":133,"context_line":"sizes before Nova is deployed on a certain host. And the cloud admin puts"},{"line_number":134,"context_line":"the namespace information into nova config file (details in below)."},{"line_number":135,"context_line":"Nova compute agent discovers the namespaces in config file afterwards."},{"line_number":136,"context_line":"And these persistent memory namespace resources are reported to"},{"line_number":137,"context_line":"placement service eventually."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_1e3626b6","line":134,"range":{"start_line":134,"start_character":57,"end_line":134,"end_character":59},"updated":"2019-06-19 16:26:29.000000000","message":"delete","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"8c33b625afbd4eccf2eef3ebfdac70b65a4edc61","unresolved":false,"context_lines":[{"line_number":131,"context_line":"The proposed approach is to use pre-created fix sized namespaces."},{"line_number":132,"context_line":"In other words, the cloud admin creates persistent memory of the desired"},{"line_number":133,"context_line":"sizes before Nova is deployed on a certain host. 
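To make the two models concrete, here is a sketch of the step_size
alternative sean mentions next to the adopted count-per-class model,
written as placement inventory records with the standard placement
inventory fields. The sizes follow Rui's 4x200GB example; the
resource class names are illustrative.

    GB = 1024 ** 3

    # Alternative (not adopted): one byte-counted class with step_size.
    step_size_model = {
        'CUSTOM_PMEM_NAMESPACE': {
            'total': 800 * GB,       # all pre-partitioned pmem, in bytes
            'min_unit': 200 * GB,    # namespaces were created at 200GB
            'max_unit': 800 * GB,
            'step_size': 200 * GB,   # only whole namespaces can be claimed
            'reserved': 0,
            'allocation_ratio': 1.0,
        },
    }

    # Adopted model: count namespaces per fixed-size resource class.
    per_class_model = {
        'CUSTOM_PMEM_NAMESPACE_200G': {
            'total': 4,              # four 200GB namespaces exist
            'min_unit': 1,
            'max_unit': 4,           # one guest may claim all of them
            'step_size': 1,
            'reserved': 0,
            'allocation_ratio': 1.0,
        },
    }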
On the proposed approach (lines 131-137):

    The proposed approach is to use pre-created fix sized namespaces.
    In other words, the cloud admin creates persistent memory of the
    desired sizes before Nova is deployed on a certain host. And the
    cloud admin puts the namespace information into nova config file
    (details in below). Nova compute agent discovers the namespaces
    in config file afterwards. And these persistent memory namespace
    resources are reported to placement service eventually.

sean mooney (2019-06-19): On "in below": delete.

Rui Zang (2019-06-24): Done.

sean mooney (2019-06-19): On "discovers the namespaces in config
file": say rather that it parses the config file to determine what
namespaces it can allocate to a guest.
On reporting to placement (lines 136-137, "And these persistent
memory namespace resources are reported to placement service
eventually."):

sean mooney (2019-06-19): Suggest: "The discovered persistent memory
namespaces will be reported to the placement service as inventories
of a custom resource class associated with a new child resource
provider of the compute node." My reasoning is that it makes sense to
create a child resource provider per NUMA node, so that we can more
easily reshape it when NUMA is actually supported. So for now we
would create an RP per NUMA node that has PMEM namespaces available,
and create inventories of pmem namespaces there. On the nova side we
can add the RP UUIDs to the relevant host_cell object and optionally
implement NUMA affinity using the NUMA topology filter. Then in U,
once NUMA is supported properly in placement, we can simply reshape
the pmem RPs under the NUMA node RPs.

Alex Xu (2019-06-20): Good question; try to imagine what it looks
like in the future. With this spec we do not do affinity for the
vpmem, which means an instance whose guest NUMA node runs on NUMA
node 1 may get vpmem from NUMA node 2. When we upgrade to support
vpmem NUMA affinity, we cannot reallocate the vpmem of existing
instances, whether the vpmem resource is in the root RP or in a
separate RP per NUMA node. But we can still reshape the allocation;
a vpmem allocation from another NUMA node should be OK, and if the
instance is migrated we can get NUMA-affined vpmem for it. So we can
do the upgrade on a host that has instances running. For the reshape
API, why would a separate RP be simpler than resources in the root
RP? When we have NUMA in placement, I thought the vpmem inventories
could live in the NUMA RP; do we need them to be a separate RP as a
child of the NUMA RP? If the vpmem is in a separate RP, the reshape
API only moves inventories and allocations; it cannot move the RP
itself, so we would still need to create those separate RPs first and
then run the reshape API to move the inventories and allocations.

sean mooney (2019-06-20): When I said reshape I was referring to
reshaping the tree rather than the allocations. Moving an RP just
requires updating the RP's parent UUID; the RP, with all its
inventories and allocations, is then moved. So we can move an RP
quite easily with a simple PUT call
(https://developer.openstack.org/api-ref/placement/?expanded=update-resource-provider-detail#update-resource-provider)
without needing to touch the allocations. Regarding storing the
inventories in the NUMA RP, it is still not clear to me whether we
will do that or whether the NUMA RP will be a resourceless RP with
just the NUMA node trait. I had always assumed CPUs, hugepages and so
on would live on the NUMA RP, so I would be fine with pmem doing so
also. However, the more complicated reshape-allocations API is only
needed if you start out with an inventory that has capacity from
multiple NUMA nodes. If you start out with an inventory per resource
per NUMA node in a separate RP, you can just move that RP and use the
same-subtree feature to get affinity with the CPUs and hugepages in
the NUMA node.

Rui Zang (2019-06-24): I saw the discussion between you and Alex on
IRC on this. So, if it is not fundamental, let's keep it as is today?

Eric Fried (2019-06-24): I haven't seen the IRC discussion, but: you
can't change the parent of a resource provider once it's already set;
you can only *set* the parent of a previously unparented provider.
IIUC you're proposing splitting the PMEM namespaces along NUMA node
lines so that you can allocate them according to the affinity
requested for the VM. But we don't yet have a mechanism to request
such affinity in the first place. Once we do, we'll start modeling
the CPU and (regular) memory resources in NUMA node child providers,
and at that point it will make sense to divide PMEM the same way.
Note that the reshape of inventories and existing VMs' allocations
will need to solve the problem of splitting those resources at that
time anyway; having PMEM in that mix is no extra burden (it is
actually probably easier than having to merge the resources from
these weird pre-NUMA children). TL;DR: yes, keep the spec as written,
with the PMEM namespaces in the root provider. Don't try to get
clever with child providers or anticipate NUMA modeling.
On the resource class naming (line 142,
"CUSTOM_PMEM_NAMESPACE_$RCNAME"):

Alex Xu (2019-05-30): Probably CAPACITY, or LABEL? RCNAME is
confusing with the full name of the resource class.

Rui Zang (2019-06-24): Changed to LABEL.

On the namespace identifier (lines 157-158, "``NSNAME`` is the name
of the persistent memory namespace that falls into the resource class
named ``CUSTOM_PMEM_NAMESPACE_$RCNAME``."):

sean mooney (2019-06-19): Is it really a name, or is it a path, e.g.
/dev/dax0.0? If it is a path, I would prefer not to assume it is
rooted in /dev, and would have the full path: mknod can be used to
create device files in other locations, and users break things when
we hard-code them. If it is a path, I would rephrase this as
"``NSPATH`` is the filesystem path of the persistent memory namespace
that maps to the resource class named
``CUSTOM_PMEM_NAMESPACE_$RCNAME``" (or "...that is represented
by..."). If it is a name that is looked up separately, like a
filesystem label, then I would rephrase it as "``NSNAME`` is the name
of the persistent memory namespace that maps to the resource class
named ``CUSTOM_PMEM_NAMESPACE_$RCNAME``."

Alex Xu (2019-06-20): It is the name. Each namespace has a name, and
we can use ndctl to look the namespace up by name; ndctl then tells
us the devpath for that namespace. The benefit we saw in using the
name is that if the devpath changes, we can easily pick up the new
one.
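Alex's name-to-devpath lookup might look roughly like the sketch
below. It assumes `ndctl list -N` prints a JSON array whose entries
carry a 'name' key plus 'chardev' (devdax) or 'blockdev' (fsdax)
keys; treat those exact keys as assumptions to check against your
ndctl output.

    import json
    import subprocess


    def devpath_for(ns_name):
        """Resolve a namespace's friendly name to its /dev path."""
        out = subprocess.check_output(['ndctl', 'list', '-N'])
        for ns in json.loads(out):
            if ns.get('name') == ns_name:
                # devdax namespaces expose a char device, fsdax a block
                # device; either way the node lives under /dev.
                dev = ns.get('chardev') or ns.get('blockdev')
                return '/dev/%s' % dev
        raise LookupError('no pmem namespace named %r' % ns_name)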
And we can use ndctl to get the namespace by name, then ndctl will tell us the devpath for this namespace.\n\nFor using the name, the benefit we thought is if the devpath changed, we can easily update the devpath.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":161,"context_line":""},{"line_number":162,"context_line":"To give an example, on a certain host, there might be a below configuration::"},{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"The interpretition of the above configuration is that this host has 4"},{"line_number":167,"context_line":"persistent memory namespaces (ns0, ns1, ns2, ns3) of resource class"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_0126cbb8","line":164,"range":{"start_line":164,"start_character":4,"end_line":164,"end_character":58},"updated":"2019-06-19 16:26:29.000000000","message":"we proabably want to alow parsing the right hand side of the  as a regex\n\nso i can do something like this \n\"128G:ns[0-3]|262144MB:ns[4,5]|MEDIUM:ns[6-7]\"\nfor this example its not much shorter then\n\"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\"\nbut in general its simpler to specify.\n\nif we treat the right hand side as a regex then both forms\nare valid as the exect match is still a valid regex and you can obviosly mix as well\n\n\"128G:ns[0-3]|262144MB:ns4,ns5|MEDIUM:ns[^0-5]\"\n\nhttps://regex101.com/r/6nrW3y/1\n\nthis would alos allow them do do think like\n\nSMALL:openstack_small[0-9]+|Medium:openstack_medium[0-9]+\n\nwhere they coudl create there own nameing convention for the namespaces when tehy create them that make sense to them.\n\nin the above case im assumee that all names that are create that start with \"oepnstack_small\" followed by a number will map to the SMALL resocue class and will be availbel for opentsack to use but the nice thing about this approch is nova does not need to know about the mean of the nameing convention it just count the number of matches and reports that as the capasity of the CUSTOM_PMEM_NAMESPACE_SMALL inventory.\n\nthis will make it simple for installer to simply create the namespaces and tell nova which ones it can use.\n\nyou could do this by simply listing all the namespace but that could get long and would be tedious to do by hand","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"73aea58d57fd4d48310b684529172548d1260780","unresolved":false,"context_lines":[{"line_number":161,"context_line":""},{"line_number":162,"context_line":"To give an example, on a certain host, there might be a below configuration::"},{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"The interpretition of the above configuration is that this host has 4"},{"line_number":167,"context_line":"persistent memory namespaces (ns0, ns1, ns2, ns3) of resource 
class"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_1b3066e8","line":164,"range":{"start_line":164,"start_character":4,"end_line":164,"end_character":58},"in_reply_to":"9fb8cfa7_0126cbb8","updated":"2019-06-20 08:32:52.000000000","message":"Sure, regex is a good idea","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":161,"context_line":""},{"line_number":162,"context_line":"To give an example, on a certain host, there might be a below configuration::"},{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"The interpretition of the above configuration is that this host has 4"},{"line_number":167,"context_line":"persistent memory namespaces (ns0, ns1, ns2, ns3) of resource class"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_5512f959","line":164,"range":{"start_line":164,"start_character":4,"end_line":164,"end_character":58},"in_reply_to":"9fb8cfa7_1b3066e8","updated":"2019-06-24 15:31:08.000000000","message":"-1 on regex. It would be a bear to debug (for the operator). Escapes are a PITA. We\u0027ve already claimed : and | as top-level delimiters.\n\nExplicit is better than implicit.\n\nHow many of these things do we expect there to be on one system, anyway?\n\nAnd I don\u0027t buy \"tedious to do by hand\" - if you get to that point, a couple of bash pipes and some sed ought to do you just fine.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":161,"context_line":""},{"line_number":162,"context_line":"To give an example, on a certain host, there might be a below configuration::"},{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"The interpretition of the above configuration is that this host has 4"},{"line_number":167,"context_line":"persistent memory namespaces (ns0, ns1, ns2, ns3) of resource class"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_99d89eef","line":164,"range":{"start_line":164,"start_character":4,"end_line":164,"end_character":58},"in_reply_to":"9fb8cfa7_5512f959","updated":"2019-06-25 09:02:37.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"b61cf1e9527fe2518058b0d42547696a8b30491c","unresolved":false,"context_lines":[{"line_number":161,"context_line":""},{"line_number":162,"context_line":"To give an example, on a certain host, there might be a below configuration::"},{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"The interpretition of the above configuration is that this host has 4"},{"line_number":167,"context_line":"persistent memory namespaces 
(ns0, ns1, ns2, ns3) of resource class"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_c89926c4","line":164,"range":{"start_line":164,"start_character":4,"end_line":164,"end_character":58},"in_reply_to":"9fb8cfa7_5512f959","updated":"2019-06-27 21:38:12.000000000","message":"well you can fit about 8T of PMEM on a 2U server today, so with 100G namespaces that is 70+\n\ni expect it will vary widely from deployment to deployment, but if we support backing guest ram entirely by PMEM namespaces i expect that number to increase significantly in U as we start making namespaces as small as 1MB or more likely 1GB\n\nwhich is why i wanted it to be a regex from the start rather than having to retrofit it later.\n\nim expecting to have to submit a spec for U that will introduce a\n\nhw:mem_backing_type that will work in tandem with the existing\nhw:mem_page_size extraspec.\n\nhw:mem_backing_type would allow selecting between anonymous ram, file backed memory, hugepage memory or persistent memory.\n\nthis is something i am explicitly not working on in train but that we have a partner request to enable in U, which we deferred because this spec is a prerequisite.\n\nso while this usecase is expressly out of scope of this spec i was trying to ensure we did not bake ourselves into a corner ahead of time.\n\n\"|\" has special meaning in regex as a group separator so we may have an issue with that usecase in the future but we can cross that bridge when we come to it.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"The interpretition of the above configuration is that this host has 4"},{"line_number":167,"context_line":"persistent memory namespaces (ns0, ns1, ns2, ns3) of resource class"},{"line_number":168,"context_line":"``CUSTOM_PMEM_NAMESPACE_128G``, 2 namespaces (ns4, ns5) of resource class"},{"line_number":169,"context_line":"``CUSTOM_PMEM_NAMESPACE_262144MB``, and 2 namespaces (ns6, ns7) of resource"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_151c8168","line":166,"range":{"start_line":166,"start_character":4,"end_line":166,"end_character":18},"updated":"2019-06-24 15:31:08.000000000","message":"interpretation","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""},{"line_number":165,"context_line":""},{"line_number":166,"context_line":"The interpretition of the above configuration is that this host has 4"},{"line_number":167,"context_line":"persistent memory namespaces (ns0, ns1, ns2, ns3) of resource class"},{"line_number":168,"context_line":"``CUSTOM_PMEM_NAMESPACE_128G``, 2 namespaces (ns4, ns5) of resource class"},{"line_number":169,"context_line":"``CUSTOM_PMEM_NAMESPACE_262144MB``, and 2 namespaces (ns6, ns7) of
resource"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_39e1729c","line":166,"range":{"start_line":166,"start_character":4,"end_line":166,"end_character":18},"in_reply_to":"9fb8cfa7_151c8168","updated":"2019-06-25 09:02:37.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"77a6a70df51d6fd1c6f981c13b6b97db8b324f70","unresolved":false,"context_lines":[{"line_number":169,"context_line":"``CUSTOM_PMEM_NAMESPACE_262144MB``, and 2 namespaces (ns6, ns7) of resource"},{"line_number":170,"context_line":"class ``CUSTOM_PMEM_NAMESPACE_MEDIUM``."},{"line_number":171,"context_line":""},{"line_number":172,"context_line":"The \u0027total\u0027 value of each custom resource class is the *number* of the"},{"line_number":173,"context_line":"persistent memory namespaces belong to this resource class."},{"line_number":174,"context_line":""},{"line_number":175,"context_line":"The \u0027max_unit\u0027 is set to the same value as \u0027total\u0027 since it is possible"}],"source_content_type":"text/x-rst","patch_set":14,"id":"bfb3d3c7_9b228db8","line":172,"range":{"start_line":172,"start_character":1,"end_line":172,"end_character":47},"updated":"2019-05-30 03:55:53.000000000","message":"the total value of inventory, not the resource class.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":169,"context_line":"``CUSTOM_PMEM_NAMESPACE_262144MB``, and 2 namespaces (ns6, ns7) of resource"},{"line_number":170,"context_line":"class ``CUSTOM_PMEM_NAMESPACE_MEDIUM``."},{"line_number":171,"context_line":""},{"line_number":172,"context_line":"The \u0027total\u0027 value of each custom resource class is the *number* of the"},{"line_number":173,"context_line":"persistent memory namespaces belong to this resource class."},{"line_number":174,"context_line":""},{"line_number":175,"context_line":"The \u0027max_unit\u0027 is set to the same value as \u0027total\u0027 since it is possible"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_81e2bb2e","line":172,"range":{"start_line":172,"start_character":1,"end_line":172,"end_character":47},"in_reply_to":"bfb3d3c7_9b228db8","updated":"2019-06-19 16:26:29.000000000","message":"+1","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":169,"context_line":"``CUSTOM_PMEM_NAMESPACE_262144MB``, and 2 namespaces (ns6, ns7) of resource"},{"line_number":170,"context_line":"class ``CUSTOM_PMEM_NAMESPACE_MEDIUM``."},{"line_number":171,"context_line":""},{"line_number":172,"context_line":"The \u0027total\u0027 value of each custom resource class is the *number* of the"},{"line_number":173,"context_line":"persistent memory namespaces belong to this resource class."},{"line_number":174,"context_line":""},{"line_number":175,"context_line":"The \u0027max_unit\u0027 is set to the same value as \u0027total\u0027 since it is 
possible"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_d2a7e40a","line":172,"range":{"start_line":172,"start_character":1,"end_line":172,"end_character":47},"in_reply_to":"bfb3d3c7_9b228db8","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":217,"context_line":"some flexibility in the naming of the resource class name. It is up to"},{"line_number":218,"context_line":"the admin to configure the namespaces properly."},{"line_number":219,"context_line":""},{"line_number":220,"context_line":"Different units do not convert freely from one to another while embeded"},{"line_number":221,"context_line":"in custom resource class names. Meaning a request for a 128GB persistent"},{"line_number":222,"context_line":"memory namespace can be fulfilled by a CUSTOM_PMEM_NAMESPACE_128GB"},{"line_number":223,"context_line":"resource, but can not be fulfilled by a CUSTOM_PMEM_NAMESPACE_131072MB"},{"line_number":224,"context_line":"resource even though they are of the same quantity."},{"line_number":225,"context_line":""},{"line_number":226,"context_line":"Persistent memory is by nature NUMA sensitive. However for the initial"},{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_956f11c4","line":224,"range":{"start_line":220,"start_character":0,"end_line":224,"end_character":51},"updated":"2019-06-24 15:31:08.000000000","message":"I understand this paragraph because I\u0027ve been tracking this spec, but to the new reader, it may be clearer to say something like:\n\n .. note:: Resource class names are opaque. For example, a request for\n           CUSTOM_PMEM_NAMESPACE_128GB cannot be fulfilled by a\n           CUSTOM_PMEM_NAMESPACE_131072MB resource even though they are\n           (presumably) the same size.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":217,"context_line":"some flexibility in the naming of the resource class name. It is up to"},{"line_number":218,"context_line":"the admin to configure the namespaces properly."},{"line_number":219,"context_line":""},{"line_number":220,"context_line":"Different units do not convert freely from one to another while embeded"},{"line_number":221,"context_line":"in custom resource class names. Meaning a request for a 128GB persistent"},{"line_number":222,"context_line":"memory namespace can be fulfilled by a CUSTOM_PMEM_NAMESPACE_128GB"},{"line_number":223,"context_line":"resource, but can not be fulfilled by a CUSTOM_PMEM_NAMESPACE_131072MB"},{"line_number":224,"context_line":"resource even though they are of the same quantity."},{"line_number":225,"context_line":""},{"line_number":226,"context_line":"Persistent memory is by nature NUMA sensitive. 
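Following the agreement in this thread that 'total' and 'max_unit' both equal the number of namespaces in a class, a sketch of how the parsed mapping might become placement inventories (illustrative only; in practice this would live in the driver's update_provider_tree path)::

    def pmem_inventories(mapping):
        inventories = {}
        for label, namespaces in mapping.items():
            total = len(namespaces)
            # Labels like '128G' are opaque strings: '128GB' and
            # '131072MB' produce different, non-interchangeable classes.
            inventories['CUSTOM_PMEM_NAMESPACE_%s' % label] = {
                'total': total,      # number of namespaces in this class
                'max_unit': total,   # one instance may consume all of them
                'min_unit': 1,
                'step_size': 1,
                'reserved': 0,
                'allocation_ratio': 1.0,
            }
        return inventories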
However for the initial"},{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_d9a05645","line":224,"range":{"start_line":220,"start_character":0,"end_line":224,"end_character":51},"in_reply_to":"9fb8cfa7_956f11c4","updated":"2019-06-25 09:02:37.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":224,"context_line":"resource even though they are of the same quantity."},{"line_number":225,"context_line":""},{"line_number":226,"context_line":"Persistent memory is by nature NUMA sensitive. However for the initial"},{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"},{"line_number":228,"context_line":"provider of the compute host. Persistent memory NUMA affinity will be"},{"line_number":229,"context_line":"adddressed by a seperate follow-on spec."},{"line_number":230,"context_line":""}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_355e6512","line":227,"range":{"start_line":227,"start_character":44,"end_line":227,"end_character":63},"updated":"2019-06-24 15:31:08.000000000","message":"++","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"},{"line_number":228,"context_line":"provider of the compute host. Persistent memory NUMA affinity will be"},{"line_number":229,"context_line":"adddressed by a seperate follow-on spec."},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"Virtual persistent memory specification"},{"line_number":232,"context_line":"---------------------------------------"},{"line_number":233,"context_line":"Virtual persistent memory information is added to guest hardware flavor"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_7f03cabf","line":230,"updated":"2019-06-17 16:24:36.000000000","message":"What happens if the configured value changes, for example from what you have above to something like \"256G:ns0,ns1|262144MB:ns2,ns3|MEDIUM:ns4,ns5\" and then restarts the compute service.\n\nWill the update_provider_tree call do the necessary work of deleting the existing CUSTOM_PMEM_NAMESPACE_128GB inventory, adding a new CUSTOM_PMEM_NAMESPACE_256GB inventory and modifying the total values for the CUSTOM_PMEM_NAMESPACE_262144MB and CUSTOM_PMEM_NAMESPACE_MEDIUM inventories? 
And note that if there are allocations against any of those resource classes that operation could fail until the servers using the allocations are migrated to another host or deleted - so I\u0027d think we\u0027d fail to start the compute service or at least fail to report the change in inventory.\n\nThis is kind of an implementation detail but I want to make sure that we consider what happens when the configured value changes.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"},{"line_number":228,"context_line":"provider of the compute host. Persistent memory NUMA affinity will be"},{"line_number":229,"context_line":"adddressed by a seperate follow-on spec."},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"Virtual persistent memory specification"},{"line_number":232,"context_line":"---------------------------------------"},{"line_number":233,"context_line":"Virtual persistent memory information is added to guest hardware flavor"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_e1a93790","line":230,"in_reply_to":"9fb8cfa7_455e2f46","updated":"2019-06-19 16:26:29.000000000","message":"one issue we had with the pci manager was that if you messed up the whitelist and removed a pci device that was currently allocated to a guest, we deleted it from the db.\n\nwith the old behavior, when you fixed the whitelist, the pci device which is actually in use could get allocated to a new vm, causing the vm to crash/not start.\n\nwe now only remove it from the db after the guest stops using it.\n\nso if you modify the namespace config option and it would make a namespace that is currently in use by a vm unavailable, we should issue a warning and not free the allocation until the vm has been deleted or moved to another host.\n\nwe could be more aggressive and also error out and stop the compute agent from starting if we detect this, but we do not do that for pci devices; we just dont allow new vms to use the non whitelisted device once the current vm is removed from the host.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"},{"line_number":228,"context_line":"provider of the compute host.
Persistent memory NUMA affinity will be"},{"line_number":229,"context_line":"adddressed by a seperate follow-on spec."},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"Virtual persistent memory specification"},{"line_number":232,"context_line":"---------------------------------------"},{"line_number":233,"context_line":"Virtual persistent memory information is added to guest hardware flavor"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_f9be7a63","line":230,"in_reply_to":"9fb8cfa7_55e67918","updated":"2019-06-25 09:02:37.000000000","message":"Thanks Eric!","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"},{"line_number":228,"context_line":"provider of the compute host. Persistent memory NUMA affinity will be"},{"line_number":229,"context_line":"adddressed by a seperate follow-on spec."},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"Virtual persistent memory specification"},{"line_number":232,"context_line":"---------------------------------------"},{"line_number":233,"context_line":"Virtual persistent memory information is added to guest hardware flavor"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_455e2f46","line":230,"in_reply_to":"9fb8cfa7_7f03cabf","updated":"2019-06-18 07:32:59.000000000","message":"Yes, we can do a check in the init stage of the virt driver. The check fetches the instance info from libvirt, looks at the vpmem namespaces allocated to the instance, and ensures none of them has been removed from the changed config. Fail to start the compute service if an allocated vpmem namespace was removed from the config.\n\nupdate_provider_tree will update the inventories for the new config.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"},{"line_number":228,"context_line":"provider of the compute host. Persistent memory NUMA affinity will be"},{"line_number":229,"context_line":"adddressed by a seperate follow-on spec."},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"Virtual persistent memory specification"},{"line_number":232,"context_line":"---------------------------------------"},{"line_number":233,"context_line":"Virtual persistent memory information is added to guest hardware flavor"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_55e67918","line":230,"in_reply_to":"9fb8cfa7_9bf536f0","updated":"2019-06-24 15:31:08.000000000","message":"I agree with failing to start the compute service if an in-use namespace has been (re)moved in the config. And then all the other stuff Sean mentions \"just works\".\n\nNote that this could wind up being a bit *too* aggressive in certain cases. For example, FOO:ns0,ns1,ns2 are all equivalent, so libvirt can assign one to a guest arbitrarily. Let\u0027s say it was ns2. Now the admin decides to remove/resize/etc. one of the FOOs.
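The startup guard agreed on in this thread could look roughly like the following (names are hypothetical; 'assigned' would be recovered from the libvirt domain XML of existing guests)::

    def check_inuse_namespaces(assigned, configured):
        # assigned: {instance_uuid: set of namespace names in use}
        # configured: set of namespace names present in the nova config
        for uuid, namespaces in sorted(assigned.items()):
            missing = namespaces - configured
            if missing:
                # Refuse to start rather than risk handing an in-use
                # namespace to a new guest, as in the PCI whitelist case.
                raise RuntimeError(
                    'instance %s uses pmem namespaces %s that were '
                    'removed from the config'
                    % (uuid, ', '.join(sorted(missing))))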
She might be forgiven for thinking *she* can pick one arbitrarily since they\u0027re all equivalent; but if she picks ns2, we\u0027ll fail to start the compute service.\n\n...but I\u0027m willing to live with that.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"73aea58d57fd4d48310b684529172548d1260780","unresolved":false,"context_lines":[{"line_number":227,"context_line":"iteration, the resource inventories are put directly under ROOT resource"},{"line_number":228,"context_line":"provider of the compute host. Persistent memory NUMA affinity will be"},{"line_number":229,"context_line":"adddressed by a seperate follow-on spec."},{"line_number":230,"context_line":""},{"line_number":231,"context_line":"Virtual persistent memory specification"},{"line_number":232,"context_line":"---------------------------------------"},{"line_number":233,"context_line":"Virtual persistent memory information is added to guest hardware flavor"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_9bf536f0","line":230,"in_reply_to":"9fb8cfa7_e1a93790","updated":"2019-06-20 08:32:52.000000000","message":"The way introduced by Alex actually stops the compute agent from starting once it is discovered that a namespace used by an instance is deleted in the config. This aligns with the \"aggressive\" way you mentioned. Though implementation details, I will mention it in the next revision.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":237,"context_line":""},{"line_number":238,"context_line":"``RCNAME`` is the variable part of a resource class name as defined"},{"line_number":239,"context_line":"in the `Persistent memory namespace management and resource tracking`_"},{"line_number":240,"context_line":"section. Each appearence of a $RCNAME means a requirement to one"},{"line_number":241,"context_line":"persistent memory namespace of ``CUSTOM_PMEM_NAMESPACE_$RCNAME``"},{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_5fe046fb","line":243,"range":{"start_line":240,"start_character":9,"end_line":243,"end_character":29},"updated":"2019-06-17 16:24:36.000000000","message":"This seems pretty confusing to me, can you give an example? So this means if I have a flavor with extra spec:\n\n  hw:pmem \u003d 128GB,128GB\n\nIt means the server using that flavor will get two namespaces allocated from the CUSTOM_PMEM_NAMESPACE_128GB inventory, correct?\n\nCouldn\u0027t we have encoded the RCNAME in the extra spec like how hw:numa_cpus.N works? e.g. 
hw:pmem.128GB\u003d2","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"73aea58d57fd4d48310b684529172548d1260780","unresolved":false,"context_lines":[{"line_number":237,"context_line":""},{"line_number":238,"context_line":"``RCNAME`` is the variable part of a resource class name as defined"},{"line_number":239,"context_line":"in the `Persistent memory namespace management and resource tracking`_"},{"line_number":240,"context_line":"section. Each appearence of a $RCNAME means a requirement to one"},{"line_number":241,"context_line":"persistent memory namespace of ``CUSTOM_PMEM_NAMESPACE_$RCNAME``"},{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_db396ea3","line":243,"range":{"start_line":240,"start_character":9,"end_line":243,"end_character":29},"in_reply_to":"9fb8cfa7_0139eb3f","updated":"2019-06-20 08:32:52.000000000","message":"Another advantage of hw:pmem \u003d $RCNAME[,$RCNAME] is it indicates an order of the virtual pmem devices if that is in concern of the end user. So if not fundamental, I\u0027d stay to the currently proposed approach.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":237,"context_line":""},{"line_number":238,"context_line":"``RCNAME`` is the variable part of a resource class name as defined"},{"line_number":239,"context_line":"in the `Persistent memory namespace management and resource tracking`_"},{"line_number":240,"context_line":"section. Each appearence of a $RCNAME means a requirement to one"},{"line_number":241,"context_line":"persistent memory namespace of ``CUSTOM_PMEM_NAMESPACE_$RCNAME``"},{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. 
If guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_0139eb3f","line":243,"range":{"start_line":240,"start_character":9,"end_line":243,"end_character":29},"in_reply_to":"9fb8cfa7_0e6859b8","updated":"2019-06-19 16:26:29.000000000","message":"i agree we should reserve the \u0027.\u0027 notation for numa but\nwe could do something else like\n\nhw:pmem_128gb\u003d2 which could then be extended to hw:pmem_128GB.0\u003d2 when we want to also encode numa\n\nso the general form would be hw:pmem_$RCNAME.$numa_node\u003d$amount\n\nthe problem with that is it technically would be reserving the entire hw:pmem_* space of extraspecs for this use, meaning if we need a hw:pmem_mode extra extraspec in the future we could break people, even if it is unlikely.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":237,"context_line":""},{"line_number":238,"context_line":"``RCNAME`` is the variable part of a resource class name as defined"},{"line_number":239,"context_line":"in the `Persistent memory namespace management and resource tracking`_"},{"line_number":240,"context_line":"section. Each appearence of a $RCNAME means a requirement to one"},{"line_number":241,"context_line":"persistent memory namespace of ``CUSTOM_PMEM_NAMESPACE_$RCNAME``"},{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_1091ef99","line":243,"range":{"start_line":240,"start_character":9,"end_line":243,"end_character":29},"in_reply_to":"9fb8cfa7_43ffc766","updated":"2019-06-24 15:31:08.000000000","message":"I also don\u0027t like the idea of encoding the $RCNAME in the key.\n\nAnother option is:\n\n hw:pmem \u003d $RCNAME:$COUNT[,$RCNAME:$COUNT...]\n\ne.g.\n\n hw:pmem \u003d 128GB:2,64GB:1\n\nBut if we don\u0027t like that, I\u0027m fine with $RCNAME[,$RCNAME...]","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"32c2033549c09c1b7f7bf2e08a07ffc44f0501ff","unresolved":false,"context_lines":[{"line_number":237,"context_line":""},{"line_number":238,"context_line":"``RCNAME`` is the variable part of a resource class name as defined"},{"line_number":239,"context_line":"in the `Persistent memory namespace management and resource tracking`_"},{"line_number":240,"context_line":"section. Each appearence of a $RCNAME means a requirement to one"},{"line_number":241,"context_line":"persistent memory namespace of ``CUSTOM_PMEM_NAMESPACE_$RCNAME``"},{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node.
If guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_0e6859b8","line":243,"range":{"start_line":240,"start_character":9,"end_line":243,"end_character":29},"in_reply_to":"9fb8cfa7_5fe046fb","updated":"2019-06-18 01:54:03.000000000","message":"Yes, your interpretation of hw:pmem \u003d 128GB,128GB is correct.\n\nThe reason that we do not adopt hw:pmem.128GB\u003d2 is we want to reserve the dot and the space behind it (.128GB) to model guest NUMA topology in future (for example, hw:pmem.0 \u003d 128GB,128GB could mean on guest NUMA node 0 we have two namespaces allocated from CUSTOM_PMEM_NAMESPACE_128G inventory)","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"ac35af69eed8ca766734e9f7f5e6f5701abc5782","unresolved":false,"context_lines":[{"line_number":237,"context_line":""},{"line_number":238,"context_line":"``RCNAME`` is the variable part of a resource class name as defined"},{"line_number":239,"context_line":"in the `Persistent memory namespace management and resource tracking`_"},{"line_number":240,"context_line":"section. Each appearence of a $RCNAME means a requirement to one"},{"line_number":241,"context_line":"persistent memory namespace of ``CUSTOM_PMEM_NAMESPACE_$RCNAME``"},{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_43ffc766","line":243,"range":{"start_line":240,"start_character":9,"end_line":243,"end_character":29},"in_reply_to":"9fb8cfa7_876cf908","updated":"2019-06-21 01:38:47.000000000","message":"OK, that makes sense -- ordering is bad :). \nI just do not want to support hw:pmem_128g\u003d2, it makes implementation more complicated and the benefits not worth the engineering effort. As I said, if it is not fundamental, I\u0027d like to stick to hw:pmem \u003d $RCNAME[,$RCNAME]","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"d0b738c4ac2471aedb8b558c68e3bd65d3244d0e","unresolved":false,"context_lines":[{"line_number":237,"context_line":""},{"line_number":238,"context_line":"``RCNAME`` is the variable part of a resource class name as defined"},{"line_number":239,"context_line":"in the `Persistent memory namespace management and resource tracking`_"},{"line_number":240,"context_line":"section. Each appearence of a $RCNAME means a requirement to one"},{"line_number":241,"context_line":"persistent memory namespace of ``CUSTOM_PMEM_NAMESPACE_$RCNAME``"},{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. 
If guest NUMA topology"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_876cf908","line":243,"range":{"start_line":240,"start_character":9,"end_line":243,"end_character":29},"in_reply_to":"9fb8cfa7_db396ea3","updated":"2019-06-20 13:24:17.000000000","message":"the order i think is actually a bad thing.\nfor one, if we ever support hot plug, that order can be messed up, as libvirt does not guarantee the order in that case as far as i am aware. second, placement wont keep track of the order, so we would have to try and sort them in nova, and if we dont have numa awareness that could be sub optimal, as we might end up with memory devices like this: host numa 0, host numa 1, host numa 0, host numa 1\n\nfor guests with virtual numa topologies, until we track numa affinity in placement or in the numa topology filter, the ordering will prevent us from passing the namespaces from the numa node the virtual numa node is pinned to\n\ne.g. for hw:numa_node \u003d 2 i would want to reorder\n\"virtual numa node 0:(host numa 0, host numa 1), virtual numa node 1:(host numa 0, host numa 1)\" to be\n\n\"virtual numa node 0:(host numa 0, host numa 0), virtual numa node 1:(host numa 1, host numa 1)\"\n\nor\n\"virtual numa node 0:(host numa 1, host numa 1), virtual numa node 1:(host numa 0, host numa 0)\"\n\nif we try to provide order guarantees we cant do that.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"},{"line_number":247,"context_line":"is specified in the flavor, the guest virtual persistent memory"},{"line_number":248,"context_line":"devices are put under guest NUMA node 0. If guest NUMA topology is not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_1fe2ce02","line":245,"range":{"start_line":245,"start_character":38,"end_line":245,"end_character":68},"updated":"2019-06-17 16:24:36.000000000","message":"each virtual persistent memory *namespace*?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0693d134468888bc9ffc2fb070485339f19bb3df","unresolved":false,"context_lines":[{"line_number":242,"context_line":"resource class. So there can be multiple appearences of the same"},{"line_number":243,"context_line":"$RCNAME in one specification."},{"line_number":244,"context_line":""},{"line_number":245,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":246,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"},{"line_number":247,"context_line":"is specified in the flavor, the guest virtual persistent memory"},{"line_number":248,"context_line":"devices are put under guest NUMA node 0.
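For the `hw:pmem = $RCNAME[,$RCNAME...]` form discussed in this thread, a sketch of the translation into placement resource amounts, deliberately discarding ordering as argued above (function name hypothetical)::

    from collections import Counter

    def pmem_resources(extra_specs):
        value = extra_specs.get('hw:pmem', '')
        labels = [label.strip() for label in value.split(',') if label.strip()]
        # Each occurrence of a label requests one namespace of the
        # corresponding CUSTOM_PMEM_NAMESPACE_* resource class.
        return {'CUSTOM_PMEM_NAMESPACE_%s' % label: count
                for label, count in Counter(labels).items()}

    # hw:pmem = 512GB,512GB,1024GB requests two namespaces of
    # CUSTOM_PMEM_NAMESPACE_512GB and one of CUSTOM_PMEM_NAMESPACE_1024GB.
    assert pmem_resources({'hw:pmem': '512GB,512GB,1024GB'}) == {
        'CUSTOM_PMEM_NAMESPACE_512GB': 2, 'CUSTOM_PMEM_NAMESPACE_1024GB': 1}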
If guest NUMA topology is not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_16eff9e9","line":245,"range":{"start_line":245,"start_character":38,"end_line":245,"end_character":68},"in_reply_to":"9fb8cfa7_1fe2ce02","updated":"2019-06-19 07:45:08.000000000","message":"No, we are backing up each \"virtual persistent memory\" device with one \"physical persistent memory namespace\", however multiple virtual namespaces can be created on top of the \"virtual persistent memory\" in side of the VM instance. Basically VM sees a non-volatile memory range.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":248,"context_line":"devices are put under guest NUMA node 0. If guest NUMA topology is not"},{"line_number":249,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":250,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_7fbaeae1","line":251,"range":{"start_line":251,"start_character":0,"end_line":251,"end_character":6},"updated":"2019-06-17 16:24:36.000000000","message":"Please","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":248,"context_line":"devices are put under guest NUMA node 0. If guest NUMA topology is not"},{"line_number":249,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":250,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_9fbf9ecf","line":251,"range":{"start_line":251,"start_character":55,"end_line":251,"end_character":66},"updated":"2019-06-17 16:24:36.000000000","message":"constructing","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":248,"context_line":"devices are put under guest NUMA node 0. 
If guest NUMA topology is not"},{"line_number":249,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":250,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_d25cc4ee","line":251,"range":{"start_line":251,"start_character":0,"end_line":251,"end_character":6},"in_reply_to":"9fb8cfa7_7fbaeae1","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":248,"context_line":"devices are put under guest NUMA node 0. If guest NUMA topology is not"},{"line_number":249,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":250,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_72739881","line":251,"range":{"start_line":251,"start_character":55,"end_line":251,"end_character":66},"in_reply_to":"9fb8cfa7_9fbf9ecf","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"},{"line_number":255,"context_line":"applied. And from the perspective of any other parts of Nova, this"},{"line_number":256,"context_line":"guest is still a non-NUMA guest."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_7f9f4a65","line":256,"range":{"start_line":254,"start_character":19,"end_line":256,"end_character":32},"updated":"2019-06-17 16:24:36.000000000","message":"Won\u0027t this affect our resource tracking and claims processing? 
If you\u0027re going to use numa node 0 but not actually *claim* it via the limit provided from the NUMATopologyFilter and subsequent resource claim in the nova-compute service, couldn\u0027t we hit conflicts if something else is already using numa node 0?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"},{"line_number":255,"context_line":"applied. And from the perspective of any other parts of Nova, this"},{"line_number":256,"context_line":"guest is still a non-NUMA guest."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_b9424267","line":256,"range":{"start_line":254,"start_character":19,"end_line":256,"end_character":32},"in_reply_to":"9fb8cfa7_10ac8f8a","updated":"2019-06-25 09:02:37.000000000","message":"Libvirt requires persistent memory to be associated with a guest numa node. So if the user does not specify numa topology for guests, then we need to construct one in order to fit persistent memory into the libvirt domain xml. We actually discussed this at PTG.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"},{"line_number":255,"context_line":"applied. And from the perspective of any other parts of Nova, this"},{"line_number":256,"context_line":"guest is still a non-NUMA guest."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_980c8a3c","line":256,"range":{"start_line":254,"start_character":19,"end_line":256,"end_character":32},"in_reply_to":"9fb8cfa7_7f9f4a65","updated":"2019-06-18 07:32:59.000000000","message":"This is talking about an instance which doesn\u0027t have a guest numa node. We just create a guest numa node in the virt driver layer, and won\u0027t bind this guest numa node to any host numa node. So we needn\u0027t claim or track it. The instance\u0027s vcpus are still floating on all the host pcpus.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":251,"context_line":"please note, under the second circumstance (implicitly construting"},{"line_number":252,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":253,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":254,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"},{"line_number":255,"context_line":"applied. And from the perspective of any other parts of Nova, this"},{"line_number":256,"context_line":"guest is still a non-NUMA guest."},{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_10ac8f8a","line":256,"range":{"start_line":254,"start_character":19,"end_line":256,"end_character":32},"in_reply_to":"9fb8cfa7_980c8a3c","updated":"2019-06-24 15:31:08.000000000","message":"This has Sean\u0027s fingerprints on it.\n\nWhy do we need an implicit NUMA 0 at all? (See also below.)","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_ffceda59","line":260,"range":{"start_line":260,"start_character":1,"end_line":260,"end_character":14},"updated":"2019-06-17 16:24:36.000000000","message":"Is this numa node implied or explicitly defined in the flavor using hw:numa_nodes\u003d1?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"73aea58d57fd4d48310b684529172548d1260780","unresolved":false,"context_lines":[{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_db088e92","line":260,"range":{"start_line":260,"start_character":1,"end_line":260,"end_character":14},"in_reply_to":"9fb8cfa7_41c283eb","updated":"2019-06-20 08:32:52.000000000","message":"This actually kills me :) The original spec proposed as you said that this only supports instances with guest numa topology.
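For reference, the libvirt NVDIMM device behind the guest-NUMA requirement debated here has roughly the following shape; a sketch that renders it (values illustrative; the element layout follows libvirt's nvdimm memory-device syntax)::

    NVDIMM_TEMPLATE = """\
    <memory model='nvdimm' access='shared'>
      <source>
        <path>{devpath}</path>
      </source>
      <target>
        <size unit='KiB'>{size_kib}</size>
        <node>{guest_node}</node>
      </target>
    </memory>
    """

    def nvdimm_xml(devpath, size_kib, guest_node=0):
        # <node> must name a guest NUMA cell; guest_node defaults to the
        # implicitly constructed NUMA node 0 for flavors that define no
        # NUMA topology, per the thread above.
        return NVDIMM_TEMPLATE.format(
            devpath=devpath, size_kib=size_kib, guest_node=guest_node)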
However in PTG, someone (IIRC from Redhat) mentioned that they have customers where the majority of VM instances are without guest numa topology and would like to have pmem work. That directed the discussion to a conclusion that VM instances without numa topology should also be supported.\nTo make things better, I think the typical or recommended way for a server to host VMs is that either all the VM instances on a certain host have numa topology or all the VM instances on that host float. For floating guests, cpu/memory access performance is implicitly secondary. So the interference between guests as far as QPI bandwidth is concerned is not so critical on a host with all floating VMs.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_377564fb","line":260,"range":{"start_line":260,"start_character":1,"end_line":260,"end_character":14},"in_reply_to":"9fb8cfa7_652013b2","updated":"2019-06-19 16:26:29.000000000","message":"well we could, its what we do for hugepages and cpu pinning, but i would prefer it to be explicit and make it an error to not set it.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_41c283eb","line":260,"range":{"start_line":260,"start_character":1,"end_line":260,"end_character":14},"in_reply_to":"9fb8cfa7_b69ced5d","updated":"2019-06-19 16:26:29.000000000","message":"i think we should really only be supporting this for guests with a numa topology. i dont think it makes sense for floating instances to use PMEM; the performance impacts will be too severe and it will affect other instances on the host too, not just the\nfloating instance. UPI/QPI bandwidth is finite and we should try not to waste it.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_50d0c7f7","line":260,"range":{"start_line":260,"start_character":1,"end_line":260,"end_character":14},"in_reply_to":"9fb8cfa7_db088e92","updated":"2019-06-24 15:31:08.000000000","message":"I recall that one of the major motivations for this feature was to make large amounts of cheap memory available. That has nothing to do with NUMA. IMO we should not be dragging NUMA into this implementation where it\u0027s not explicitly requested.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0693d134468888bc9ffc2fb070485339f19bb3df","unresolved":false,"context_lines":[{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_b69ced5d","line":260,"range":{"start_line":260,"start_character":1,"end_line":260,"end_character":14},"in_reply_to":"9fb8cfa7_ffceda59","updated":"2019-06-19 07:45:08.000000000","message":"I will split this case into two: one with an explicit NUMA node definition, another without a NUMA node defined.\n\nAnd the examples below need to be refined also.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":257,"context_line":""},{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_652013b2","line":260,"range":{"start_line":260,"start_character":1,"end_line":260,"end_character":14},"in_reply_to":"9fb8cfa7_ffceda59","updated":"2019-06-18 07:32:59.000000000","message":"no, we didn\u0027t create the guest numa node implicitly for the instance. I think this example should be updated. This case was added when we still supported numa in this spec, but now this is the case for floating instances.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"a92fb2dadd062d455974532728511f81f2997def","unresolved":false,"context_lines":[{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"},{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_eed764ed","line":261,"updated":"2019-06-12 06:17:03.000000000","message":"I found one thing which we didn\u0027t think about before.\n\nThat is, what happens when both the extra spec hw:pmem and the resources extra spec are used by the user.\n\nIt should be ok to support both. We need to define how to merge the requests from both extra specs.\n\nBut is it really worth supporting both? First, hw:pmem is more flexible; e.g. we have order info in hw:pmem. For example, hw:pmem\u003d512GB,1024GB,512GB. If we turn this into a resources extra spec, it will be resources\u003dCUSTOM_VPMEM_512GB:2,CUSTOM_VPMEM_1024GB:1, and then we lose the order.\n\nBut if we reject the resources extra spec for CUSTOM_VPMEM_*, that is also strange.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"d0b738c4ac2471aedb8b558c68e3bd65d3244d0e","unresolved":false,"context_lines":[{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"},{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_82086728","line":261,"in_reply_to":"9fb8cfa7_36c1dd2f","updated":"2019-06-20 13:24:17.000000000","message":"it would be a precedent, as currently we dont do that type of validation for resources: extra specs.\nim not against rejecting it but im not sure how people feel about having to check in the api for the presence of\nCUSTOM_PMEM_NAMESPACE_* in the flavor for each boot request.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"0693d134468888bc9ffc2fb070485339f19bb3df","unresolved":false,"context_lines":[{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"},{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_36c1dd2f","line":261,"in_reply_to":"9fb8cfa7_782492af","updated":"2019-06-19
07:45:08.000000000","message":"Yes, agree. Specifying pmem by resource extra specs will lose guest NUMA information which we definitely wants in future. So why not reject with an warning or error?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"},{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_301c33c0","line":261,"in_reply_to":"9fb8cfa7_82086728","updated":"2019-06-24 15:31:08.000000000","message":"We should definitely not make an effort to \"merge\" PMEM requests of different formats.\n\nIMO we should only support the non-Placement-ese form. In addition to the reasons already stated, this will give us more power in the future e.g. to create the appropriate affinity syntax in the placement query, which would be difficult or impossible for the operator to do.\n\nI agree we don\u0027t yet have a precedent for a hard check to fail if the Placement-ese resource classes are specified. I would be in favor of setting that precedent - and applying it to other efforts that play in a similar space (like SEV).","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"77366375098c8feb3e13f842f6364bc221a1e32f","unresolved":false,"context_lines":[{"line_number":258,"context_line":"Examples::"},{"line_number":259,"context_line":""},{"line_number":260,"context_line":" One NUMA node, one 512GB virtual persistent memory:"},{"line_number":261,"context_line":"     hw:pmem \u003d 512GB"},{"line_number":262,"context_line":""},{"line_number":263,"context_line":" One NUMA node, two 512GB virtual persistent memory:"},{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_782492af","line":261,"in_reply_to":"9fb8cfa7_eed764ed","updated":"2019-06-12 12:06:26.000000000","message":"The order of hw:pmem isn\u0027t very useful. 
The useful info is about support numa in the future, like we can support numa by \"hw:pmem.[guest numa id] \u003d [list of pmems]\".","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"77a6a70df51d6fd1c6f981c13b6b97db8b324f70","unresolved":false,"context_lines":[{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"},{"line_number":265,"context_line":""},{"line_number":266,"context_line":" Two NUMA nodes, two 512GB virtual persistent memory:"},{"line_number":267,"context_line":"     hw:pmem \u003d 512GB,512GB"},{"line_number":268,"context_line":""},{"line_number":269,"context_line":"     Both of the two virtual persistent memory devices"},{"line_number":270,"context_line":"     are put under NUMA node 0."}],"source_content_type":"text/x-rst","patch_set":14,"id":"bfb3d3c7_e64f7068","line":267,"range":{"start_line":267,"start_character":5,"end_line":267,"end_character":26},"updated":"2019-05-30 03:55:53.000000000","message":"For two numa nodes, you should have extra spec \u0027hw:numa_nodes\u003d2\u0027","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"},{"line_number":265,"context_line":""},{"line_number":266,"context_line":" Two NUMA nodes, two 512GB virtual persistent memory:"},{"line_number":267,"context_line":"     hw:pmem \u003d 512GB,512GB"},{"line_number":268,"context_line":""},{"line_number":269,"context_line":"     Both of the two virtual persistent memory devices"},{"line_number":270,"context_line":"     are put under NUMA node 0."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_b8262eba","line":267,"range":{"start_line":267,"start_character":5,"end_line":267,"end_character":26},"in_reply_to":"9fb8cfa7_3fe552dd","updated":"2019-06-18 07:32:59.000000000","message":"Matt is right. Those \u0027512GB\u0027 is actually a label, it isn\u0027t real integer we can do the math.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":264,"context_line":"     hw:pmem \u003d 512GB,512GB"},{"line_number":265,"context_line":""},{"line_number":266,"context_line":" Two NUMA nodes, two 512GB virtual persistent memory:"},{"line_number":267,"context_line":"     hw:pmem \u003d 512GB,512GB"},{"line_number":268,"context_line":""},{"line_number":269,"context_line":"     Both of the two virtual persistent memory devices"},{"line_number":270,"context_line":"     are put under NUMA node 0."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_b0a10349","line":267,"range":{"start_line":267,"start_character":5,"end_line":267,"end_character":26},"in_reply_to":"9fb8cfa7_411843d2","updated":"2019-06-24 15:31:08.000000000","message":"See above, we should not tie PMEM to NUMA.\n\nAnd\n\nWe have already discussed numa node dot-notation in the keys and agreed that it\u0027s valid, but deferred and not in scope for this spec.\n\nAnd\n\nWe have already discussed and rejected attaching syntactic significance to the values (i.e. 
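For illustration, a minimal sketch of the translation Eric describes, from the
non-Placement-ese hw:pmem extra spec to a placement resource request. The
helper name and the hard rejection of Placement-ese CUSTOM_PMEM_NAMESPACE_*
overrides are assumptions drawn from this thread, not the actual Nova code:

    from collections import Counter

    def pmem_extra_spec_to_resources(extra_specs):
        """Translate hw:pmem=LABEL[,LABEL...] into placement resource class
        amounts, e.g. {'CUSTOM_PMEM_NAMESPACE_512GB': 2}.

        The values are opaque labels naming operator-configured namespaces;
        they are not sizes we can do arithmetic on.
        """
        # Hypothetical hard check discussed in this thread: reject direct
        # Placement-ese overrides of the PMEM resource classes.
        for key in extra_specs:
            if key.startswith('resources') and 'CUSTOM_PMEM_NAMESPACE_' in key:
                raise ValueError('request PMEM via hw:pmem, not resources:*')
        labels = [label.strip()
                  for label in extra_specs.get('hw:pmem', '').split(',')
                  if label.strip()]
        return dict(Counter('CUSTOM_PMEM_NAMESPACE_' + label.upper()
                            for label in labels))

    # hw:pmem = 512GB,512GB  ->  {'CUSTOM_PMEM_NAMESPACE_512GB': 2}
    print(pmem_extra_spec_to_resources({'hw:pmem': '512GB,512GB'}))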
Line 267, on "Two NUMA nodes, two 512GB virtual persistent memory: hw:pmem = 512GB,512GB":

Alex Xu (2019-05-30): For two NUMA nodes, you should have the extra spec
'hw:numa_nodes=2'.

sean mooney (2019-06-13): Right, so it should be hw:numa_nodes=2 and
hw:pmem=1024.

Matt Riedemann (2019-06-17):
    > right
    > so it should be
    > hw:numa_nodes=2 and hw:pmem=1024
Reading above, the value wouldn't be hw:pmem=1024, because it's not about the
total size; it's about the number of namespaces per resource class, so they
are saying they want two 512GB namespaces for the server. I think it's a bit
confusing how this is modeled, as noted above.

Alex Xu (2019-06-18): Matt is right. Those '512GB' values are actually labels;
they aren't real integers we can do math on.

sean mooney (2019-06-19): Yes, this has changed since v12, where it was the
actual size. I'm not sure why we changed; the old version made more sense to
me:

    One NUMA node, one 512GB vPMEM:
         hw:numa_nodes = 1
         hw:numa_pmem = 512GB

    One NUMA node, two 512GB vPMEM:
         hw:numa_nodes = 1
         hw:numa_pmem.0.0 = 512GB
         hw:numa_pmem.0.1 = 512GB

    Two NUMA nodes, each node has one 512GB vPMEM:
         hw:numa_nodes = 2
         hw:numa_pmem.0.0 = 512GB
         hw:numa_pmem.1.0 = 512GB

         Below notation has the same semantics as the above:
         hw:numa_nodes = 2
         hw:numa_pmem = 1024GB

As written this is consistent with the new spec, but I think we have regressed
in readability and functionality. I also think we should still be using
hw:numa_pmem. We do not necessarily need to track the NUMA affinity in
placement, but I think we should be doing it in nova. This may have been
changed when NUMA was deferred from this spec, but I think that is a mistake.
I don't think it is reasonable to support this feature for a non-NUMA guest,
as cross-NUMA access will degrade the performance of all guests on the host,
not just the guest with the pmem device. Memory bandwidth is a shared resource
that all processes must share, so if you map a pmem namespace from a remote
NUMA node you will reduce the amount of memory and QPI/UPI bandwidth available
for other guests.

Eric Fried (2019-06-24): See above, we should not tie PMEM to NUMA. And we
have already discussed NUMA node dot-notation in the keys and agreed that it's
valid, but deferred and not in scope for this spec. And we have already
discussed and rejected attaching syntactic significance to the values (i.e.
interpreting them as sizes) in favor of opaque labels corresponding to
configured namespaces. Let's not rehash all of that.

Line 288, on "Specifying virtual persistent memory in image metadata is not supported for this iteration":

Eric Fried (2019-06-24): Nor should it be; we don't allow resource requests
via image metadata.

Rui Zang (2019-06-25): I will remove this line.
Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":294,"context_line":"once the virtual persisent memory is no longer associated with any VM"},{"line_number":295,"context_line":"instance. Otherwise there will be security concerns."},{"line_number":296,"context_line":"Since persistent memory devices are typically of large size, this may"},{"line_number":297,"context_line":"introduce a performance penalty to guest deletion or any other actions"},{"line_number":298,"context_line":"involving erasing PMEM namespaces."},{"line_number":299,"context_line":"The standard I/O APIs (read/write) cannot be used with DAX (direct access)"},{"line_number":300,"context_line":"devices. The nova compute libvirt driver uses `daxio`_ utility for this"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_df43d6ae","line":297,"range":{"start_line":297,"start_character":50,"end_line":297,"end_character":70},"updated":"2019-06-17 16:24:36.000000000","message":"Do you mean other actions like cold migration / resize, unshelve, evacuate, etc?\n\nSpeaking of evacuate, have we considered what will happen if/when a server using pmem is evacuated from host A to host B and then host A is restored and the nova-compute service on host A comes back up? Will we properly cleanup any previously used namespaces on host A in that case?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":294,"context_line":"once the virtual persisent memory is no longer associated with any VM"},{"line_number":295,"context_line":"instance. Otherwise there will be security concerns."},{"line_number":296,"context_line":"Since persistent memory devices are typically of large size, this may"},{"line_number":297,"context_line":"introduce a performance penalty to guest deletion or any other actions"},{"line_number":298,"context_line":"involving erasing PMEM namespaces."},{"line_number":299,"context_line":"The standard I/O APIs (read/write) cannot be used with DAX (direct access)"},{"line_number":300,"context_line":"devices. The nova compute libvirt driver uses `daxio`_ utility for this"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_214bcf87","line":297,"range":{"start_line":297,"start_character":50,"end_line":297,"end_character":70},"in_reply_to":"9fb8cfa7_383f9eb6","updated":"2019-06-19 16:26:29.000000000","message":"ok so the data will remain until the agent is run but that would be true of the root disk images too so i think that is fine.\n\nas long as the namespace is erased before it is used again that should be ok although on start up is proably best  if the instance has indeed been evacuated.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":294,"context_line":"once the virtual persisent memory is no longer associated with any VM"},{"line_number":295,"context_line":"instance. 
Otherwise there will be security concerns."},{"line_number":296,"context_line":"Since persistent memory devices are typically of large size, this may"},{"line_number":297,"context_line":"introduce a performance penalty to guest deletion or any other actions"},{"line_number":298,"context_line":"involving erasing PMEM namespaces."},{"line_number":299,"context_line":"The standard I/O APIs (read/write) cannot be used with DAX (direct access)"},{"line_number":300,"context_line":"devices. The nova compute libvirt driver uses `daxio`_ utility for this"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_383f9eb6","line":297,"range":{"start_line":297,"start_character":50,"end_line":297,"end_character":70},"in_reply_to":"9fb8cfa7_df43d6ae","updated":"2019-06-18 07:32:59.000000000","message":"good point, I didn\u0027t think about this case. But I checked the code, we have code to clean up the instance already evacuated, and we execute the virt driver\u0027s destroy method, we already implement the vpmem cleanup in the destroy method, so it will work as nature","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":297,"context_line":"introduce a performance penalty to guest deletion or any other actions"},{"line_number":298,"context_line":"involving erasing PMEM namespaces."},{"line_number":299,"context_line":"The standard I/O APIs (read/write) cannot be used with DAX (direct access)"},{"line_number":300,"context_line":"devices. The nova compute libvirt driver uses `daxio`_ utility for this"},{"line_number":301,"context_line":"purpose."},{"line_number":302,"context_line":""},{"line_number":303,"context_line":"VM resize"},{"line_number":304,"context_line":"---------"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_9f7c7eec","line":301,"range":{"start_line":300,"start_character":9,"end_line":301,"end_character":8},"updated":"2019-06-17 16:24:36.000000000","message":"This is an implementation detail but we should use privsep when executing daxio commands.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":297,"context_line":"introduce a performance penalty to guest deletion or any other actions"},{"line_number":298,"context_line":"involving erasing PMEM namespaces."},{"line_number":299,"context_line":"The standard I/O APIs (read/write) cannot be used with DAX (direct access)"},{"line_number":300,"context_line":"devices. 
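A minimal sketch of the startup flow Alex refers to; the names here are
illustrative pseudocode standing in for the real nova-compute paths, not the
actual code:

    # Illustrative only; the real flow lives in nova.compute.manager.
    class FakeDriver:
        def destroy(self, instance):
            # Per Alex's comment, the libvirt driver's destroy() is where
            # the vpmem cleanup (namespace erase) is implemented.
            print('destroying %s and erasing its PMEM namespaces' % instance)

    def init_host_cleanup(evacuated_instances, driver):
        """On nova-compute startup, instances that were evacuated away while
        this host was down are destroyed locally, which also wipes the
        namespaces they were using."""
        for instance in evacuated_instances:
            driver.destroy(instance)

    init_host_cleanup(['instance-0001'], FakeDriver())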
Line 301, on "The nova compute libvirt driver uses `daxio`_ utility for this purpose":

Matt Riedemann (2019-06-17): This is an implementation detail, but we should
use privsep when executing daxio commands.

Alex Xu (2019-06-18): Yup.

Line 303, on "VM resize":

sean mooney (2019-06-19): Can you add a section for rebuild and
shelve/unshelve? For rebuild I am expecting the pmem namespace to be zeroed
out. For shelve/unshelve you can refer to the lack of snapshot support and
simply state that the pmem namespace data will not be preserved and that it
will be erased when the VM is shelve-offloaded.

Rui Zang (2019-06-21): Sure, there is a section for shelve/unshelve below, so
I will add something for rebuild, and also enrich the shelve/unshelve section
with what you stated here if it is not already covered.
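Matt's privsep point, sketched. This is not the merged implementation, just an
illustration of running daxio under oslo.privsep to zero a DAX device (daxio's
-z/--zero flag zeroes the output device); the context and function names are
invented for the example:

    from oslo_concurrency import processutils
    from oslo_privsep import capabilities
    from oslo_privsep import priv_context

    # A privsep context similar in spirit to nova's sys_admin context;
    # running it for real requires the privsep daemon to be configured.
    vpmem_pctxt = priv_context.PrivContext(
        'vpmem_demo',
        cfg_section='vpmem_demo',
        pypath=__name__ + '.vpmem_pctxt',
        capabilities=[capabilities.CAP_SYS_ADMIN],
    )

    @vpmem_pctxt.entrypoint
    def wipe_vpmem(devpath):
        """Zero out a PMEM namespace's DAX device, e.g. /dev/dax0.0."""
        processutils.execute('daxio', '-z', '-o', devpath)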
Line 311, on "By default the content of the original virtual persistent memory is copied to the new virtual persistent memory (if there is)":

Matt Riedemann (2019-06-17): Is this something that only happens if the new
flavor does not have the same pmem resource class as the old flavor, i.e.
resizing from a flavor with CUSTOM_PMEM_NAMESPACE_128G to a flavor with
CUSTOM_PMEM_NAMESPACE_MEDIUM? If so, is there a chance that the target
namespace is not big enough for the copy from the source namespace and we'll
fail, like trying to resize the root image onto a smaller disk?

Also, if the new flavor adds a new resource class but does not remove the old
resource class, then there is no copy, correct? E.g. I'm resizing from a
flavor with CUSTOM_PMEM_NAMESPACE_128G to a flavor with
CUSTOM_PMEM_NAMESPACE_128G and CUSTOM_PMEM_NAMESPACE_MEDIUM; then the
CUSTOM_PMEM_NAMESPACE_128G namespace can remain untouched as long as the
number of required namespaces does not change, is that correct?

It would be good to have examples for clarification in this section.

Alex Xu (2019-06-18): We want to keep the cases simple: we only allow adding
new namespaces to the instance; we do not allow changing the size of an
existing namespace, nor removing an existing namespace. As for the second
question: we will ensure the data copy for CUSTOM_PMEM_NAMESPACE_128G, and
CUSTOM_PMEM_NAMESPACE_MEDIUM is the new namespace; it will be a new, empty
namespace.

Line 315, on "hw:allow_pmem_copy=true|false (default true)":

sean mooney (2019-06-19): This should default to false, see below.

Rui Zang (2019-06-24): Done.

Line 319, on "If both the source and target have this flag set to ``flase``, the data in virtual persistent memory is not copied":

Eric Fried (2019-06-24): false

Rui Zang (2019-06-25): Done.

Line 320, on "This is useful in scenarios such as virtual persistent memory is used as cache":

Matt Riedemann (2019-06-17): By "this" you mean the allow_pmem_copy=false
case, correct? Meaning it's useful to say "I don't care about copying the data
because my application is just using the pmem namespace as a cache".

Alex Xu (2019-06-18): Yes.

sean mooney (2019-06-19): It is also useful when you know you have 2 TB of
PMEM associated with a VM and you don't want to have to copy that over the
network to another host. Personally, since we do not snapshot PMEM, I would
prefer that we also did not copy the pmem namespace data by default, so that
we have consistent behaviour between shelve/unshelve and resize; I don't think
the behavior should differ between a cross-cell resize, which works more like
shelve, and a standard resize. Intel has 512G NVDIMM modules and you can have
up to 16 of them (8TB) plus 8 DDR4 DIMMs in a single system. I actually think
you can have more, but the largest system I was aware of had 24 DIMM slots,
and 1 DIMM per channel had to be DDR4. In any case we can easily have cases
where there are multiple terabytes of PMEM resources on a single host, and a
VM can therefore be expected to have multiple hundreds of GB of PMEM data that
would have to be copied, which will be slow even over a 40G network link. So I
think we should default to not copying the data and have the admin/user opt
in.

Alex Xu (2019-06-20): I'm OK with the default value as false.
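A small sketch of the flag semantics the thread converges on (default false,
either side able to opt in); the helper is hypothetical, not spec text:

    def should_copy_pmem(source_extra_specs, dest_extra_specs):
        """Copy vpmem data on resize iff either flavor opts in via
        hw:allow_pmem_copy; with no flag set at all, do not copy."""
        def opted_in(specs):
            return specs.get('hw:allow_pmem_copy', 'false').lower() == 'true'
        return opted_in(source_extra_specs) or opted_in(dest_extra_specs)

    # Neither flavor sets the flag: no copy, consistent with
    # shelve/unshelve, which never preserves pmem data.
    assert not should_copy_pmem({}, {})
    assert should_copy_pmem({'hw:allow_pmem_copy': 'true'}, {})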
Line 328, on "If the source and target persistent memory namespaces are not on the same host, ssh tunnel is used to channel the data transfer":

Matt Riedemann (2019-06-17): Just a note that this wouldn't work for
cross-cell resize, where there is no direct connectivity, like SSH, between
the source and target host.

Alex Xu (2019-06-18): OK, thanks. We can add a check to stop resize for an
instance with vpmem?

sean mooney (2019-06-19): If we don't copy by default, then we would only need
a check for the case where we opt in to the copying behavior. If/when we
support snapshotting the pmem namespace, we would support cross-cell migration
by uploading the pmem snapshots along with the root disk. That would be after
Train, but I don't think we should block resize/cold migrate in general for
instances with PMEM.

Rui Zang (2019-06-21): I saw the discussion between you and Alex on IRC on
this topic. So if not fundamental, let's keep it as is today?

Rui Zang (2019-06-24):
    > I saw the discussion between you and Alex on IRC on this topic. So if
    > not fundamental, let's keep it as is today?
Sorry, I put this reply in the wrong place.
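A rough sketch of the transfer path the spec describes: daxio reading the
source namespace, piped through ssh to daxio writing the target. The command
shapes, the assumption that daxio streams via stdout/stdin when only one side
is a device, and the key path (mirroring the one resize already uses for
disks) are all illustrative, not the merged implementation:

    import subprocess

    def transfer_vpmem(src_dev, dest_host, dest_dev,
                       ssh_key='/var/lib/nova/.ssh/id_rsa'):
        """Stream a PMEM namespace between hosts, roughly:
        daxio -i /dev/dax0.0 | ssh <dest> daxio -o /dev/dax1.0"""
        reader = subprocess.Popen(
            ['daxio', '-i', src_dev], stdout=subprocess.PIPE)
        writer = subprocess.Popen(
            ['ssh', '-i', ssh_key, dest_host, 'daxio', '-o', dest_dev],
            stdin=reader.stdout)
        reader.stdout.close()  # let daxio see SIGPIPE if ssh exits early
        if writer.wait() != 0 or reader.wait() != 0:
            raise RuntimeError('vpmem transfer failed')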
hotplug"},{"line_number":340,"context_line":"---------------------------------"},{"line_number":341,"context_line":"This spec does not address the hot plugging of virtual persistent memory."},{"line_number":342,"context_line":""},{"line_number":343,"context_line":"VM snapshot"},{"line_number":344,"context_line":"-----------"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_77c5055b","line":341,"in_reply_to":"9fb8cfa7_f88fc672","updated":"2019-06-19 16:26:29.000000000","message":"libvirt requiers you to stat how may dimm slots are availble at boot time so while you can hot plug them you need to also\nreserve free slots to be able to do so.\n\nkind of like how we have to reserve slots for pci devices with the q35 chipset.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":357,"context_line":"As snapshot, saving/restoring virtual persistent memory images could be"},{"line_number":358,"context_line":"supported after the persistent memory images can be stored in Glance."},{"line_number":359,"context_line":""},{"line_number":360,"context_line":"Alternatives"},{"line_number":361,"context_line":"------------"},{"line_number":362,"context_line":"Persisent memory namespaces can be created/destroyed on the fly as VM"},{"line_number":363,"context_line":"creation/deletion. This ways is more flexible than the fix sized"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_5f17e652","line":360,"updated":"2019-06-17 16:24:36.000000000","message":"As mentioned previously, and maybe this is naive since I\u0027m not familiar with these types of devices, is there any chance we could have modeled these as block devices like volumes and managed them that way which seems to me like it would have been a lot cleaner during something like a migration. It really sounds like a BDM with a destination_type of \"local\" but we\u0027d maybe have to create a new source_type for \"pmem\".","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":357,"context_line":"As snapshot, saving/restoring virtual persistent memory images could be"},{"line_number":358,"context_line":"supported after the persistent memory images can be stored in Glance."},{"line_number":359,"context_line":""},{"line_number":360,"context_line":"Alternatives"},{"line_number":361,"context_line":"------------"},{"line_number":362,"context_line":"Persisent memory namespaces can be created/destroyed on the fly as VM"},{"line_number":363,"context_line":"creation/deletion. This ways is more flexible than the fix sized"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_b8d06e55","line":360,"in_reply_to":"9fb8cfa7_5f17e652","updated":"2019-06-18 07:32:59.000000000","message":"As in the IRC said, actually, the vpmem is memory device, it isn\u0027t the block device. \n\nSo we can\u0027t write image to vpmem, and vpmem can\u0027t be a boot device.\n\nAlso If we add vpmem to the BDM, then we probably need to process them as block device, we probably should have a Driver under nova/block_device.py. But the most driver interface doesn\u0027t match. 
And when we attach the vpmem to the instance in the libvirt virt driver, we can\u0027t reuse the code path for the block device, it will use it the xml node \u003cmemory\u003e. https://libvirt.org/formatdomain.html#elementsMemory","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":368,"context_line":"-----------------"},{"line_number":369,"context_line":"A new VirtualPMEM object is introduced to track the virtual PMEM information"},{"line_number":370,"context_line":"of an instance, it stands for a virtual persistent memory device backed"},{"line_number":371,"context_line":"by a physical persistent memory namespace::"},{"line_number":372,"context_line":""},{"line_number":373,"context_line":" +class VirtualPMEM(base.NovaObject):"},{"line_number":374,"context_line":" +    # Version 1.0: Initial version"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_bf05c216","line":371,"range":{"start_line":371,"start_character":41,"end_line":371,"end_character":43},"updated":"2019-06-17 16:24:36.000000000","message":"nit: rather than this, use:\n\n.. code-block:: python\n\nHere and throughout below.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":368,"context_line":"-----------------"},{"line_number":369,"context_line":"A new VirtualPMEM object is introduced to track the virtual PMEM information"},{"line_number":370,"context_line":"of an instance, it stands for a virtual persistent memory device backed"},{"line_number":371,"context_line":"by a physical persistent memory namespace::"},{"line_number":372,"context_line":""},{"line_number":373,"context_line":" +class VirtualPMEM(base.NovaObject):"},{"line_number":374,"context_line":" +    # Version 1.0: Initial version"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_cb05ea7b","line":371,"range":{"start_line":371,"start_character":41,"end_line":371,"end_character":43},"in_reply_to":"9fb8cfa7_bf05c216","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":370,"context_line":"of an instance, it stands for a virtual persistent memory device backed"},{"line_number":371,"context_line":"by a physical persistent memory namespace::"},{"line_number":372,"context_line":""},{"line_number":373,"context_line":" +class VirtualPMEM(base.NovaObject):"},{"line_number":374,"context_line":" +    # Version 1.0: Initial version"},{"line_number":375,"context_line":" +    VERSION \u003d \"1.0\""},{"line_number":376,"context_line":" +"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_df02f60b","line":373,"range":{"start_line":373,"start_character":1,"end_line":373,"end_character":2},"updated":"2019-06-17 16:24:36.000000000","message":"nit: remove the left-side + characters - here and below","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui 
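For reference, the kind of guest XML the <memory> element Alex links to
describes for an NVDIMM; the snippet below is built with Python to keep the
examples in one language, and the concrete paths and sizes are illustrative
rather than nova-generated output:

    import textwrap

    # Shape per https://libvirt.org/formatdomain.html#elementsMemory
    # (alignsize and <pmem/> are available since libvirt 5.0.0).
    nvdimm_xml = textwrap.dedent("""\
        <memory model='nvdimm' access='shared'>
          <source>
            <path>/dev/dax0.0</path>
            <alignsize unit='KiB'>2048</alignsize>
            <pmem/>
          </source>
          <target>
            <size unit='KiB'>536870912</size>
            <node>0</node>
            <label><size unit='KiB'>2048</size></label>
          </target>
        </memory>""")
    print(nvdimm_xml)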
Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":370,"context_line":"of an instance, it stands for a virtual persistent memory device backed"},{"line_number":371,"context_line":"by a physical persistent memory namespace::"},{"line_number":372,"context_line":""},{"line_number":373,"context_line":" +class VirtualPMEM(base.NovaObject):"},{"line_number":374,"context_line":" +    # Version 1.0: Initial version"},{"line_number":375,"context_line":" +    VERSION \u003d \"1.0\""},{"line_number":376,"context_line":" +"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_eb08ee91","line":373,"range":{"start_line":373,"start_character":1,"end_line":373,"end_character":2},"in_reply_to":"9fb8cfa7_df02f60b","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":370,"context_line":"of an instance, it stands for a virtual persistent memory device backed"},{"line_number":371,"context_line":"by a physical persistent memory namespace::"},{"line_number":372,"context_line":""},{"line_number":373,"context_line":" +class VirtualPMEM(base.NovaObject):"},{"line_number":374,"context_line":" +    # Version 1.0: Initial version"},{"line_number":375,"context_line":" +    VERSION \u003d \"1.0\""},{"line_number":376,"context_line":" +"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_f79c955e","line":373,"range":{"start_line":373,"start_character":1,"end_line":373,"end_character":2},"in_reply_to":"9fb8cfa7_df02f60b","updated":"2019-06-19 16:26:29.000000000","message":"right this reads like a diff rather then a code snipit\nif we use the code block instead it will render with syntax hylighting in the final output but it needs to be in source form ate not diff formate for that  to work properly.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":378,"context_line":" +        \u0027rc_name\u0027: fields.StringField(),"},{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_5a0cd43a","line":381,"range":{"start_line":381,"start_character":11,"end_line":381,"end_character":17},"updated":"2019-06-17 16:24:36.000000000","message":"This is a device name / mountpoint like \"/dev/dax0.0\" correct? 
Seems like a BlockDeviceMapping to me.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":378,"context_line":" +        \u0027rc_name\u0027: fields.StringField(),"},{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_581db20d","line":381,"range":{"start_line":381,"start_character":11,"end_line":381,"end_character":17},"in_reply_to":"9fb8cfa7_5a0cd43a","updated":"2019-06-18 07:32:59.000000000","message":"Same reply as above: it is a memory device and is handled differently in the virt driver; treating it as a block device would create a special code path for the memory device in the block device code path.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""},{"line_number":385,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_fa102852","line":382,"range":{"start_line":382,"start_character":11,"end_line":382,"end_character":19},"updated":"2019-06-17 16:24:36.000000000","message":"Will this be used? From the libvirt domain xml docs:\n\n\"The alignsize element defines the page size alignment used to mmap the address range for the backend path. If not supplied the host page size is used. For example, to mmap a real NVDIMM device a 2M-aligned page may be required. 
Since 5.0.0\"\n\nIf we use this, from where would we get the value?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"51bdd0f73cf3cc193f5902184bacb4106e71c4bf","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""},{"line_number":385,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_74d15f57","line":382,"range":{"start_line":382,"start_character":11,"end_line":382,"end_character":19},"in_reply_to":"9fb8cfa7_172a89e6","updated":"2019-06-21 07:27:07.000000000","message":"For now, we do not see any requirement from the user to specify the alignment size. So for now, we can leave it there. But in the future, if there is a need, we can always add an additional extra spec and/or encourage the operator to provide namespaces with a variety of alignment values.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"76e4b73aa624ef9fbecf164985e5551b9a79e3df","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""},{"line_number":385,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_eb2a2e7c","line":382,"range":{"start_line":382,"start_character":11,"end_line":382,"end_character":19},"in_reply_to":"9fb8cfa7_74d15f57","updated":"2019-06-24 15:31:08.000000000","message":"Sorry, why do we need it (or the ns_dev and ns_size for that matter) in the database? 
Is there a reason we shouldn\u0027t look it up on the fly when we need it?","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""},{"line_number":385,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_542bcd93","line":382,"range":{"start_line":382,"start_character":11,"end_line":382,"end_character":19},"in_reply_to":"9fb8cfa7_eb2a2e7c","updated":"2019-06-25 09:02:37.000000000","message":"I agree with you, the fields except rc_name and ns_name can be read out on the fly.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""},{"line_number":385,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_172a89e6","line":382,"range":{"start_line":382,"start_character":11,"end_line":382,"end_character":19},"in_reply_to":"9fb8cfa7_f89ca66b","updated":"2019-06-19 16:26:29.000000000","message":"ok so we do not need a hw:pmem_alignment_size extra spec\nand can hide this from the user by discovering it from the namespace. 
from within the guest, will the alignment be reported, and/or can the guest do unaligned read/write if it is not discoverable in the guest?\n\ni\u0027m wondering if this is something that the end users will need to be able to influence or if this is purely up to the discretion of the operator.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":23598,"name":"Zhong Luyao","email":"luyao.zhong@intel.com","username":"ZhongLuyao"},"change_message_id":"eb8f31a90adc8b6fec57120ed3b71a62d12ee472","unresolved":false,"context_lines":[{"line_number":379,"context_line":" +        \u0027ns_name\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":380,"context_line":" +        \u0027ns_size\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":381,"context_line":" +        \u0027ns_dev\u0027: fields.StringField(nullable\u003dTrue),"},{"line_number":382,"context_line":" +        \u0027ns_align\u0027: fields.IntegerField(nullable\u003dTrue),"},{"line_number":383,"context_line":" +    }"},{"line_number":384,"context_line":""},{"line_number":385,"context_line":"In addition a VirtualPMEMList object is introduced to represent a list"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_f89ca66b","line":382,"range":{"start_line":382,"start_character":11,"end_line":382,"end_character":19},"in_reply_to":"9fb8cfa7_fa102852","updated":"2019-06-18 07:50:50.000000000","message":"Yes, we need this and will use it. For a real NVDIMM device and devdax mode, 2M is the default alignsize when creating a namespace, and it still supports other alignsizes such as 1G. We need to specify this value when generating the domain xml.\n\nWhen initializing the libvirt driver, \u0027ndctl\u0027 will be called to get the namespaces info, which contains the \u0027alignsize\u0027 value.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":438,"context_line":""},{"line_number":439,"context_line":"Performance Impact"},{"line_number":440,"context_line":"------------------"},{"line_number":441,"context_line":"PMEM namespaces tend to be large. Zeroing out a persistent memory"},{"line_number":442,"context_line":"namespace requires a considerable amount of time. This may introduce"},{"line_number":443,"context_line":"a negative performance impact when deleting a guest with large"},{"line_number":444,"context_line":"virtual persistent memories."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_da8c4481","line":441,"updated":"2019-06-17 16:24:36.000000000","message":"This doesn\u0027t really sound any different from the volume_clear option used by the libvirt lvm image backend:\n\nhttps://docs.openstack.org/nova/latest/configuration/config.html#libvirt.volume_clear\n\nAnother reason why I keep thinking these sound like lvm volumes and could be modeled as block device mappings...","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"51bdd0f73cf3cc193f5902184bacb4106e71c4bf","unresolved":false,"context_lines":[{"line_number":438,"context_line":""},{"line_number":439,"context_line":"Performance Impact"},{"line_number":440,"context_line":"------------------"},{"line_number":441,"context_line":"PMEM namespaces tend to be large. 
Zeroing out a persistent memory"},{"line_number":442,"context_line":"namespace requires a considerable amount of time. This may introduce"},{"line_number":443,"context_line":"a negative performance impact when deleting a guest with large"},{"line_number":444,"context_line":"virtual persistent memories."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_742abfe7","line":441,"in_reply_to":"9fb8cfa7_574ec1c1","updated":"2019-06-21 07:27:07.000000000","message":"Your analysis is spot on. However, it will certainly take host cpu cycles and, more importantly, occupy memory controller bandwidth to do the zeroing out. That also implies a performance impact on other jobs on the host.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"606ec728f99dedd9b680352b515e6cb5f18fbc2d","unresolved":false,"context_lines":[{"line_number":438,"context_line":""},{"line_number":439,"context_line":"Performance Impact"},{"line_number":440,"context_line":"------------------"},{"line_number":441,"context_line":"PMEM namespaces tend to be large. Zeroing out a persistent memory"},{"line_number":442,"context_line":"namespace requires a considerable amount of time. This may introduce"},{"line_number":443,"context_line":"a negative performance impact when deleting a guest with large"},{"line_number":444,"context_line":"virtual persistent memories."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_574ec1c1","line":441,"in_reply_to":"9fb8cfa7_da8c4481","updated":"2019-06-19 16:26:29.000000000","message":"will this actually be a performance issue? it will take longer for the guest deletion to happen, but we will just spawn a shell and execute the cli to 0 out the namespace, so from a nova perspective, when we make that call to the cli, eventlet will just context switch to do something else until the command completes, so it should not really impact other operations. 
it\u0027s not as if we are going to mmap the devdax device and 0 it out in python.\n\nso yes, the overall time for the delete action will increase\na lot if a large amount of PMEM was used, but the amount of time the agent will actively spend deleting the vm will be more or less the same, as the cleanup of the PMEM namespace will happen externally and we will just switch to a different greenthread.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":456,"context_line":"--------------"},{"line_number":457,"context_line":"None."},{"line_number":458,"context_line":""},{"line_number":459,"context_line":"System prerequisite"},{"line_number":460,"context_line":"-------------------"},{"line_number":461,"context_line":"* Kernel version \u003e\u003d 4.2"},{"line_number":462,"context_line":"* QEMU version \u003e\u003d 2.9.0"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_fa710879","line":459,"updated":"2019-06-17 16:24:36.000000000","message":"You\u0027d also need the daxio package installed.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":477,"context_line":""},{"line_number":478,"context_line":"Work Items"},{"line_number":479,"context_line":"----------"},{"line_number":480,"context_line":"* Object: add DB model and Nova object."},{"line_number":481,"context_line":"* Compute: virtual persistent memory life cycle management."},{"line_number":482,"context_line":"* Scheduler: translate virtual persistent memory request to"},{"line_number":483,"context_line":"             placement requests."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_dafd24b2","line":480,"updated":"2019-06-17 16:24:36.000000000","message":"I still wonder if we could get BlockDeviceMapping(List) to work for these rather than add something entirely new but mostly similar.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":481,"context_line":"* Compute: virtual persistent memory life cycle management."},{"line_number":482,"context_line":"* Scheduler: translate virtual persistent memory request to"},{"line_number":483,"context_line":"             placement requests."},{"line_number":484,"context_line":"* API: parse virtual persistent memory flavor extra specs."},{"line_number":485,"context_line":""},{"line_number":486,"context_line":"Dependencies"},{"line_number":487,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_1a1dfc0e","line":484,"updated":"2019-06-17 16:24:36.000000000","message":"I\u0027m not really clear on what you\u0027d be parsing and doing with this since it sounds like the API doesn\u0027t do anything with them, the scheduler and compute do.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":5754,"name":"Alex 
Xu","email":"hejie.xu@intel.com","username":"xuhj"},"change_message_id":"0cb61d3e88f9e8bb12f6863011ca056d95ad37f2","unresolved":false,"context_lines":[{"line_number":481,"context_line":"* Compute: virtual persistent memory life cycle management."},{"line_number":482,"context_line":"* Scheduler: translate virtual persistent memory request to"},{"line_number":483,"context_line":"             placement requests."},{"line_number":484,"context_line":"* API: parse virtual persistent memory flavor extra specs."},{"line_number":485,"context_line":""},{"line_number":486,"context_line":"Dependencies"},{"line_number":487,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_d8a442bc","line":484,"in_reply_to":"9fb8cfa7_1a1dfc0e","updated":"2019-06-18 07:32:59.000000000","message":"we should have API layer validate the \u0027hw:pmem\u0027 with correct format and put the request into instance.vpmem","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":485,"context_line":""},{"line_number":486,"context_line":"Dependencies"},{"line_number":487,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":488,"context_line":"None."},{"line_number":489,"context_line":""},{"line_number":490,"context_line":""},{"line_number":491,"context_line":"Testing"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_9ad6cc27","line":488,"updated":"2019-06-17 16:24:36.000000000","message":"This should be your \"System prerequisite\" section, so just move that content here and drop the \"System prerequisite\" section.","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":485,"context_line":""},{"line_number":486,"context_line":"Dependencies"},{"line_number":487,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":488,"context_line":"None."},{"line_number":489,"context_line":""},{"line_number":490,"context_line":""},{"line_number":491,"context_line":"Testing"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_ebf22e6f","line":488,"in_reply_to":"9fb8cfa7_9ad6cc27","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":499,"context_line":"Documentation Impact"},{"line_number":500,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":501,"context_line":""},{"line_number":502,"context_line":"The cloud administrator docs need to describe how to create"},{"line_number":503,"context_line":"and configure persistent memory namespaces."},{"line_number":504,"context_line":""},{"line_number":505,"context_line":"The end user needs to be make aware of this 
feature."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_7abaf8e1","line":502,"updated":"2019-06-17 16:24:36.000000000","message":"Yeah we\u0027d probably have something in here like we have for file-backed memory and vgpus:\n\nhttps://docs.openstack.org/nova/latest/admin/adv-config.html","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":499,"context_line":"Documentation Impact"},{"line_number":500,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":501,"context_line":""},{"line_number":502,"context_line":"The cloud administrator docs need to describe how to create"},{"line_number":503,"context_line":"and configure persistent memory namespaces."},{"line_number":504,"context_line":""},{"line_number":505,"context_line":"The end user needs to be make aware of this feature."}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_aba19648","line":502,"in_reply_to":"9fb8cfa7_7abaf8e1","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":6873,"name":"Matt Riedemann","email":"mriedem.os@gmail.com","username":"mriedem"},"change_message_id":"b6221f12d0108fd23f02a1ebdc614eec19e4d8f8","unresolved":false,"context_lines":[{"line_number":502,"context_line":"The cloud administrator docs need to describe how to create"},{"line_number":503,"context_line":"and configure persistent memory namespaces."},{"line_number":504,"context_line":""},{"line_number":505,"context_line":"The end user needs to be make aware of this feature."},{"line_number":506,"context_line":""},{"line_number":507,"context_line":"References"},{"line_number":508,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_facd8886","line":505,"updated":"2019-06-17 16:24:36.000000000","message":"And this is where the user flavor documentation would go:\n\nhttps://docs.openstack.org/nova/latest/user/flavors.html","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"db2f140f9a617f255d1130df1f0e4c86daa3c507","unresolved":false,"context_lines":[{"line_number":502,"context_line":"The cloud administrator docs need to describe how to create"},{"line_number":503,"context_line":"and configure persistent memory namespaces."},{"line_number":504,"context_line":""},{"line_number":505,"context_line":"The end user needs to be make aware of this feature."},{"line_number":506,"context_line":""},{"line_number":507,"context_line":"References"},{"line_number":508,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":14,"id":"9fb8cfa7_0bab021d","line":505,"in_reply_to":"9fb8cfa7_facd8886","updated":"2019-06-24 16:32:38.000000000","message":"Done","commit_id":"7474f1378967d29a32a15a1d9dcda18b3ae80e5c"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"d229a3c2b376ac2aed97b5c4b66cf7159a23d81e","unresolved":false,"context_lines":[{"line_number":132,"context_line":"Nova compute agent 
discovers the namespaces by parsing the config file"},{"line_number":133,"context_line":"to determine what namespaces it can allocate to a guest. The discovered"},{"line_number":134,"context_line":"persistent memory namespaces will be reported to the placement service"},{"line_number":135,"context_line":"as inventories of a custome resource class associated with the ROOT"},{"line_number":136,"context_line":"resource provider."},{"line_number":137,"context_line":""},{"line_number":138,"context_line":"Custom Resource Classes are used to represent persistent memory namespace"}],"source_content_type":"text/x-rst","patch_set":15,"id":"9fb8cfa7_1aa9097b","line":135,"range":{"start_line":135,"start_character":20,"end_line":135,"end_character":27},"updated":"2019-06-24 17:44:09.000000000","message":"custom","commit_id":"bd3f4febc4907deb7af0a3e8ccfbaab50f6d443d"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":132,"context_line":"Nova compute agent discovers the namespaces by parsing the config file"},{"line_number":133,"context_line":"to determine what namespaces it can allocate to a guest. The discovered"},{"line_number":134,"context_line":"persistent memory namespaces will be reported to the placement service"},{"line_number":135,"context_line":"as inventories of a custome resource class associated with the ROOT"},{"line_number":136,"context_line":"resource provider."},{"line_number":137,"context_line":""},{"line_number":138,"context_line":"Custom Resource Classes are used to represent persistent memory namespace"}],"source_content_type":"text/x-rst","patch_set":15,"id":"9fb8cfa7_b486c9b8","line":135,"range":{"start_line":135,"start_character":20,"end_line":135,"end_character":27},"in_reply_to":"9fb8cfa7_1aa9097b","updated":"2019-06-25 09:02:37.000000000","message":"Done","commit_id":"bd3f4febc4907deb7af0a3e8ccfbaab50f6d443d"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"d229a3c2b376ac2aed97b5c4b66cf7159a23d81e","unresolved":false,"context_lines":[{"line_number":158,"context_line":"A name can be given to a persitent memory namespace upon creation by"},{"line_number":159,"context_line":"the \"-n/--name\" option to the `ndctl`_ command."},{"line_number":160,"context_line":""},{"line_number":161,"context_line":"Both \u0027$LABEL\u0027 and \u0027$NSNAME\u0027 can be regluer expressions. 
To give an example,"},{"line_number":162,"context_line":"on a certain host, there might be a below configuration::"},{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""}],"source_content_type":"text/x-rst","patch_set":15,"id":"9fb8cfa7_dab851c9","line":161,"range":{"start_line":161,"start_character":0,"end_line":161,"end_character":54},"updated":"2019-06-24 17:44:09.000000000","message":"-1, see PS14 comment","commit_id":"bd3f4febc4907deb7af0a3e8ccfbaab50f6d443d"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":158,"context_line":"A name can be given to a persitent memory namespace upon creation by"},{"line_number":159,"context_line":"the \"-n/--name\" option to the `ndctl`_ command."},{"line_number":160,"context_line":""},{"line_number":161,"context_line":"Both \u0027$LABEL\u0027 and \u0027$NSNAME\u0027 can be regluer expressions. To give an example,"},{"line_number":162,"context_line":"on a certain host, there might be a below configuration::"},{"line_number":163,"context_line":""},{"line_number":164,"context_line":"    \"128G:ns0,ns1,ns2,ns3|262144MB:ns4,ns5|MEDIUM:ns6,ns7\""}],"source_content_type":"text/x-rst","patch_set":15,"id":"9fb8cfa7_548f4dcc","line":161,"range":{"start_line":161,"start_character":0,"end_line":161,"end_character":54},"in_reply_to":"9fb8cfa7_dab851c9","updated":"2019-06-25 09:02:37.000000000","message":"Removed","commit_id":"bd3f4febc4907deb7af0a3e8ccfbaab50f6d443d"},{"author":{"_account_id":14070,"name":"Eric Fried","email":"openstack@fried.cc","username":"efried"},"change_message_id":"d229a3c2b376ac2aed97b5c4b66cf7159a23d81e","unresolved":false,"context_lines":[{"line_number":169,"context_line":"``CUSTOM_PMEM_NAMESPACE_262144MB``, and 2 namespaces (ns6, ns7) of resource"},{"line_number":170,"context_line":"class ``CUSTOM_PMEM_NAMESPACE_MEDIUM``."},{"line_number":171,"context_line":""},{"line_number":172,"context_line":"Or by using regular expressions the above configuration can also be::"},{"line_number":173,"context_line":""},{"line_number":174,"context_line":"    \"128G:ns[0-3]|262144MB:ns[4,5]|MEDIUM:ns[6,7]\""},{"line_number":175,"context_line":""},{"line_number":176,"context_line":"The \u0027total\u0027 value of the inventory is the *number* of the"},{"line_number":177,"context_line":"persistent memory namespaces belong to this resource class."}],"source_content_type":"text/x-rst","patch_set":15,"id":"9fb8cfa7_3abf2dab","line":174,"range":{"start_line":172,"start_character":0,"end_line":174,"end_character":50},"updated":"2019-06-24 17:44:09.000000000","message":"-1","commit_id":"bd3f4febc4907deb7af0a3e8ccfbaab50f6d443d"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"89ef5df3912b5b59457d6c979968b14ef7f25a96","unresolved":false,"context_lines":[{"line_number":169,"context_line":"``CUSTOM_PMEM_NAMESPACE_262144MB``, and 2 namespaces (ns6, ns7) of resource"},{"line_number":170,"context_line":"class ``CUSTOM_PMEM_NAMESPACE_MEDIUM``."},{"line_number":171,"context_line":""},{"line_number":172,"context_line":"Or by using regular expressions the above configuration can also be::"},{"line_number":173,"context_line":""},{"line_number":174,"context_line":"    
\"128G:ns[0-3]|262144MB:ns[4,5]|MEDIUM:ns[6,7]\""},{"line_number":175,"context_line":""},{"line_number":176,"context_line":"The \u0027total\u0027 value of the inventory is the *number* of the"},{"line_number":177,"context_line":"persistent memory namespaces belong to this resource class."}],"source_content_type":"text/x-rst","patch_set":15,"id":"9fb8cfa7_34c419f5","line":174,"range":{"start_line":172,"start_character":0,"end_line":174,"end_character":50},"in_reply_to":"9fb8cfa7_3abf2dab","updated":"2019-06-25 09:02:37.000000000","message":"Removed","commit_id":"bd3f4febc4907deb7af0a3e8ccfbaab50f6d443d"},{"author":{"_account_id":782,"name":"John Garbutt","email":"john@johngarbutt.com","username":"johngarbutt"},"change_message_id":"46b13949564cafe1432a361dc95525e34bf4886d","unresolved":false,"context_lines":[{"line_number":43,"context_line":"Besides data persistence, persistent memory is less expensive than DRAM"},{"line_number":44,"context_line":"and comes with much larger capacities. This is an appealing feature for"},{"line_number":45,"context_line":"scenarios that request huge amounts of memory such as high performance"},{"line_number":46,"context_line":"computing (HPC)."},{"line_number":47,"context_line":""},{"line_number":48,"context_line":"There has been some exploration by applications which heavily use memory"},{"line_number":49,"context_line":"devices such as in memory databases. To name a few: redis_, rocksdb_,"}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_f370a542","line":46,"updated":"2019-06-27 16:08:58.000000000","message":"+1 from me :)\n\nI believe your proposal here works well for this use case, so I am happy.","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"3c124bdeb61354783a7e43120e392a6a400fb49d","unresolved":false,"context_lines":[{"line_number":151,"context_line":"is defined by a new configuration option \u0027CONF.libvirt.pmem_namespaces\u0027."},{"line_number":152,"context_line":"This config option is of string type in below format::"},{"line_number":153,"context_line":""},{"line_number":154,"context_line":"    \"$LABEL:$NSNAME[|$NSNAME][,$LABEL:$NSNAME[|$NSNAME]]\""},{"line_number":155,"context_line":""},{"line_number":156,"context_line":"``$NSNAME`` is the name of the persistent memory namespace that falls"},{"line_number":157,"context_line":"into the resource class named ``CUSTOM_PMEM_NAMESPACE_$LABEL``."}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_0f4ea06e","line":154,"range":{"start_line":154,"start_character":4,"end_line":154,"end_character":56},"updated":"2019-06-25 09:07:31.000000000","message":"I flapped the use of \"|\" and \",\" in this revision since \",\" is normally used as a separator for listopts.","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"b61cf1e9527fe2518058b0d42547696a8b30491c","unresolved":false,"context_lines":[{"line_number":257,"context_line":"namespaces."},{"line_number":258,"context_line":""},{"line_number":259,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":260,"context_line":"to be associated with one guest NUMA node. 
If guest NUMA topology"},{"line_number":261,"context_line":"is specified in the flavor, the guest virtual persistent memory"},{"line_number":262,"context_line":"devices are put under guest NUMA node 0. If guest NUMA topology is not"},{"line_number":263,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":264,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":265,"context_line":"Please note, under the second circumstance (implicitly constructing"}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_08817ec3","line":262,"range":{"start_line":260,"start_character":43,"end_line":262,"end_character":40},"updated":"2019-06-27 21:38:12.000000000","message":"this is a pessimisation that i think makes this not suitable for production use in some cases but may be fine for others.\n\ni regard this as an implementation detail that should not be relied upon or considered part of the stable abi of this feature, such that it can be changed in a follow up spec.\n\ni would have preferred that to be stated in this spec explicitly so that we can directly point to it, but i think that was the intent this statement was made with.","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"a4dafb8068df22f4495946270879db66dd7f647d","unresolved":false,"context_lines":[{"line_number":257,"context_line":"namespaces."},{"line_number":258,"context_line":""},{"line_number":259,"context_line":"Libvirt domain specification requires each virtual persistent memory"},{"line_number":260,"context_line":"to be associated with one guest NUMA node. If guest NUMA topology"},{"line_number":261,"context_line":"is specified in the flavor, the guest virtual persistent memory"},{"line_number":262,"context_line":"devices are put under guest NUMA node 0. If guest NUMA topology is not"},{"line_number":263,"context_line":"specified in the flavor, a guest NUMA node 0 is constructed implicitly"},{"line_number":264,"context_line":"and all guest virutal persistent memory devices are put under it."},{"line_number":265,"context_line":"Please note, under the second circumstance (implicitly constructing"}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_170d17e7","line":262,"range":{"start_line":260,"start_character":43,"end_line":262,"end_character":40},"in_reply_to":"9fb8cfa7_08817ec3","updated":"2019-06-28 06:30:56.000000000","message":"Yes, we could have added something like \"these are the details of the current implementation and they would be changed by follow up specs on numa affinity\"","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"b61cf1e9527fe2518058b0d42547696a8b30491c","unresolved":false,"context_lines":[{"line_number":265,"context_line":"Please note, under the second circumstance (implicitly constructing"},{"line_number":266,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":267,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":268,"context_line":"is being built up. The NUMA topology logic in the scheduler is not
applied. And from the perspective of any other parts of Nova, this"},{"line_number":270,"context_line":"guest is still a non-NUMA guest."},{"line_number":271,"context_line":""},{"line_number":272,"context_line":"Examples::"},{"line_number":273,"context_line":""}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_68f59a56","line":270,"range":{"start_line":268,"start_character":19,"end_line":270,"end_character":32},"updated":"2019-06-27 21:38:12.000000000","message":"i think this is going to be problematic.\n\ni will need to think about this some more, but\ni think we are going to have issues with this\nwhen we try to implement it, and we will have to take account of the numa topology of the vm at the scheduler but we may choose to disregard the numa affinity of the pmem device.\n\ni would strongly prefer this to be addressed in a followup patch.\n\nwhat i suggested at the ptg was to explicitly create a numa node of 1 as we do for hugepages and pinning, which is checked in the numa topology filter.\n\ni have never asserted it is a good idea to only do this at the driver.","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"a4dafb8068df22f4495946270879db66dd7f647d","unresolved":false,"context_lines":[{"line_number":265,"context_line":"Please note, under the second circumstance (implicitly constructing"},{"line_number":266,"context_line":"a guest NUMA node 0), the construction of guest NUMA node 0 happens"},{"line_number":267,"context_line":"at the libvirt driver while the guest libvirt domain specification"},{"line_number":268,"context_line":"is being built up. The NUMA topology logic in the scheduler is not"},{"line_number":269,"context_line":"applied. And from the perspective of any other parts of Nova, this"},{"line_number":270,"context_line":"guest is still a non-NUMA guest."},{"line_number":271,"context_line":""},{"line_number":272,"context_line":"Examples::"},{"line_number":273,"context_line":""}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_b7be8bab","line":270,"range":{"start_line":268,"start_character":19,"end_line":270,"end_character":32},"in_reply_to":"9fb8cfa7_68f59a56","updated":"2019-06-28 06:30:56.000000000","message":"\u003e i think this is going to be problematic.\n \u003e \n \u003e i will need to think about this some more but\n \u003e i think we are going to have issues with this\n \u003e when we try to implement it and we will have to take account of the\n \u003e numa topology of the vm at the scheduler but we may choose to\n \u003e disregard the numa affinity of the pmem device.\n \u003e \n\nIf the user specified a guest numa topology, the guest will go through the numa topology filter for cpu and memory and perhaps other pci devices. There is no pmem info in the numa filter. 
So there is no point in going through the numa topology filter.\n\nI think you\u0027ve got all the above situations; it is just my wording in the spec causing confusion.\n\n \u003e i would strongly prefer this to be addressed in a followup patch.\n \u003e \n \u003e what i suggested at the ptg was to explicitly create a numa node of\n \u003e 1 as we do for hugepages and pinning which is checked in the numa\n \u003e topology filter.\n \u003e \n \u003e i have never asserted it is a good idea to only do this at the driver.\n\nIf we have physical pmem numa info in either the numa filter or placement, it makes sense to create a numa node 0 and put the VM through the filter. However, we do not have it now; the filter would only work for the VM cpu and memory, which the user never intended. The vpmem device could still be not numa optimized, since it does not go through the numa filter.\n\nThere are definitely incoming specs to address the numa mess. The pmem numa info will be in placement or the numa filter depending on how we agree to address it and the progress of numa in placement. And for a user caring about performance, the user has to create a guest numa topology and put the vpmem in the desired guest numa node. You are more than welcome to review the specs!","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":11604,"name":"sean mooney","email":"smooney@redhat.com","username":"sean-k-mooney"},"change_message_id":"b61cf1e9527fe2518058b0d42547696a8b30491c","unresolved":false,"context_lines":[{"line_number":289,"context_line":" No NUMA node, two 512GB virtual persistent memory:"},{"line_number":290,"context_line":"     hw:pmem \u003d 512GB,512GB"},{"line_number":291,"context_line":""},{"line_number":292,"context_line":"     A guest NUMA node 0 is constructed implicitly."},{"line_number":293,"context_line":"     Both virtual persistent memory devices are put under it."},{"line_number":294,"context_line":""},{"line_number":295,"context_line":".. 
important ::"},{"line_number":296,"context_line":"    Qemu does not support backing one virtual persistent memory device"}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_6894ba37","line":293,"range":{"start_line":292,"start_character":4,"end_line":293,"end_character":61},"updated":"2019-06-27 21:38:12.000000000","message":"ok so we are creating an implicit numa topology of 1 numa node as we do for cpu pinning or hugepages.\n\nthat\u0027s bad for the same reasons it is bad for cpu pinning and hugepages, but it\u0027s consistent, which i\u0027m fine with, since we don\u0027t need to teach operators and users that it is special vs the other memory parameters like hugepages.\n\neffectively this is a shorthand for line 278 above\n\nhw:numa_nodes\u003d1\nhw:pmem\u003d512GB,512GB","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":782,"name":"John Garbutt","email":"john@johngarbutt.com","username":"johngarbutt"},"change_message_id":"46b13949564cafe1432a361dc95525e34bf4886d","unresolved":false,"context_lines":[{"line_number":318,"context_line":"VM rebuild"},{"line_number":319,"context_line":"----------"},{"line_number":320,"context_line":"The persisent memory namespaces are zeroed out during VM rebuild to"},{"line_number":321,"context_line":"get to the initial state of the VM."},{"line_number":322,"context_line":""},{"line_number":323,"context_line":"VM resize"},{"line_number":324,"context_line":"---------"}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_73eb5577","line":321,"updated":"2019-06-27 16:08:58.000000000","message":"Part of me now wonders what hard and soft reboot do, but I will ignore that for now.","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":11347,"name":"Rui Zang","email":"rui.zang@yandex.com","username":"rzang"},"change_message_id":"a4dafb8068df22f4495946270879db66dd7f647d","unresolved":false,"context_lines":[{"line_number":318,"context_line":"VM rebuild"},{"line_number":319,"context_line":"----------"},{"line_number":320,"context_line":"The persisent memory namespaces are zeroed out during VM rebuild to"},{"line_number":321,"context_line":"get to the initial state of the VM."},{"line_number":322,"context_line":""},{"line_number":323,"context_line":"VM resize"},{"line_number":324,"context_line":"---------"}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_57df0f0c","line":321,"in_reply_to":"9fb8cfa7_73eb5577","updated":"2019-06-28 06:30:56.000000000","message":"For hard and soft reboots, the resources the VM occupies do not change. So we follow the behavior of the baremetal servers. 
The data in the virtual pmem persists.","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":782,"name":"John Garbutt","email":"john@johngarbutt.com","username":"johngarbutt"},"change_message_id":"46b13949564cafe1432a361dc95525e34bf4886d","unresolved":false,"context_lines":[{"line_number":534,"context_line":"Third party CI is required for testing on real hardware."},{"line_number":535,"context_line":"Persistent memory nested virtualization works for QEMU/KVM."},{"line_number":536,"context_line":"For the third party CI, tempest tests are executed in a VM with"},{"line_number":537,"context_line":"virtual persisent memory backed by physical persistent memory."},{"line_number":538,"context_line":""},{"line_number":539,"context_line":"Documentation Impact"},{"line_number":540,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"}],"source_content_type":"text/x-rst","patch_set":16,"id":"9fb8cfa7_45e206d7","line":537,"updated":"2019-06-27 16:08:58.000000000","message":"Not quite sure how this idea works, but I remember us being happy with the plan at the PTG.","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"},{"author":{"_account_id":27478,"name":"Archit Modi","email":"amodi@redhat.com","username":"amodi"},"change_message_id":"3e74d3cca33a5bf74387f1026a7240accf6f31ca","unresolved":false,"context_lines":[{"line_number":540,"context_line":"\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d\u003d"},{"line_number":541,"context_line":""},{"line_number":542,"context_line":"The cloud administrator docs need to describe how to create"},{"line_number":543,"context_line":"and configure persistent memory namespaces. Add a persitent"},{"line_number":544,"context_line":"memory section into the Nova \"advanced configuration\" document."},{"line_number":545,"context_line":""},{"line_number":546,"context_line":"The end user needs to be make aware of this feature. Add the"}],"source_content_type":"text/x-rst","patch_set":16,"id":"1fa4df85_ac53906a","line":543,"range":{"start_line":543,"start_character":50,"end_line":543,"end_character":59},"updated":"2020-03-09 07:33:44.000000000","message":"nit: persistent\n\nthis can be resolved later in another patch","commit_id":"2e4cfbe8aa291bf7038bcd9341ba283bc09fe3cc"}]}
