)]}'
{"/COMMIT_MSG":[{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"0c530cf7008bd603a8ddc82cb4edff01c8b61b69","unresolved":false,"context_lines":[{"line_number":10,"context_line":"runc implementation."},{"line_number":11,"context_line":""},{"line_number":12,"context_line":"Co-Authored-By: Jan Kundrát \u003cjan.kundrat@cesnet.cz\u003e"},{"line_number":13,"context_line":"Depends-On: https://review.openstack.org/535538"},{"line_number":14,"context_line":"Change-Id: I1b735bc20d193563d219cd79b935aeba0ad5441b"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":21,"id":"9fdfeff1_567e3de4","line":13,"range":{"start_line":13,"start_character":0,"end_line":13,"end_character":47},"updated":"2019-02-14 18:34:12.000000000","message":"I do not think that that change is still needed, but I\u0027m not removing that.","commit_id":"dbc85f23218b0bafbe9f6928556a6827323ecd2a"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"9c172d69e2b26670cd16eb2a412cbe0219af8d2a","unresolved":false,"context_lines":[{"line_number":10,"context_line":"runc implementation."},{"line_number":11,"context_line":""},{"line_number":12,"context_line":"Co-Authored-By: Jan Kundrát \u003cjan.kundrat@cesnet.cz\u003e"},{"line_number":13,"context_line":"Depends-On: https://review.openstack.org/535538"},{"line_number":14,"context_line":"Change-Id: I1b735bc20d193563d219cd79b935aeba0ad5441b"}],"source_content_type":"text/x-gerrit-commit-message","patch_set":21,"id":"9fdfeff1_6cb04d44","line":13,"range":{"start_line":13,"start_character":0,"end_line":13,"end_character":47},"in_reply_to":"9fdfeff1_567e3de4","updated":"2019-02-15 01:05:12.000000000","message":"Indeed, that is not needed, unless you use a directory that is not /tmp","commit_id":"dbc85f23218b0bafbe9f6928556a6827323ecd2a"}],"doc/source/configuration.rst":[{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"aca3fd551c88be508173401caf19c7d2607723ba","unresolved":false,"context_lines":[{"line_number":1112,"context_line":""},{"line_number":1113,"context_line":"         The hostname of the instance."},{"line_number":1114,"context_line":""},{"line_number":1115,"context_line":"      .. attr:: host-key"},{"line_number":1116,"context_line":"         :type: str"},{"line_number":1117,"context_line":""},{"line_number":1118,"context_line":"         The ssh host key of the node."}],"source_content_type":"text/x-rst","patch_set":19,"id":"9fdfeff1_1bd1c8fb","line":1115,"updated":"2019-02-14 10:46:10.000000000","message":"I wanted to point out that the examples do not contain this key, so I checked and found out that it\u0027s actually not  mandatory. On the other hand, that example from a static node provider listed the SSH host pubkeys. Not a big deal anyway.","commit_id":"0c6f03b81db9de0a8a4134ae32d2ba52801f3176"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"6ea428ae5fd3600a271e4c6e8531208b6b0945e9","unresolved":false,"context_lines":[{"line_number":1112,"context_line":""},{"line_number":1113,"context_line":"         The hostname of the instance."},{"line_number":1114,"context_line":""},{"line_number":1115,"context_line":"      .. attr:: host-key"},{"line_number":1116,"context_line":"         :type: str"},{"line_number":1117,"context_line":""},{"line_number":1118,"context_line":"         The ssh host key of the node."}],"source_content_type":"text/x-rst","patch_set":19,"id":"9fdfeff1_f11f4058","line":1115,"in_reply_to":"9fdfeff1_1bd1c8fb","updated":"2019-02-14 15:02:28.000000000","message":"Done","commit_id":"0c6f03b81db9de0a8a4134ae32d2ba52801f3176"},{"author":{"_account_id":3099,"name":"David Shrewsbury","email":"dshrewsb@redhat.com","username":"dshrews"},"change_message_id":"316b5f4933a43ca5bdb72b77d2ec86f4977af288","unresolved":false,"context_lines":[{"line_number":1137,"context_line":"              labels:"},{"line_number":1138,"context_line":"                - name: fedora-runc"},{"line_number":1139,"context_line":""},{"line_number":1140,"context_line":"      .. attr:: zuul-console-dir"},{"line_number":1141,"context_line":"         :default: /tmp"},{"line_number":1142,"context_line":"         :type: str"},{"line_number":1143,"context_line":""},{"line_number":1144,"context_line":"         The directory named shared between the host and the containers for"},{"line_number":1145,"context_line":"         zuul console\u0027s logs. When using a directory other than /tmp,"},{"line_number":1146,"context_line":"         Zuul configuration \u0027log_stream_file\u0027 [executor] needs to match the"},{"line_number":1147,"context_line":"         new directory."},{"line_number":1148,"context_line":""},{"line_number":1149,"context_line":"   .. attr:: pools"},{"line_number":1150,"context_line":"      :type: list"}],"source_content_type":"text/x-rst","patch_set":28,"id":"dfbec78f_deb90515","line":1147,"range":{"start_line":1140,"start_character":1,"end_line":1147,"end_character":23},"updated":"2019-05-13 17:06:30.000000000","message":"Wrong indent, doesn\u0027t render properly in the produced documentation.","commit_id":"b001db31b3b66de093be31dcfa61742a74d9630b"},{"author":{"_account_id":3099,"name":"David Shrewsbury","email":"dshrewsb@redhat.com","username":"dshrews"},"change_message_id":"316b5f4933a43ca5bdb72b77d2ec86f4977af288","unresolved":false,"context_lines":[{"line_number":1196,"context_line":"         :default: /home/{username}"},{"line_number":1197,"context_line":""},{"line_number":1198,"context_line":"         The home directory of the zuul worker."},{"line_number":1199,"context_line":""},{"line_number":1200,"context_line":""},{"line_number":1201,"context_line":"The pool nodes need to be pre-configured:"},{"line_number":1202,"context_line":""}],"source_content_type":"text/x-rst","patch_set":28,"id":"dfbec78f_dee02529","line":1199,"updated":"2019-05-13 17:06:30.000000000","message":"Missing python-path.","commit_id":"b001db31b3b66de093be31dcfa61742a74d9630b"},{"author":{"_account_id":1,"name":"James E. Blair","email":"jim@acmegating.com","username":"corvus"},"change_message_id":"7d7644e1809780995d283c525ccc498ce3b4ae35","unresolved":false,"context_lines":[{"line_number":1198,"context_line":"         The home directory of the zuul worker."},{"line_number":1199,"context_line":""},{"line_number":1200,"context_line":""},{"line_number":1201,"context_line":"The pool nodes need to be pre-configured:"},{"line_number":1202,"context_line":""},{"line_number":1203,"context_line":"* Create a new user, for example: useradd -m zuul-worker"},{"line_number":1204,"context_line":"* Authorize nodepool to connect as root: copy the /var/lib/nodepool/.ssh/id_rsa.pub to"}],"source_content_type":"text/x-rst","patch_set":28,"id":"dfbec78f_0149792f","line":1201,"updated":"2019-05-13 22:47:33.000000000","message":"This preconfiguration suggests that this driver doesn\u0027t quite match the design of the system.  Nodepool is designed to interact with cloud resource providers, but here we are clearly *implementing* a cloud resource provider within nodepool.  There are other systems which can be used to run containers (k8s comes to mind).  Are we re-implementing those here?  Are there other lighter-weight options which don\u0027t require us to implement a container orchestration engine?","commit_id":"b001db31b3b66de093be31dcfa61742a74d9630b"}],"nodepool/driver/runc/handler.py":[{"author":{"_account_id":16068,"name":"Tobias Henkel","email":"tobias.henkel@bmw.de","username":"tobias.henkel"},"change_message_id":"0dd671ffb72fbb070c22debe434d855bb0d238ca","unresolved":false,"context_lines":[{"line_number":90,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":91,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":92,"context_line":"        if pool.max_servers is None or \\"},{"line_number":93,"context_line":"           len(pool.containers) + 1 \u003c\u003d pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_26d2fd5d","line":93,"updated":"2019-01-22 20:58:38.000000000","message":"nit: this can be simplified to:\n\n len(pool.containers) \u003c pool.max_servers","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"95d5c8025bacc85aa48377b2dc26ec856f40950a","unresolved":false,"context_lines":[{"line_number":90,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":91,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":92,"context_line":"        if pool.max_servers is None or \\"},{"line_number":93,"context_line":"           len(pool.containers) + 1 \u003c\u003d pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_e4cb1f50","line":93,"in_reply_to":"9fdfeff1_26d2fd5d","updated":"2019-02-13 14:08:55.000000000","message":"Done","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"bd3ff2c9154ec8da309e4f6648ce1f926aa99d36","unresolved":false,"context_lines":[{"line_number":88,"context_line":"        \u0027\u0027\u0027"},{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":93,"context_line":"        if pool.max_servers is None or len(pool.containers) \u003c pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""},{"line_number":97,"context_line":"    def hasProviderQuota(self, ntypes):"},{"line_number":98,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":99,"context_line":"        if pool.max_servers is None or \\"},{"line_number":100,"context_line":"           len(pool.containers) + len(ntypes) \u003c\u003d pool.max_servers:"},{"line_number":101,"context_line":"            return True"},{"line_number":102,"context_line":"        return False"},{"line_number":103,"context_line":""},{"line_number":104,"context_line":"    def launchesComplete(self):"},{"line_number":105,"context_line":"        \u0027\u0027\u0027"}],"source_content_type":"text/x-python","patch_set":22,"id":"9fdfeff1_55e2a5a1","line":102,"range":{"start_line":91,"start_character":0,"end_line":102,"end_character":20},"updated":"2019-02-15 12:33:22.000000000","message":"This looks OK to me who has not checked the rest of Zuul on how these methods are used, but they do not work for me. I am aiming for max. one container per each \"pool\", and this is the config that I am using:\n\n # This file is generated by Ansible\n #  DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN\n #\n ---\n images-dir: /opt/nodepool/images\n \n zookeeper-servers:\n   - host: localhost\n     port: 2181\n \n diskimages: []\n \n labels:\n   - name: f29\n     min-ready: 0\n     max-ready: 0\n \n providers:\n   - name: potemkin-runc\n     driver: runc\n     pools:\n       - name: ci-f29-potemkin05-vm.vesnicky.cesnet.cz\n         host-key: \u0027ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHuMVGaDJLxNlUBDQ05zM2/aEtvBoFL/quv1YK0oEkgJ\u0027\n         max-servers: 1\n         labels:\n           - name: f29\n             username: ci\n             path: /containers/f29\n       - name: ci-f29-potemkin06-vm.vesnicky.cesnet.cz\n         host-key: \u0027ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHH329IczhjR/WGSHTLtMENVP5rrKblyLZr3HWOkwgIY\u0027\n         max-servers: 1\n         labels:\n           - name: f29\n             username: ci\n             path: /containers/f29\n       - name: ci-f29-potemkin07-vm.vesnicky.cesnet.cz\n         host-key: \u0027ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEJTPjfpP0z3mKwz8iL8rfUwN+G10Ucs+XUHrXd9INWu\u0027\n         max-servers: 1\n         labels:\n           - name: f29\n             username: ci\n             path: /containers/f29\n       - name: ci-f29-potemkin08-vm.vesnicky.cesnet.cz\n         host-key: \u0027ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJL8dD6/MD4pDKxxqgM7o24PtBDCsCIx6fUvOLMb0Z+q\u0027\n         max-servers: 1\n         labels:\n           - name: f29\n             username: ci\n             path: /containers/f29\n\nWhen I submitted a patch which requires four jobs, three of them were launched at one node:\n\n $ ssh root@nodepool.gerrit.cesnet.cz nodepool list\n +------------+---------------+-------+-------------------------------+-------------+--------------------------------+--------+-------------+--------+\n | ID         | Provider      | Label | Server ID                     | Public IPv4 | IPv6                           | State  | Age         | Locked |\n +------------+---------------+-------+-------------------------------+-------------+--------------------------------+--------+-------------+--------+\n | 0000000056 | potemkin-runc | f29   | 0000000056-f29-200-0000000053 | None        | 2001:718:1:28:5054:ff:fece:501 | in-use | 00:00:00:16 | locked |\n | 0000000058 | potemkin-runc | f29   | 0000000058-f29-200-0000000055 | None        | 2001:718:1:28:5054:ff:fece:801 | in-use | 00:00:00:16 | locked |\n | 0000000057 | potemkin-runc | f29   | 0000000057-f29-200-0000000054 | None        | 2001:718:1:28:5054:ff:fece:501 | in-use | 00:00:00:16 | locked |\n | 0000000059 | potemkin-runc | f29   | 0000000059-f29-200-0000000056 | None        | 2001:718:1:28:5054:ff:fece:501 | in-use | 00:00:00:16 | locked |\n +------------+---------------+-------+-------------------------------+-------------+--------------------------------+--------+-------------+--------+\n\n(The last part of the IPv6 address identifies the runc hypervisor in this case.)\n\nWhat am I doing wrong?","commit_id":"0d67bc8ea0d6fc99ba1c57f32b165eaf5a204232"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"1fc168636d4bdb5c3877f86c4ddcf14f0dd4fb90","unresolved":false,"context_lines":[{"line_number":88,"context_line":"        \u0027\u0027\u0027"},{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":93,"context_line":"        if pool.max_servers is None or len(pool.containers) \u003c pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""},{"line_number":97,"context_line":"    def hasProviderQuota(self, ntypes):"},{"line_number":98,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":99,"context_line":"        if pool.max_servers is None or \\"},{"line_number":100,"context_line":"           len(pool.containers) + len(ntypes) \u003c\u003d pool.max_servers:"},{"line_number":101,"context_line":"            return True"},{"line_number":102,"context_line":"        return False"},{"line_number":103,"context_line":""},{"line_number":104,"context_line":"    def launchesComplete(self):"},{"line_number":105,"context_line":"        \u0027\u0027\u0027"}],"source_content_type":"text/x-python","patch_set":22,"id":"9fdfeff1_ad5ed873","line":102,"range":{"start_line":91,"start_character":0,"end_line":102,"end_character":20},"in_reply_to":"9fdfeff1_4322afbc","updated":"2019-02-20 07:58:27.000000000","message":"That fix looks correct to me, thanks!","commit_id":"0d67bc8ea0d6fc99ba1c57f32b165eaf5a204232"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"e4df2fb8efea0e6d3526edd189b4e52da1855c40","unresolved":false,"context_lines":[{"line_number":88,"context_line":"        \u0027\u0027\u0027"},{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":93,"context_line":"        if pool.max_servers is None or len(pool.containers) \u003c pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""},{"line_number":97,"context_line":"    def hasProviderQuota(self, ntypes):"},{"line_number":98,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":99,"context_line":"        if pool.max_servers is None or \\"},{"line_number":100,"context_line":"           len(pool.containers) + len(ntypes) \u003c\u003d pool.max_servers:"},{"line_number":101,"context_line":"            return True"},{"line_number":102,"context_line":"        return False"},{"line_number":103,"context_line":""},{"line_number":104,"context_line":"    def launchesComplete(self):"},{"line_number":105,"context_line":"        \u0027\u0027\u0027"}],"source_content_type":"text/x-python","patch_set":22,"id":"9fdfeff1_bc0acc86","line":102,"range":{"start_line":91,"start_character":0,"end_line":102,"end_character":20},"in_reply_to":"9fdfeff1_55e2a5a1","updated":"2019-02-16 00:02:49.000000000","message":"There is an issue indeed. The max-servers configuration is actually provider scope (not pool scope), but the code is using the default pool\u0027s max-server which is inf. The fix would be to make the max-server\u0027s pool scope in the runc/config.py.\n\nAlso, not sure why you got nodes with \"max-ready: 0\".","commit_id":"0d67bc8ea0d6fc99ba1c57f32b165eaf5a204232"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"dfd6bb159d2d149071af57cc7b85f878762e7f62","unresolved":false,"context_lines":[{"line_number":88,"context_line":"        \u0027\u0027\u0027"},{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":93,"context_line":"        if pool.max_servers is None or len(pool.containers) \u003c pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""},{"line_number":97,"context_line":"    def hasProviderQuota(self, ntypes):"},{"line_number":98,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":99,"context_line":"        if pool.max_servers is None or \\"},{"line_number":100,"context_line":"           len(pool.containers) + len(ntypes) \u003c\u003d pool.max_servers:"},{"line_number":101,"context_line":"            return True"},{"line_number":102,"context_line":"        return False"},{"line_number":103,"context_line":""},{"line_number":104,"context_line":"    def launchesComplete(self):"},{"line_number":105,"context_line":"        \u0027\u0027\u0027"}],"source_content_type":"text/x-python","patch_set":22,"id":"9fdfeff1_4322afbc","line":102,"range":{"start_line":91,"start_character":0,"end_line":102,"end_character":20},"in_reply_to":"9fdfeff1_7b599d33","updated":"2019-02-19 16:16:31.000000000","message":"Done\n\nThere were two bugs, actually: first of them storing the list of running containers in a config (which gets rewritten from a background thread), the second one a TOCTOU between checking the quotas en-masse for several nodes and attempting to launch them.\n\nI\u0027ve changed this to rely on information stored in zookeeper, similarly to how the OpenStack driver is doing this.","commit_id":"0d67bc8ea0d6fc99ba1c57f32b165eaf5a204232"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"e8a57fba290aaa4b77f6186822ad904707b5fee8","unresolved":false,"context_lines":[{"line_number":88,"context_line":"        \u0027\u0027\u0027"},{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":93,"context_line":"        if pool.max_servers is None or len(pool.containers) \u003c pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""},{"line_number":97,"context_line":"    def hasProviderQuota(self, ntypes):"},{"line_number":98,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":99,"context_line":"        if pool.max_servers is None or \\"},{"line_number":100,"context_line":"           len(pool.containers) + len(ntypes) \u003c\u003d pool.max_servers:"},{"line_number":101,"context_line":"            return True"},{"line_number":102,"context_line":"        return False"},{"line_number":103,"context_line":""},{"line_number":104,"context_line":"    def launchesComplete(self):"},{"line_number":105,"context_line":"        \u0027\u0027\u0027"}],"source_content_type":"text/x-python","patch_set":22,"id":"9fdfeff1_d5fada41","line":102,"range":{"start_line":91,"start_character":0,"end_line":102,"end_character":20},"in_reply_to":"9fdfeff1_bc0acc86","updated":"2019-02-19 11:41:04.000000000","message":"I double-checked the config propagation, and it appears to work. When I add extra debugging output to RuncNodeRequestHandler.hasRemainingQuota, the pool.max_servers is correctly set to 1, but pool.containers remains empty.\n\nI checked this with a change that requires 9 jobs and a pool config containing 4 pools (runc hypervisors), each with max-servers: 1.\n\nI also tried queueing two changes at once, each again resulting in 9 jobs. This resulted in 18 containers on 4 hypervisors.\n\nThis looks like a time-of-check, time-of-use issue to me. Stuff gets added into pool.containers only after they are really started via RuncProvider.createContainer(). Perhaps there should be some locking around that? How do other providers handle this?","commit_id":"0d67bc8ea0d6fc99ba1c57f32b165eaf5a204232"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"0735196ae0fa69d8c34d2a1c10fdfefe9177934c","unresolved":false,"context_lines":[{"line_number":88,"context_line":"        \u0027\u0027\u0027"},{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":93,"context_line":"        if pool.max_servers is None or len(pool.containers) \u003c pool.max_servers:"},{"line_number":94,"context_line":"            return True"},{"line_number":95,"context_line":"        return False"},{"line_number":96,"context_line":""},{"line_number":97,"context_line":"    def hasProviderQuota(self, ntypes):"},{"line_number":98,"context_line":"        pool \u003d self.provider.pools[self.pool.name]"},{"line_number":99,"context_line":"        if pool.max_servers is None or \\"},{"line_number":100,"context_line":"           len(pool.containers) + len(ntypes) \u003c\u003d pool.max_servers:"},{"line_number":101,"context_line":"            return True"},{"line_number":102,"context_line":"        return False"},{"line_number":103,"context_line":""},{"line_number":104,"context_line":"    def launchesComplete(self):"},{"line_number":105,"context_line":"        \u0027\u0027\u0027"}],"source_content_type":"text/x-python","patch_set":22,"id":"9fdfeff1_7b599d33","line":102,"range":{"start_line":91,"start_character":0,"end_line":102,"end_character":20},"in_reply_to":"9fdfeff1_d5fada41","updated":"2019-02-19 12:57:44.000000000","message":"...and this is because `self.provider` is a RuncProviderConfig, and `pool` is an instance of RuncPool which is again just a config class. The container instances are managed elsewhere, within a RuncProvider instance. I\u0027ll try to see if I can find out how to access that one.","commit_id":"0d67bc8ea0d6fc99ba1c57f32b165eaf5a204232"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"1fc168636d4bdb5c3877f86c4ddcf14f0dd4fb90","unresolved":false,"context_lines":[{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        max_servers \u003d self.provider.pools[self.pool.name].max_servers"},{"line_number":93,"context_line":"        # This is supposedly a light-weight container job which does not"},{"line_number":94,"context_line":"        # consume any RAM and CPU resources and little disk space when already"},{"line_number":95,"context_line":"        # being deleted, so let\u0027s parallelize a bit."}],"source_content_type":"text/x-python","patch_set":24,"id":"9fdfeff1_cdc47c11","line":92,"updated":"2019-02-20 07:58:27.000000000","message":"We could return True directly if None to avoid the zk iterator.","commit_id":"fd7b14f382052cf443f697f247d5f30e6238c554"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"4dcf4d56478c62cadd0213934231282552a47143","unresolved":false,"context_lines":[{"line_number":89,"context_line":"        return True"},{"line_number":90,"context_line":""},{"line_number":91,"context_line":"    def hasRemainingQuota(self, ntype):"},{"line_number":92,"context_line":"        max_servers \u003d self.provider.pools[self.pool.name].max_servers"},{"line_number":93,"context_line":"        # This is supposedly a light-weight container job which does not"},{"line_number":94,"context_line":"        # consume any RAM and CPU resources and little disk space when already"},{"line_number":95,"context_line":"        # being deleted, so let\u0027s parallelize a bit."}],"source_content_type":"text/x-python","patch_set":24,"id":"9fdfeff1_c811ca06","line":92,"in_reply_to":"9fdfeff1_cdc47c11","updated":"2019-02-20 08:28:50.000000000","message":"Done","commit_id":"fd7b14f382052cf443f697f247d5f30e6238c554"}],"nodepool/driver/runc/playbooks/clean.yml":[{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"47d4eacbc96b9750efe1eb9cea0472af87a80426","unresolved":false,"context_lines":[{"line_number":7,"context_line":"      register: running"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"    - name: \"List state data\""},{"line_number":10,"context_line":"      command: find /var/lib/nodepool/runc/ -maxdepth 1 -mindepth 1 -mtime +1"},{"line_number":11,"context_line":"      register: state"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"    - name: \"Remove leaked state data directory\""}],"source_content_type":"text/x-yaml","patch_set":17,"id":"9fdfeff1_8f7911fd","line":10,"updated":"2019-02-13 10:40:43.000000000","message":"This causes a failure on an out-of-the-box setup when the \"hypervisor\" is a Fedora 29 image, and Nodepool is configured to launch images from another location that the hypervisor\u0027s rootfs (that\u0027s what I use, I have not checked if it works when launched from rootfs).\n\nEither the docs should be expanded to `mkdir /var/lib/nodepool/runc/` on each of the container hypervisors, or the playbooks should be made to auto-create these when needed.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"95d5c8025bacc85aa48377b2dc26ec856f40950a","unresolved":false,"context_lines":[{"line_number":7,"context_line":"      register: running"},{"line_number":8,"context_line":""},{"line_number":9,"context_line":"    - name: \"List state data\""},{"line_number":10,"context_line":"      command: find /var/lib/nodepool/runc/ -maxdepth 1 -mindepth 1 -mtime +1"},{"line_number":11,"context_line":"      register: state"},{"line_number":12,"context_line":""},{"line_number":13,"context_line":"    - name: \"Remove leaked state data directory\""}],"source_content_type":"text/x-yaml","patch_set":17,"id":"9fdfeff1_64b88fc7","line":10,"in_reply_to":"9fdfeff1_8f7911fd","updated":"2019-02-13 14:08:55.000000000","message":"Good catch, this playbook needs to ensure the directory exists first.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"}],"nodepool/driver/runc/playbooks/init.yml":[{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"0c530cf7008bd603a8ddc82cb4edff01c8b61b69","unresolved":false,"context_lines":[{"line_number":66,"context_line":""},{"line_number":67,"context_line":"    - name: \"Spawn zuul_console daemon\""},{"line_number":68,"context_line":"      zuul_console:"},{"line_number":69,"context_line":"        path: \"{{ zuul_console_dir }}/{log_uuid}.log\""},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"    - name: Gather network facts"},{"line_number":72,"context_line":"      setup:"}],"source_content_type":"text/x-yaml","patch_set":21,"id":"9fdfeff1_b6576958","line":69,"updated":"2019-02-14 18:34:12.000000000","message":"There was one more discrepancy here, as seen by strace:\n\n [pid 12943] openat(AT_FDCWD, \"/tmp/025056e2-0054-d09a-1846-000000000008-f29.log\", O_RDONLY) \u003d -1 ENOENT (No such file or directory)\n [pid 12943] sendto(6, \"[Zuul] Log not found\\n\", 21, 0, NULL, 0) \u003d 21\n\nChanged in next PS.","commit_id":"dbc85f23218b0bafbe9f6928556a6827323ecd2a"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"9c172d69e2b26670cd16eb2a412cbe0219af8d2a","unresolved":false,"context_lines":[{"line_number":66,"context_line":""},{"line_number":67,"context_line":"    - name: \"Spawn zuul_console daemon\""},{"line_number":68,"context_line":"      zuul_console:"},{"line_number":69,"context_line":"        path: \"{{ zuul_console_dir }}/{log_uuid}.log\""},{"line_number":70,"context_line":""},{"line_number":71,"context_line":"    - name: Gather network facts"},{"line_number":72,"context_line":"      setup:"}],"source_content_type":"text/x-yaml","patch_set":21,"id":"9fdfeff1_8cab31d4","line":69,"in_reply_to":"9fdfeff1_b6576958","updated":"2019-02-15 01:05:12.000000000","message":"good catch.","commit_id":"dbc85f23218b0bafbe9f6928556a6827323ecd2a"}],"nodepool/driver/runc/playbooks/library/zuul_console.py":[{"author":{"_account_id":16068,"name":"Tobias Henkel","email":"tobias.henkel@bmw.de","username":"tobias.henkel"},"change_message_id":"0dd671ffb72fbb070c22debe434d855bb0d238ca","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":17,"id":"9fdfeff1_66e6c547","updated":"2019-01-22 20:58:38.000000000","message":"I feel very bad about forking this script from zuul here. It\u0027s almost guarenteed that if we change something in the zuul variant we forget it here. However I have no better idea atm.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":1,"name":"James E. Blair","email":"jim@acmegating.com","username":"corvus"},"change_message_id":"7d7644e1809780995d283c525ccc498ce3b4ae35","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":28,"id":"dfbec78f_a1518ddb","updated":"2019-05-13 22:47:33.000000000","message":"I am very uncomfortable with Nodepool sharing ownership of zuul_console.  It is not at all something that Nodepool should be concerned with.  There shouldn\u0027t be any zuul-specific code in Nodepool.","commit_id":"b001db31b3b66de093be31dcfa61742a74d9630b"}],"nodepool/driver/runc/provider.py":[{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"47d4eacbc96b9750efe1eb9cea0472af87a80426","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":17,"id":"9fdfeff1_af941538","updated":"2019-02-13 10:40:43.000000000","message":"This provider now uses Ansible, but nothing ensures that the `ansible-playbook` command is available. I fixed that locally by simply adding the same version of Ansible as Zuul is using to requirements.txt.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"95d5c8025bacc85aa48377b2dc26ec856f40950a","unresolved":false,"context_lines":[],"source_content_type":"","patch_set":17,"id":"9fdfeff1_84e253d4","in_reply_to":"9fdfeff1_af941538","updated":"2019-02-13 14:08:55.000000000","message":"indeed, next PS should fix that.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"47d4eacbc96b9750efe1eb9cea0472af87a80426","unresolved":false,"context_lines":[{"line_number":40,"context_line":"            else:"},{"line_number":41,"context_line":"                hostname \u003d pool.name"},{"line_number":42,"context_line":"            pool.use_rootfs \u003d False"},{"line_number":43,"context_line":"            pool.hostname \u003d socket.gethostbyname(hostname)"},{"line_number":44,"context_line":"            pool.inventory \u003d os.path.join("},{"line_number":45,"context_line":"                self.data_path, \"%s.inventory\" % pool.name)"},{"line_number":46,"context_line":"            pool.info_path \u003d os.path.join("}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_8fb831c8","line":43,"updated":"2019-02-13 10:40:43.000000000","message":"This breaks on IPv6-only container hosts (which is exactly the setup that I am trying to run). In my testing, it seemed to be enough to just store it unmodified:\n\n  pool.hostname \u003d hostname\n\nMy config file contained raw IPv6 address literals. The containers were launched, *but* my inventory looked like this:\n\n all:\n  hosts:\n    f29:\n      ansible_connection: ssh\n      ansible_host: 2001:718:1:28:5054:ff:fece:501\n      ansible_port: 51278\n      ansible_user: ci\n      nodepool:\n        az: null\n        cloud: null\n        host_id: null\n        interface_ip: 2001:718:1:28:5054:ff:fece:501\n        label: f29\n        private_ipv4: null\n        provider: potemkin-runc\n        public_ipv4: 2001:718:1:28:5054:ff:fece:501\n        public_ipv6: null\n        region: null\n\nNotice the public_ipv4 vs. public_ipv6 confusion. Am I doing something wrong here?","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"845b8db315bd9ec68377308631089b7e6bc690c9","unresolved":false,"context_lines":[{"line_number":40,"context_line":"            else:"},{"line_number":41,"context_line":"                hostname \u003d pool.name"},{"line_number":42,"context_line":"            pool.use_rootfs \u003d False"},{"line_number":43,"context_line":"            pool.hostname \u003d socket.gethostbyname(hostname)"},{"line_number":44,"context_line":"            pool.inventory \u003d os.path.join("},{"line_number":45,"context_line":"                self.data_path, \"%s.inventory\" % pool.name)"},{"line_number":46,"context_line":"            pool.info_path \u003d os.path.join("}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_adb733f5","line":43,"in_reply_to":"9fdfeff1_840b338c","updated":"2019-02-13 18:25:29.000000000","message":"I fixed this in https://review.openstack.org/#/c/636703","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"95d5c8025bacc85aa48377b2dc26ec856f40950a","unresolved":false,"context_lines":[{"line_number":40,"context_line":"            else:"},{"line_number":41,"context_line":"                hostname \u003d pool.name"},{"line_number":42,"context_line":"            pool.use_rootfs \u003d False"},{"line_number":43,"context_line":"            pool.hostname \u003d socket.gethostbyname(hostname)"},{"line_number":44,"context_line":"            pool.inventory \u003d os.path.join("},{"line_number":45,"context_line":"                self.data_path, \"%s.inventory\" % pool.name)"},{"line_number":46,"context_line":"            pool.info_path \u003d os.path.join("}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_840b338c","line":43,"in_reply_to":"9fdfeff1_8fb831c8","updated":"2019-02-13 14:08:55.000000000","message":"The inventory looks correct, it\u0027s the nodepool metadata that needs to be adjusted, e.g. L59 of the launcher.py module.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"47d4eacbc96b9750efe1eb9cea0472af87a80426","unresolved":false,"context_lines":[{"line_number":52,"context_line":"                    \" ansible_python_interpreter\u003d/usr/bin/python\\n\""},{"line_number":53,"context_line":"                    \"[localhost]\\nlocalhost\""},{"line_number":54,"context_line":"                    \" ansible_connection\u003dlocal\""},{"line_number":55,"context_line":"                    \" ansible_python_interpreter\u003d/usr/bin/python\\n\")"},{"line_number":56,"context_line":""},{"line_number":57,"context_line":"            if [True for label in pool.labels.values()"},{"line_number":58,"context_line":"                if label.path \u003d\u003d \u0027/\u0027]:"}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_8f8a7118","line":55,"range":{"start_line":55,"start_character":49,"end_line":55,"end_character":64},"updated":"2019-02-13 10:40:43.000000000","message":"With `ansible` added to requirements.txt (see my other comment), this should no longer be hardcoded. On OSes with funyn setup (such as centos with Python from the software collections), the Python interpreter which Ansible was set up to work with is no longer the systemwide Python. I changed this to use sys.executable() here (and `import sys` earlier).","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":13413,"name":"Jan Kundrát","email":"jkt@kde.org","username":"jkt"},"change_message_id":"845b8db315bd9ec68377308631089b7e6bc690c9","unresolved":false,"context_lines":[{"line_number":52,"context_line":"                    \" ansible_python_interpreter\u003d/usr/bin/python\\n\""},{"line_number":53,"context_line":"                    \"[localhost]\\nlocalhost\""},{"line_number":54,"context_line":"                    \" ansible_connection\u003dlocal\""},{"line_number":55,"context_line":"                    \" ansible_python_interpreter\u003d/usr/bin/python\\n\")"},{"line_number":56,"context_line":""},{"line_number":57,"context_line":"            if [True for label in pool.labels.values()"},{"line_number":58,"context_line":"                if label.path \u003d\u003d \u0027/\u0027]:"}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_ed8f7b27","line":55,"range":{"start_line":55,"start_character":49,"end_line":55,"end_character":64},"in_reply_to":"9fdfeff1_4401cb69","updated":"2019-02-13 18:25:29.000000000","message":"Actually I am referting to the interpreter at the nodepool machine. See https://review.openstack.org/#/c/636702/1 .","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"95d5c8025bacc85aa48377b2dc26ec856f40950a","unresolved":false,"context_lines":[{"line_number":52,"context_line":"                    \" ansible_python_interpreter\u003d/usr/bin/python\\n\""},{"line_number":53,"context_line":"                    \"[localhost]\\nlocalhost\""},{"line_number":54,"context_line":"                    \" ansible_connection\u003dlocal\""},{"line_number":55,"context_line":"                    \" ansible_python_interpreter\u003d/usr/bin/python\\n\")"},{"line_number":56,"context_line":""},{"line_number":57,"context_line":"            if [True for label in pool.labels.values()"},{"line_number":58,"context_line":"                if label.path \u003d\u003d \u0027/\u0027]:"}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_4401cb69","line":55,"range":{"start_line":55,"start_character":49,"end_line":55,"end_character":64},"in_reply_to":"9fdfeff1_8f8a7118","updated":"2019-02-13 14:08:55.000000000","message":"yeah, I wished ansible would be smart enough to figure out remote host python. I guess this could be part of the pool configuration.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":16068,"name":"Tobias Henkel","email":"tobias.henkel@bmw.de","username":"tobias.henkel"},"change_message_id":"0dd671ffb72fbb070c22debe434d855bb0d238ca","unresolved":false,"context_lines":[{"line_number":74,"context_line":""},{"line_number":75,"context_line":"    def run_ansible(self, playbook, pool, extra_vars\u003d[]):"},{"line_number":76,"context_line":"        # TODO: replace this by a host-key provider setting"},{"line_number":77,"context_line":"        os.environ[\"ANSIBLE_HOST_KEY_CHECKING\"] \u003d \"False\""},{"line_number":78,"context_line":"        os.environ[\"ANSIBLE_CONFIG\"] \u003d self.ansible_cfg"},{"line_number":79,"context_line":"        argv \u003d [\"ansible-playbook\","},{"line_number":80,"context_line":"                \"%s/%s.yml\" % (self.playbook_path, playbook),"}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_86e3a933","line":77,"updated":"2019-01-22 20:58:38.000000000","message":"Should we fix this todo before landing?","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"},{"author":{"_account_id":9311,"name":"Tristan Cacqueray","email":"tdecacqu@redhat.com","username":"tristanC"},"change_message_id":"95d5c8025bacc85aa48377b2dc26ec856f40950a","unresolved":false,"context_lines":[{"line_number":74,"context_line":""},{"line_number":75,"context_line":"    def run_ansible(self, playbook, pool, extra_vars\u003d[]):"},{"line_number":76,"context_line":"        # TODO: replace this by a host-key provider setting"},{"line_number":77,"context_line":"        os.environ[\"ANSIBLE_HOST_KEY_CHECKING\"] \u003d \"False\""},{"line_number":78,"context_line":"        os.environ[\"ANSIBLE_CONFIG\"] \u003d self.ansible_cfg"},{"line_number":79,"context_line":"        argv \u003d [\"ansible-playbook\","},{"line_number":80,"context_line":"                \"%s/%s.yml\" % (self.playbook_path, playbook),"}],"source_content_type":"text/x-python","patch_set":17,"id":"9fdfeff1_e4f9df7f","line":77,"in_reply_to":"9fdfeff1_86e3a933","updated":"2019-02-13 14:08:55.000000000","message":"indeed.","commit_id":"8fba059a8ee1868b711449fa65bf0bfadbce3e10"}]}
