[PATCH 0/5I -V4] Add multiple disks in Template, add max vcpu and topology support

V4 - Include patch with support to max vcpu and other changes in vm backend to support cpu hot plug in the future. - Fixes vm update problem (offline update was not atomic) V3 - Addresses Aline's comments * fixes backend, API.md and API.json * fixes test cases and add new test V2 - Rebase over latest new UI patches V1 - Initial version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit *** BLURB HERE *** Rodrigo Trujillo (5): Fix Template backend create/update for multiple disks Enable create guests with multiple disks from different pools UI - Implement multiple disks support in Template edit window Test: Fix and add test related to disks in templates Update VCPU by using libvirt function src/wok/plugins/kimchi/API.json | 19 +- src/wok/plugins/kimchi/docs/API.md | 6 + src/wok/plugins/kimchi/i18n.py | 4 + src/wok/plugins/kimchi/mockmodel.py | 57 +++-- src/wok/plugins/kimchi/model/templates.py | 30 ++- src/wok/plugins/kimchi/model/vms.py | 130 +++++++++-- src/wok/plugins/kimchi/osinfo.py | 1 + src/wok/plugins/kimchi/tests/test_model.py | 13 ++ src/wok/plugins/kimchi/tests/test_rest.py | 15 +- src/wok/plugins/kimchi/tests/test_template.py | 44 +++- .../kimchi/ui/js/src/kimchi.template_edit_main.js | 246 ++++++++++----------- .../kimchi/ui/pages/template-edit.html.tmpl | 5 +- src/wok/plugins/kimchi/vmtemplate.py | 81 +++++-- 13 files changed, 425 insertions(+), 226 deletions(-) -- 2.1.0

This patch fixes the backend code in order to allow create templates with multiple disks and update them correctly. Backend supports adding multiple disks with a json like: "disks": [{"size": 3, "format": "qcow"}, {"size": 5, "pool":"/plugins/kimchi/storagepools/poolX"}, {"size": 7, "format": "raw", "pool": "/plugins/kimchi/storagepools/poolY"} ] If any information is missing, Kimchi will use values from template.conf. Kimchi will expose template disk data like: "disks": [{"index" : ... , "size" : ... , "format" : ... , "pool" : { "name" : ... , "type" : ... } , }, ... ] Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/API.json | 19 ++++++++++++++++++- src/wok/plugins/kimchi/docs/API.md | 6 ++++++ src/wok/plugins/kimchi/model/templates.py | 9 +++++---- src/wok/plugins/kimchi/osinfo.py | 1 + src/wok/plugins/kimchi/vmtemplate.py | 26 +++++++++++++++++++++----- 5 files changed, 51 insertions(+), 10 deletions(-) diff --git a/src/wok/plugins/kimchi/API.json b/src/wok/plugins/kimchi/API.json index 7addfc7..6c6a5f7 100644 --- a/src/wok/plugins/kimchi/API.json +++ b/src/wok/plugins/kimchi/API.json @@ -477,6 +477,12 @@ "type": "integer", "minimum": 0 }, + "format": { + "description": "Type of the image of the disk", + "type": "string", + "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", + "error": "KCHTMPL0027E" + }, "size": { "description": "Size (GB) of the disk", "type": "number", @@ -488,8 +494,13 @@ "type": "string", "pattern": "^/.+$", "error": "KCHTMPL0023E" + }, + "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": "^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E" } - } }, "minItems": 1, @@ -660,6 +671,12 @@ "type": "string", "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", "error": "KCHTMPL0027E" + }, + "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": 
"^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E" } } }, diff --git a/src/wok/plugins/kimchi/docs/API.md b/src/wok/plugins/kimchi/docs/API.md index 1e7d8ca..a4dd084 100644 --- a/src/wok/plugins/kimchi/docs/API.md +++ b/src/wok/plugins/kimchi/docs/API.md @@ -289,6 +289,8 @@ Represents a snapshot of the Virtual Machine's primary monitor. (either *size* or *volume* must be specified): * index: The device index * size: The device size in GB + * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc + * pool: URI of the storagepool where disk will be created * base: Base image of this disk * graphics *(optional)*: The graphics paramenters of this template @@ -388,6 +390,9 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc. + * pool: Information about the pool where disk or volume will be created + * name: URI of the storagepool + * type: Type of the storagepool (dir, nfs, scsci, iscsi, etc) * graphics: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. * vnc: Graphical display using the Virtual Network @@ -422,6 +427,7 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc. + * pool: URI of the storagepool where template allocates vm disk. * graphics *(optional)*: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. 
* vnc: Graphical display using the Virtual Network diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index 47b2c9e..bac6e47 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -234,8 +234,9 @@ class LibvirtVMTemplate(VMTemplate): self.conn = conn VMTemplate.__init__(self, args, scan) - def _storage_validate(self): - pool_uri = self.info['storagepool'] + def _storage_validate(self, pool_uri=None): + if pool_uri is None: + pool_uri = self.info['storagepool'] pool_name = pool_name_from_uri(pool_uri) try: conn = self.conn.get() @@ -278,8 +279,8 @@ class LibvirtVMTemplate(VMTemplate): xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0] - def _get_storage_type(self): - pool = self._storage_validate() + def _get_storage_type(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/@type")[0] diff --git a/src/wok/plugins/kimchi/osinfo.py b/src/wok/plugins/kimchi/osinfo.py index 30ecd4f..5b0b5bf 100644 --- a/src/wok/plugins/kimchi/osinfo.py +++ b/src/wok/plugins/kimchi/osinfo.py @@ -158,6 +158,7 @@ def _get_tmpl_defaults(): for disk in storage_section.keys(): data = storage_section[disk] data['index'] = int(disk.split('.')[1]) + data['pool'] = defaults['storagepool'] defaults['disks'].append(data) # Parse processor section to get cpus and cpu_topology values diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index 3097b66..c7635b0 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -76,14 +76,30 @@ class VMTemplate(object): graphics.update(graph_args) args['graphics'] = graphics - # Merge disks dict + # Provide compatibility with old template version and sets data to + # correct output format if necessary + def _fix_disk_compatibilities(disks): + for i, disk in enumerate(disks): + if 'pool' not in disk: + 
disk['pool'] = {'name': self.info['storagepool']} + if (isinstance(disk['pool'], str) or + isinstance(disk['pool'], unicode)): + disk['pool'] = {'name': disk['pool']} + disk['pool']['type'] = \ + self._get_storage_type(disk['pool']['name']) + + if self.info.get('disks') is not None: + _fix_disk_compatibilities(self.info['disks']) + if args.get('disks') is not None: + _fix_disk_compatibilities(args['disks']) + + # Merge disks dict with disks provided by user default_disk = self.info['disks'][0] for i, d in enumerate(args.get('disks', [])): disk = dict(default_disk) + disk['index'] = i disk.update(d) - - # Assign right disk format to logical and [i]scsi storagepools - if self._get_storage_type() in ['logical', 'iscsi', 'scsi']: + if disk['pool']['type'] in ['logical', 'iscsi', 'scsi']: disk['format'] = 'raw' args['disks'][i] = disk @@ -401,7 +417,7 @@ class VMTemplate(object): def _get_storage_path(self): return '' - def _get_storage_type(self): + def _get_storage_type(self, pool=None): return '' def _get_volume_path(self): -- 2.1.0

On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch fixes the backend code in order to allow create templates with multiple disks and update them correctly. Backend supports adding multiple disks with a json like:
"disks": [{"size": 3, "format": "qcow"}, {"size": 5, "pool":"/plugins/kimchi/storagepools/poolX"}, {"size": 7, "format": "raw", "pool": "/plugins/kimchi/storagepools/poolY"} ]
If any information is missing, Kimchi will use values from template.conf.
Kimchi will expose template disk data like: "disks": [{"index" : ... , "size" : ... , "format" : ... , "pool" : { "name" : ... , "type" : ... } , }, ... ]
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/API.json | 19 ++++++++++++++++++- src/wok/plugins/kimchi/docs/API.md | 6 ++++++ src/wok/plugins/kimchi/model/templates.py | 9 +++++---- src/wok/plugins/kimchi/osinfo.py | 1 + src/wok/plugins/kimchi/vmtemplate.py | 26 +++++++++++++++++++++----- 5 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/src/wok/plugins/kimchi/API.json b/src/wok/plugins/kimchi/API.json index 7addfc7..6c6a5f7 100644 --- a/src/wok/plugins/kimchi/API.json +++ b/src/wok/plugins/kimchi/API.json @@ -477,6 +477,12 @@ "type": "integer", "minimum": 0 }, + "format": { + "description": "Type of the image of the disk", + "type": "string", + "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", + "error": "KCHTMPL0027E" + }, "size": { "description": "Size (GB) of the disk", "type": "number", @@ -488,8 +494,13 @@ "type": "string", "pattern": "^/.+$", "error": "KCHTMPL0023E" + },
+ "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": "^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E"
'pool' will be an object with 2 elements: 'name' and 'type'.
} - } }, "minItems": 1, @@ -660,6 +671,12 @@ "type": "string", "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", "error": "KCHTMPL0027E" + },
+ "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": "^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E"
'pool' will be an object with 2 elements: 'name' and 'type'.
} } }, diff --git a/src/wok/plugins/kimchi/docs/API.md b/src/wok/plugins/kimchi/docs/API.md index 1e7d8ca..a4dd084 100644 --- a/src/wok/plugins/kimchi/docs/API.md +++ b/src/wok/plugins/kimchi/docs/API.md @@ -289,6 +289,8 @@ Represents a snapshot of the Virtual Machine's primary monitor. (either *size* or *volume* must be specified): * index: The device index * size: The device size in GB + * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc + * pool: URI of the storagepool where disk will be created
'pool' will be an object with 2 elements: 'name' and 'type'.
* base: Base image of this disk
* graphics *(optional)*: The graphics paramenters of this template @@ -388,6 +390,9 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc. + * pool: Information about the pool where disk or volume will be created + * name: URI of the storagepool + * type: Type of the storagepool (dir, nfs, scsci, iscsi, etc) * graphics: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. * vnc: Graphical display using the Virtual Network @@ -422,6 +427,7 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc.
+ * pool: URI of the storagepool where template allocates vm disk.
'pool' will be an object with 2 elements: 'name' and 'type'.
* graphics *(optional)*: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. * vnc: Graphical display using the Virtual Network diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index 47b2c9e..bac6e47 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -234,8 +234,9 @@ class LibvirtVMTemplate(VMTemplate): self.conn = conn VMTemplate.__init__(self, args, scan)
- def _storage_validate(self): - pool_uri = self.info['storagepool'] + def _storage_validate(self, pool_uri=None): + if pool_uri is None: + pool_uri = self.info['storagepool'] pool_name = pool_name_from_uri(pool_uri) try: conn = self.conn.get() @@ -278,8 +279,8 @@ class LibvirtVMTemplate(VMTemplate): xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0]
- def _get_storage_type(self): - pool = self._storage_validate() + def _get_storage_type(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/@type")[0]
diff --git a/src/wok/plugins/kimchi/osinfo.py b/src/wok/plugins/kimchi/osinfo.py index 30ecd4f..5b0b5bf 100644 --- a/src/wok/plugins/kimchi/osinfo.py +++ b/src/wok/plugins/kimchi/osinfo.py @@ -158,6 +158,7 @@ def _get_tmpl_defaults(): for disk in storage_section.keys(): data = storage_section[disk] data['index'] = int(disk.split('.')[1]) + data['pool'] = defaults['storagepool'] defaults['disks'].append(data)
# Parse processor section to get cpus and cpu_topology values diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index 3097b66..c7635b0 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -76,14 +76,30 @@ class VMTemplate(object): graphics.update(graph_args) args['graphics'] = graphics
- # Merge disks dict + # Provide compatibility with old template version and sets data to + # correct output format if necessary + def _fix_disk_compatibilities(disks): + for i, disk in enumerate(disks): + if 'pool' not in disk: + disk['pool'] = {'name': self.info['storagepool']} + if (isinstance(disk['pool'], str) or + isinstance(disk['pool'], unicode)): + disk['pool'] = {'name': disk['pool']} + disk['pool']['type'] = \ + self._get_storage_type(disk['pool']['name']) +
+ if self.info.get('disks') is not None: + _fix_disk_compatibilities(self.info['disks']) + if args.get('disks') is not None: + _fix_disk_compatibilities(args['disks']) +
Minor comment: you can merge those 2 if's
+ # Merge disks dict with disks provided by user default_disk = self.info['disks'][0] for i, d in enumerate(args.get('disks', [])): disk = dict(default_disk) + disk['index'] = i disk.update(d) - - # Assign right disk format to logical and [i]scsi storagepools - if self._get_storage_type() in ['logical', 'iscsi', 'scsi']: + if disk['pool']['type'] in ['logical', 'iscsi', 'scsi']: disk['format'] = 'raw' args['disks'][i] = disk
@@ -401,7 +417,7 @@ class VMTemplate(object): def _get_storage_path(self): return ''
- def _get_storage_type(self): + def _get_storage_type(self, pool=None): return ''
def _get_volume_path(self):

See comments below. Rodrigo Trujillo On 11/30/2015 09:02 AM, Aline Manera wrote:
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch fixes the backend code in order to allow create templates with multiple disks and update them correctly. Backend supports adding multiple disks with a json like:
"disks": [{"size": 3, "format": "qcow"}, {"size": 5, "pool":"/plugins/kimchi/storagepools/poolX"}, {"size": 7, "format": "raw", "pool": "/plugins/kimchi/storagepools/poolY"} ]
If any information is missing, Kimchi will use values from template.conf.
Kimchi will expose template disk data like: "disks": [{"index" : ... , "size" : ... , "format" : ... , "pool" : { "name" : ... , "type" : ... } , }, ... ]
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/API.json | 19 ++++++++++++++++++- src/wok/plugins/kimchi/docs/API.md | 6 ++++++ src/wok/plugins/kimchi/model/templates.py | 9 +++++---- src/wok/plugins/kimchi/osinfo.py | 1 + src/wok/plugins/kimchi/vmtemplate.py | 26 +++++++++++++++++++++----- 5 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/src/wok/plugins/kimchi/API.json b/src/wok/plugins/kimchi/API.json index 7addfc7..6c6a5f7 100644 --- a/src/wok/plugins/kimchi/API.json +++ b/src/wok/plugins/kimchi/API.json @@ -477,6 +477,12 @@ "type": "integer", "minimum": 0 }, + "format": { + "description": "Type of the image of the disk", + "type": "string", + "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", + "error": "KCHTMPL0027E" + }, "size": { "description": "Size (GB) of the disk", "type": "number", @@ -488,8 +494,13 @@ "type": "string", "pattern": "^/.+$", "error": "KCHTMPL0023E" + },
+ "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": "^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E"
'pool' will be an object with 2 elements: 'name' and 'type'.
Aline, I made the following decision: - To CREATE or UPDATE a template, you just need the pool URI, so I left "pool" as a single string parameter. - When you list a template (LOOKUP), it will return "pool: { name: xxx, type: yyy }" What do you think?
} - } }, "minItems": 1, @@ -660,6 +671,12 @@ "type": "string", "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", "error": "KCHTMPL0027E" + },
+ "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": "^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E"
'pool' will be an object with 2 elements: 'name' and 'type'.
Explained above.
} } }, diff --git a/src/wok/plugins/kimchi/docs/API.md b/src/wok/plugins/kimchi/docs/API.md index 1e7d8ca..a4dd084 100644 --- a/src/wok/plugins/kimchi/docs/API.md +++ b/src/wok/plugins/kimchi/docs/API.md @@ -289,6 +289,8 @@ Represents a snapshot of the Virtual Machine's primary monitor. (either *size* or *volume* must be specified): * index: The device index * size: The device size in GB + * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc + * pool: URI of the storagepool where disk will be created
'pool' will be an object with 2 elements: 'name' and 'type'.
Explained above.
* base: Base image of this disk
* graphics *(optional)*: The graphics paramenters of this template @@ -388,6 +390,9 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc. + * pool: Information about the pool where disk or volume will be created + * name: URI of the storagepool + * type: Type of the storagepool (dir, nfs, scsci, iscsi, etc)
This is the information related to the GET
* graphics: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. * vnc: Graphical display using the Virtual Network @@ -422,6 +427,7 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc.
+ * pool: URI of the storagepool where template allocates vm disk.
'pool' will be an object with 2 elements: 'name' and 'type'. Explained above.
* graphics *(optional)*: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. * vnc: Graphical display using the Virtual Network diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index 47b2c9e..bac6e47 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -234,8 +234,9 @@ class LibvirtVMTemplate(VMTemplate): self.conn = conn VMTemplate.__init__(self, args, scan)
- def _storage_validate(self): - pool_uri = self.info['storagepool'] + def _storage_validate(self, pool_uri=None): + if pool_uri is None: + pool_uri = self.info['storagepool'] pool_name = pool_name_from_uri(pool_uri) try: conn = self.conn.get() @@ -278,8 +279,8 @@ class LibvirtVMTemplate(VMTemplate): xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0]
- def _get_storage_type(self): - pool = self._storage_validate() + def _get_storage_type(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/@type")[0]
diff --git a/src/wok/plugins/kimchi/osinfo.py b/src/wok/plugins/kimchi/osinfo.py index 30ecd4f..5b0b5bf 100644 --- a/src/wok/plugins/kimchi/osinfo.py +++ b/src/wok/plugins/kimchi/osinfo.py @@ -158,6 +158,7 @@ def _get_tmpl_defaults(): for disk in storage_section.keys(): data = storage_section[disk] data['index'] = int(disk.split('.')[1]) + data['pool'] = defaults['storagepool'] defaults['disks'].append(data)
# Parse processor section to get cpus and cpu_topology values diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index 3097b66..c7635b0 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -76,14 +76,30 @@ class VMTemplate(object): graphics.update(graph_args) args['graphics'] = graphics
- # Merge disks dict + # Provide compatibility with old template version and sets data to + # correct output format if necessary + def _fix_disk_compatibilities(disks): + for i, disk in enumerate(disks): + if 'pool' not in disk: + disk['pool'] = {'name': self.info['storagepool']} + if (isinstance(disk['pool'], str) or + isinstance(disk['pool'], unicode)): + disk['pool'] = {'name': disk['pool']} + disk['pool']['type'] = \ + self._get_storage_type(disk['pool']['name']) +
+ if self.info.get('disks') is not None: + _fix_disk_compatibilities(self.info['disks']) + if args.get('disks') is not None: + _fix_disk_compatibilities(args['disks']) +
Minor comment: you can merge those 2 if's I double-checked the code and I cannot see how I can merge the if's, because self.info['disks'] and args['disks'] will not have the same information at this point of the code.
+ # Merge disks dict with disks provided by user default_disk = self.info['disks'][0] for i, d in enumerate(args.get('disks', [])): disk = dict(default_disk) + disk['index'] = i disk.update(d) - - # Assign right disk format to logical and [i]scsi storagepools - if self._get_storage_type() in ['logical', 'iscsi', 'scsi']: + if disk['pool']['type'] in ['logical', 'iscsi', 'scsi']: disk['format'] = 'raw' args['disks'][i] = disk
@@ -401,7 +417,7 @@ class VMTemplate(object): def _get_storage_path(self): return ''
- def _get_storage_type(self): + def _get_storage_type(self, pool=None): return ''
def _get_volume_path(self):
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

On 30/11/2015 11:15, Rodrigo Trujillo wrote:
See comments below.
Rodrigo Trujillo
On 11/30/2015 09:02 AM, Aline Manera wrote:
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch fixes the backend code in order to allow create templates with multiple disks and update them correctly. Backend supports adding multiple disks with a json like:
"disks": [{"size": 3, "format": "qcow"}, {"size": 5, "pool":"/plugins/kimchi/storagepools/poolX"}, {"size": 7, "format": "raw", "pool": "/plugins/kimchi/storagepools/poolY"} ]
If any information is missing, Kimchi will use values from template.conf.
Kimchi will expose template disk data like: "disks": [{"index" : ... , "size" : ... , "format" : ... , "pool" : { "name" : ... , "type" : ... } , }, ... ]
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/API.json | 19 ++++++++++++++++++- src/wok/plugins/kimchi/docs/API.md | 6 ++++++ src/wok/plugins/kimchi/model/templates.py | 9 +++++---- src/wok/plugins/kimchi/osinfo.py | 1 + src/wok/plugins/kimchi/vmtemplate.py | 26 +++++++++++++++++++++----- 5 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/src/wok/plugins/kimchi/API.json b/src/wok/plugins/kimchi/API.json index 7addfc7..6c6a5f7 100644 --- a/src/wok/plugins/kimchi/API.json +++ b/src/wok/plugins/kimchi/API.json @@ -477,6 +477,12 @@ "type": "integer", "minimum": 0 }, + "format": { + "description": "Type of the image of the disk", + "type": "string", + "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", + "error": "KCHTMPL0027E" + }, "size": { "description": "Size (GB) of the disk", "type": "number", @@ -488,8 +494,13 @@ "type": "string", "pattern": "^/.+$", "error": "KCHTMPL0023E" + },
+ "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": "^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E"
'pool' will be an object with 2 elements: 'name' and 'type'.
Aline, I made the following decision: - To CREATE or UPDATE a template, you just need the pool URI, so I left "pool" as a single string parameter. - When you list a template (LOOKUP), it will return "pool: { name: xxx, type: yyy }"
What do you think?
It is not a good idea to change the API according to the HTTP method used. By REST API conventions, we understand the POST method will create a resource with a given set of information (which we return on lookup()) and by PUT we change the set of information already available. So the JSON must be the same in all cases.
} - } }, "minItems": 1, @@ -660,6 +671,12 @@ "type": "string", "pattern": "^(bochs|cloop|cow|dmg|qcow|qcow2|qed|raw|vmdk|vpc)$", "error": "KCHTMPL0027E" + },
+ "pool": { + "description": "Location (URI) of the storage pool", + "type": "string", + "pattern": "^/plugins/kimchi/storagepools/[^/]+/?$", + "error": "KCHTMPL0015E"
'pool' will be an object with 2 elements: 'name' and 'type'.
Explained above.
} } }, diff --git a/src/wok/plugins/kimchi/docs/API.md b/src/wok/plugins/kimchi/docs/API.md index 1e7d8ca..a4dd084 100644 --- a/src/wok/plugins/kimchi/docs/API.md +++ b/src/wok/plugins/kimchi/docs/API.md @@ -289,6 +289,8 @@ Represents a snapshot of the Virtual Machine's primary monitor. (either *size* or *volume* must be specified): * index: The device index * size: The device size in GB + * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc + * pool: URI of the storagepool where disk will be created
'pool' will be an object with 2 elements: 'name' and 'type'.
Explained above.
* base: Base image of this disk
* graphics *(optional)*: The graphics paramenters of this template @@ -388,6 +390,9 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc. + * pool: Information about the pool where disk or volume will be created + * name: URI of the storagepool + * type: Type of the storagepool (dir, nfs, scsci, iscsi, etc)
This is the information related to the GET
* graphics: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. * vnc: Graphical display using the Virtual Network @@ -422,6 +427,7 @@ A interface represents available network interface on VM. * size: The device size in GB * volume: A volume name that contains the initial disk contents * format: Format of the image. Valid formats: bochs, cloop, cow, dmg, qcow, qcow2, qed, raw, vmdk, vpc.
+ * pool: URI of the storagepool where template allocates vm disk.
'pool' will be an object with 2 elements: 'name' and 'type'. Explained above.
* graphics *(optional)*: A dict of graphics paramenters of this template * type: The type of graphics. It can be VNC or spice or None. * vnc: Graphical display using the Virtual Network diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index 47b2c9e..bac6e47 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -234,8 +234,9 @@ class LibvirtVMTemplate(VMTemplate): self.conn = conn VMTemplate.__init__(self, args, scan)
- def _storage_validate(self): - pool_uri = self.info['storagepool'] + def _storage_validate(self, pool_uri=None): + if pool_uri is None: + pool_uri = self.info['storagepool'] pool_name = pool_name_from_uri(pool_uri) try: conn = self.conn.get() @@ -278,8 +279,8 @@ class LibvirtVMTemplate(VMTemplate): xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0]
- def _get_storage_type(self): - pool = self._storage_validate() + def _get_storage_type(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/@type")[0]
diff --git a/src/wok/plugins/kimchi/osinfo.py b/src/wok/plugins/kimchi/osinfo.py index 30ecd4f..5b0b5bf 100644 --- a/src/wok/plugins/kimchi/osinfo.py +++ b/src/wok/plugins/kimchi/osinfo.py @@ -158,6 +158,7 @@ def _get_tmpl_defaults(): for disk in storage_section.keys(): data = storage_section[disk] data['index'] = int(disk.split('.')[1]) + data['pool'] = defaults['storagepool'] defaults['disks'].append(data)
# Parse processor section to get cpus and cpu_topology values diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index 3097b66..c7635b0 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -76,14 +76,30 @@ class VMTemplate(object): graphics.update(graph_args) args['graphics'] = graphics
- # Merge disks dict + # Provide compatibility with old template version and sets data to + # correct output format if necessary + def _fix_disk_compatibilities(disks): + for i, disk in enumerate(disks): + if 'pool' not in disk: + disk['pool'] = {'name': self.info['storagepool']} + if (isinstance(disk['pool'], str) or + isinstance(disk['pool'], unicode)): + disk['pool'] = {'name': disk['pool']} + disk['pool']['type'] = \ + self._get_storage_type(disk['pool']['name']) +
+ if self.info.get('disks') is not None: + _fix_disk_compatibilities(self.info['disks']) + if args.get('disks') is not None: + _fix_disk_compatibilities(args['disks']) +
Minor comment: you can merge those 2 if's I double-checked the code and I cannot see how I can merge the if's, because self.info['disks'] and args['disks'] will not have the same information at this point of the code.
+ # Merge disks dict with disks provided by user default_disk = self.info['disks'][0] for i, d in enumerate(args.get('disks', [])): disk = dict(default_disk) + disk['index'] = i disk.update(d) - - # Assign right disk format to logical and [i]scsi storagepools - if self._get_storage_type() in ['logical', 'iscsi', 'scsi']: + if disk['pool']['type'] in ['logical', 'iscsi', 'scsi']: disk['format'] = 'raw' args['disks'][i] = disk
@@ -401,7 +417,7 @@ class VMTemplate(object): def _get_storage_path(self): return ''
- def _get_storage_type(self): + def _get_storage_type(self, pool=None): return ''
def _get_volume_path(self):
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

Applied. Thanks. Regards, Aline Manera

This patch changes the Kimchi backend in order to support creation of guests from Templates that have multiple disks from different storage pools. Before, it was only possible to create a guest with one, two or more disks from only one storagepool. In order to add a disk from another pool, user would have to edit the guest and add it. Now users can do this in creation time, using pre-configured Template. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/i18n.py | 1 + src/wok/plugins/kimchi/model/templates.py | 21 +++++++++++++++------ src/wok/plugins/kimchi/model/vms.py | 18 ++++++------------ src/wok/plugins/kimchi/vmtemplate.py | 30 ++++++++++++++++-------------- 4 files changed, 38 insertions(+), 32 deletions(-) diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py index de7962a..b18048c 100644 --- a/src/wok/plugins/kimchi/i18n.py +++ b/src/wok/plugins/kimchi/i18n.py @@ -128,6 +128,7 @@ messages = { "KCHVM0069E": _("Password field must be a string."), "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."), + "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."), "KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index bac6e47..eae2714 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -147,6 +147,16 @@ class TemplateModel(object): params = session.get('template', name) if overrides: params.update(overrides) + if 'storagepool' in params: + libvVMT = LibvirtVMTemplate(params, conn=conn) + poolType = 
libvVMT._get_storage_type(params['storagepool']) + for i, disk in enumerate(params['disks']): + if disk['pool']['type'] != poolType: + raise InvalidOperation('KCHVM0072E') + else: + params['disks'][i]['pool']['name'] = \ + params['storagepool'] + return LibvirtVMTemplate(params, False, conn) def lookup(self, name): @@ -274,8 +284,8 @@ class LibvirtVMTemplate(VMTemplate): raise InvalidParameter("KCHTMPL0007E", {'network': name, 'template': self.name}) - def _get_storage_path(self): - pool = self._storage_validate() + def _get_storage_path(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0] @@ -285,7 +295,7 @@ class LibvirtVMTemplate(VMTemplate): return xpath_get_text(xml, "/pool/@type")[0] def _get_volume_path(self, pool, vol): - pool = self._storage_validate() + pool = self._storage_validate(pool) try: return pool.storageVolLookupByName(vol).path() except: @@ -293,12 +303,11 @@ class LibvirtVMTemplate(VMTemplate): 'pool': pool}) def fork_vm_storage(self, vm_uuid): - # Provision storage: - # TODO: Rebase on the storage API once upstream - pool = self._storage_validate() + # Provision storages: vol_list = self.to_volume_list(vm_uuid) try: for v in vol_list: + pool = self._storage_validate(v['pool']) # outgoing text to libvirt, encode('utf-8') pool.createXML(v['xml'].encode('utf-8'), 0) except libvirt.libvirtError as e: diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py index 366f1b4..ebe716e 100644 --- a/src/wok/plugins/kimchi/model/vms.py +++ b/src/wok/plugins/kimchi/model/vms.py @@ -151,28 +151,22 @@ class VMsModel(object): wok_log.error('Error trying to update database with guest ' 'icon information due error: %s', e.message) - # If storagepool is SCSI, volumes will be LUNs and must be passed by - # the user from UI or manually. 
- cb('Provisioning storage for new VM') - vol_list = [] - if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid) + cb('Provisioning storages for new VM') + vol_list = t.fork_vm_storage(vm_uuid) graphics = params.get('graphics', {}) stream_protocols = self.caps.libvirt_stream_protocols xml = t.to_vm_xml(name, vm_uuid, libvirt_stream_protocols=stream_protocols, - graphics=graphics, - volumes=vol_list) + graphics=graphics) cb('Defining new VM') try: conn.defineXML(xml.encode('utf-8')) except libvirt.libvirtError as e: - if t._get_storage_type() not in READONLY_POOL_TYPE: - for v in vol_list: - vol = conn.storageVolLookupByPath(v['path']) - vol.delete(0) + for v in vol_list: + vol = conn.storageVolLookupByPath(v['path']) + vol.delete(0) raise OperationFailed("KCHVM0007E", {'name': name, 'err': e.get_error_message()}) diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index c7635b0..69cc9b5 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -184,11 +184,6 @@ class VMTemplate(object): return xml def _get_disks_xml(self, vm_uuid): - # Current implementation just allows to create disk in one single - # storage pool, so we cannot mix the types (scsi volumes vs img file) - storage_type = self._get_storage_type() - storage_path = self._get_storage_path() - base_disk_params = {'type': 'disk', 'disk': 'file', 'bus': self.info['disk_bus']} logical_disk_params = {'format': 'raw'} @@ -199,37 +194,44 @@ class VMTemplate(object): 'format': 'raw', 'bus': 'scsi'} disks_xml = '' - pool_name = pool_name_from_uri(self.info['storagepool']) for index, disk in enumerate(self.info['disks']): params = dict(base_disk_params) params['format'] = disk['format'] - params.update(locals().get('%s_disk_params' % storage_type, {})) + params.update(locals().get('%s_disk_params' % + disk['pool']['type'], {})) params['index'] = index volume = disk.get('volume') if volume is not None: - 
params['path'] = self._get_volume_path(pool_name, volume) + params['path'] = self._get_volume_path(disk['pool']['name'], + volume) else: - volume = "%s-%s.img" % (vm_uuid, params['index']) - params['path'] = os.path.join(storage_path, volume) + img = "%s-%s.img" % (vm_uuid, params['index']) + storage_path = self._get_storage_path(disk['pool']['name']) + params['path'] = os.path.join(storage_path, img) disks_xml += get_disk_xml(params)[1] return unicode(disks_xml, 'utf-8') def to_volume_list(self, vm_uuid): - storage_path = self._get_storage_path() ret = [] for i, d in enumerate(self.info['disks']): + # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue + index = d.get('index', i) volume = "%s-%s.img" % (vm_uuid, index) + storage_path = self._get_storage_path(d['pool']['name']) info = {'name': volume, 'capacity': d['size'], 'format': d['format'], - 'path': '%s/%s' % (storage_path, volume)} + 'path': '%s/%s' % (storage_path, volume), + 'pool': d['pool']['name']} - if 'logical' == self._get_storage_type() or \ + if 'logical' == d['pool']['type'] or \ info['format'] not in ['qcow2', 'raw']: info['allocation'] = info['capacity'] else: @@ -414,7 +416,7 @@ class VMTemplate(object): def fork_vm_storage(self, vm_uuid): pass - def _get_storage_path(self): + def _get_storage_path(self, pool_uri=None): return '' def _get_storage_type(self, pool=None): -- 2.1.0

On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch changes the Kimchi backend in order to support creation of guests from Templates that have multiple disks from different storage pools. Before, it was only possible to create a guest with one, two or more disks from only one storagepool. In order to add a disk from another pool, user would have to edit the guest and add it. Now users can do this in creation time, using pre-configured Template.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/i18n.py | 1 + src/wok/plugins/kimchi/model/templates.py | 21 +++++++++++++++------ src/wok/plugins/kimchi/model/vms.py | 18 ++++++------------ src/wok/plugins/kimchi/vmtemplate.py | 30 ++++++++++++++++-------------- 4 files changed, 38 insertions(+), 32 deletions(-)
diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py index de7962a..b18048c 100644 --- a/src/wok/plugins/kimchi/i18n.py +++ b/src/wok/plugins/kimchi/i18n.py @@ -128,6 +128,7 @@ messages = { "KCHVM0069E": _("Password field must be a string."), "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."),
+ "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."),
As each volume will have its own pool information we should remove the former storage pool information. Otherwise, it will be an useless information.
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index bac6e47..eae2714 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -147,6 +147,16 @@ class TemplateModel(object): params = session.get('template', name) if overrides: params.update(overrides)
+ if 'storagepool' in params: + libvVMT = LibvirtVMTemplate(params, conn=conn) + poolType = libvVMT._get_storage_type(params['storagepool']) + for i, disk in enumerate(params['disks']): + if disk['pool']['type'] != poolType: + raise InvalidOperation('KCHVM0072E') + else: + params['disks'][i]['pool']['name'] = \ + params['storagepool'] +
The pool is defined for each volume. We should remove the former storage pool information
return LibvirtVMTemplate(params, False, conn)
def lookup(self, name): @@ -274,8 +284,8 @@ class LibvirtVMTemplate(VMTemplate): raise InvalidParameter("KCHTMPL0007E", {'network': name, 'template': self.name})
- def _get_storage_path(self): - pool = self._storage_validate() + def _get_storage_path(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0]
@@ -285,7 +295,7 @@ class LibvirtVMTemplate(VMTemplate): return xpath_get_text(xml, "/pool/@type")[0]
def _get_volume_path(self, pool, vol): - pool = self._storage_validate() + pool = self._storage_validate(pool) try: return pool.storageVolLookupByName(vol).path() except: @@ -293,12 +303,11 @@ class LibvirtVMTemplate(VMTemplate): 'pool': pool})
def fork_vm_storage(self, vm_uuid): - # Provision storage: - # TODO: Rebase on the storage API once upstream - pool = self._storage_validate() + # Provision storages: vol_list = self.to_volume_list(vm_uuid) try: for v in vol_list: + pool = self._storage_validate(v['pool']) # outgoing text to libvirt, encode('utf-8') pool.createXML(v['xml'].encode('utf-8'), 0) except libvirt.libvirtError as e: diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py index 366f1b4..ebe716e 100644 --- a/src/wok/plugins/kimchi/model/vms.py +++ b/src/wok/plugins/kimchi/model/vms.py @@ -151,28 +151,22 @@ class VMsModel(object): wok_log.error('Error trying to update database with guest ' 'icon information due error: %s', e.message)
- # If storagepool is SCSI, volumes will be LUNs and must be passed by - # the user from UI or manually. - cb('Provisioning storage for new VM') - vol_list = [] - if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid)
+ cb('Provisioning storages for new VM') + vol_list = t.fork_vm_storage(vm_uuid)
graphics = params.get('graphics', {}) stream_protocols = self.caps.libvirt_stream_protocols xml = t.to_vm_xml(name, vm_uuid, libvirt_stream_protocols=stream_protocols, - graphics=graphics, - volumes=vol_list) + graphics=graphics)
cb('Defining new VM') try: conn.defineXML(xml.encode('utf-8')) except libvirt.libvirtError as e: - if t._get_storage_type() not in READONLY_POOL_TYPE: - for v in vol_list: - vol = conn.storageVolLookupByPath(v['path']) - vol.delete(0)
+ for v in vol_list: + vol = conn.storageVolLookupByPath(v['path']) + vol.delete(0) raise OperationFailed("KCHVM0007E", {'name': name, 'err': e.get_error_message()})
Why are not you considering the read-only pools?
diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index c7635b0..69cc9b5 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -184,11 +184,6 @@ class VMTemplate(object): return xml
def _get_disks_xml(self, vm_uuid): - # Current implementation just allows to create disk in one single - # storage pool, so we cannot mix the types (scsi volumes vs img file) - storage_type = self._get_storage_type() - storage_path = self._get_storage_path() - base_disk_params = {'type': 'disk', 'disk': 'file', 'bus': self.info['disk_bus']} logical_disk_params = {'format': 'raw'} @@ -199,37 +194,44 @@ class VMTemplate(object): 'format': 'raw', 'bus': 'scsi'}
disks_xml = '' - pool_name = pool_name_from_uri(self.info['storagepool']) for index, disk in enumerate(self.info['disks']): params = dict(base_disk_params) params['format'] = disk['format'] - params.update(locals().get('%s_disk_params' % storage_type, {})) + params.update(locals().get('%s_disk_params' % + disk['pool']['type'], {})) params['index'] = index
volume = disk.get('volume') if volume is not None: - params['path'] = self._get_volume_path(pool_name, volume) + params['path'] = self._get_volume_path(disk['pool']['name'], + volume) else: - volume = "%s-%s.img" % (vm_uuid, params['index']) - params['path'] = os.path.join(storage_path, volume) + img = "%s-%s.img" % (vm_uuid, params['index']) + storage_path = self._get_storage_path(disk['pool']['name']) + params['path'] = os.path.join(storage_path, img)
disks_xml += get_disk_xml(params)[1]
return unicode(disks_xml, 'utf-8')
def to_volume_list(self, vm_uuid): - storage_path = self._get_storage_path() ret = [] for i, d in enumerate(self.info['disks']): + # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue + index = d.get('index', i) volume = "%s-%s.img" % (vm_uuid, index)
+ storage_path = self._get_storage_path(d['pool']['name']) info = {'name': volume, 'capacity': d['size'], 'format': d['format'], - 'path': '%s/%s' % (storage_path, volume)} + 'path': '%s/%s' % (storage_path, volume), + 'pool': d['pool']['name']}
- if 'logical' == self._get_storage_type() or \ + if 'logical' == d['pool']['type'] or \ info['format'] not in ['qcow2', 'raw']: info['allocation'] = info['capacity'] else: @@ -414,7 +416,7 @@ class VMTemplate(object): def fork_vm_storage(self, vm_uuid): pass
- def _get_storage_path(self): + def _get_storage_path(self, pool_uri=None): return ''
def _get_storage_type(self, pool=None):

On 11/30/2015 09:08 AM, Aline Manera wrote:
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch changes the Kimchi backend in order to support creation of guests from Templates that have multiple disks from different storage pools. Before, it was only possible to create a guest with one, two or more disks from only one storagepool. In order to add a disk from another pool, user would have to edit the guest and add it. Now users can do this in creation time, using pre-configured Template.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/i18n.py | 1 + src/wok/plugins/kimchi/model/templates.py | 21 +++++++++++++++------ src/wok/plugins/kimchi/model/vms.py | 18 ++++++------------ src/wok/plugins/kimchi/vmtemplate.py | 30 ++++++++++++++++-------------- 4 files changed, 38 insertions(+), 32 deletions(-)
diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py index de7962a..b18048c 100644 --- a/src/wok/plugins/kimchi/i18n.py +++ b/src/wok/plugins/kimchi/i18n.py @@ -128,6 +128,7 @@ messages = { "KCHVM0069E": _("Password field must be a string."), "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."),
+ "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."),
As each volume will have its own pool information we should remove the former storage pool information. Otherwise, it will be an useless information.
I agree, and disagree. It is still used; I decided to keep it in order to have compatibility with older template versions. Otherwise, I would have to find a way to update information in objectstore. So, storagepool is used in the code for 2 reasons now: - It holds the DEFAULT pool from templates.conf. If user does not pass a pool when creating a template, then it is used - And, for compatibility
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index bac6e47..eae2714 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -147,6 +147,16 @@ class TemplateModel(object): params = session.get('template', name) if overrides: params.update(overrides)
+ if 'storagepool' in params: + libvVMT = LibvirtVMTemplate(params, conn=conn) + poolType = libvVMT._get_storage_type(params['storagepool']) + for i, disk in enumerate(params['disks']): + if disk['pool']['type'] != poolType: + raise InvalidOperation('KCHVM0072E') + else: + params['disks'][i]['pool']['name'] = \ + params['storagepool'] +
The pool is defined for each volume. We should remove the former storage pool information
User is not forced to give each disk 'pool', so kimchi uses the "default" storagepool configured. This information must be recorded somewhere.
return LibvirtVMTemplate(params, False, conn)
def lookup(self, name): @@ -274,8 +284,8 @@ class LibvirtVMTemplate(VMTemplate): raise InvalidParameter("KCHTMPL0007E", {'network': name, 'template': self.name})
- def _get_storage_path(self): - pool = self._storage_validate() + def _get_storage_path(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0]
@@ -285,7 +295,7 @@ class LibvirtVMTemplate(VMTemplate): return xpath_get_text(xml, "/pool/@type")[0]
def _get_volume_path(self, pool, vol): - pool = self._storage_validate() + pool = self._storage_validate(pool) try: return pool.storageVolLookupByName(vol).path() except: @@ -293,12 +303,11 @@ class LibvirtVMTemplate(VMTemplate): 'pool': pool})
def fork_vm_storage(self, vm_uuid): - # Provision storage: - # TODO: Rebase on the storage API once upstream - pool = self._storage_validate() + # Provision storages: vol_list = self.to_volume_list(vm_uuid) try: for v in vol_list: + pool = self._storage_validate(v['pool']) # outgoing text to libvirt, encode('utf-8') pool.createXML(v['xml'].encode('utf-8'), 0) except libvirt.libvirtError as e: diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py index 366f1b4..ebe716e 100644 --- a/src/wok/plugins/kimchi/model/vms.py +++ b/src/wok/plugins/kimchi/model/vms.py @@ -151,28 +151,22 @@ class VMsModel(object): wok_log.error('Error trying to update database with guest ' 'icon information due error: %s', e.message)
- # If storagepool is SCSI, volumes will be LUNs and must be passed by - # the user from UI or manually. - cb('Provisioning storage for new VM') - vol_list = [] - if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid)
+ cb('Provisioning storages for new VM') + vol_list = t.fork_vm_storage(vm_uuid)
graphics = params.get('graphics', {}) stream_protocols = self.caps.libvirt_stream_protocols xml = t.to_vm_xml(name, vm_uuid, libvirt_stream_protocols=stream_protocols, - graphics=graphics, - volumes=vol_list) + graphics=graphics)
cb('Defining new VM') try: conn.defineXML(xml.encode('utf-8')) except libvirt.libvirtError as e: - if t._get_storage_type() not in READONLY_POOL_TYPE: - for v in vol_list: - vol = conn.storageVolLookupByPath(v['path']) - vol.delete(0)
+ for v in vol_list: + vol = conn.storageVolLookupByPath(v['path']) + vol.delete(0) raise OperationFailed("KCHVM0007E", {'name': name, 'err': e.get_error_message()})
Why are not you considering the read-only pools?
Notice above that the old code says: - if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid) fork_vm_storage calls "to_volume_list", where I moved this condition (see below): + # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue So vol_list will always be a list of physical ".img"s. Maybe the confusion is in the "volume" name associated. "Volumes" related to SCSI and iSCSI (read-only) pools do not need the path to be discovered here.
diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index c7635b0..69cc9b5 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -184,11 +184,6 @@ class VMTemplate(object): return xml
def _get_disks_xml(self, vm_uuid): - # Current implementation just allows to create disk in one single - # storage pool, so we cannot mix the types (scsi volumes vs img file) - storage_type = self._get_storage_type() - storage_path = self._get_storage_path() - base_disk_params = {'type': 'disk', 'disk': 'file', 'bus': self.info['disk_bus']} logical_disk_params = {'format': 'raw'} @@ -199,37 +194,44 @@ class VMTemplate(object): 'format': 'raw', 'bus': 'scsi'}
disks_xml = '' - pool_name = pool_name_from_uri(self.info['storagepool']) for index, disk in enumerate(self.info['disks']): params = dict(base_disk_params) params['format'] = disk['format'] - params.update(locals().get('%s_disk_params' % storage_type, {})) + params.update(locals().get('%s_disk_params' % + disk['pool']['type'], {})) params['index'] = index
volume = disk.get('volume') if volume is not None: - params['path'] = self._get_volume_path(pool_name, volume) + params['path'] = self._get_volume_path(disk['pool']['name'], + volume) else: - volume = "%s-%s.img" % (vm_uuid, params['index']) - params['path'] = os.path.join(storage_path, volume) + img = "%s-%s.img" % (vm_uuid, params['index']) + storage_path = self._get_storage_path(disk['pool']['name']) + params['path'] = os.path.join(storage_path, img)
disks_xml += get_disk_xml(params)[1]
return unicode(disks_xml, 'utf-8')
def to_volume_list(self, vm_uuid): - storage_path = self._get_storage_path() ret = [] for i, d in enumerate(self.info['disks']): + # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue + index = d.get('index', i) volume = "%s-%s.img" % (vm_uuid, index)
+ storage_path = self._get_storage_path(d['pool']['name']) info = {'name': volume, 'capacity': d['size'], 'format': d['format'], - 'path': '%s/%s' % (storage_path, volume)} + 'path': '%s/%s' % (storage_path, volume), + 'pool': d['pool']['name']}
- if 'logical' == self._get_storage_type() or \ + if 'logical' == d['pool']['type'] or \ info['format'] not in ['qcow2', 'raw']: info['allocation'] = info['capacity'] else: @@ -414,7 +416,7 @@ class VMTemplate(object): def fork_vm_storage(self, vm_uuid): pass
- def _get_storage_path(self): + def _get_storage_path(self, pool_uri=None): return ''
def _get_storage_type(self, pool=None):
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

On 30/11/2015 11:42, Rodrigo Trujillo wrote:
On 11/30/2015 09:08 AM, Aline Manera wrote:
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch changes the Kimchi backend in order to support creation of guests from Templates that have multiple disks from different storage pools. Before, it was only possible to create a guest with one, two or more disks from only one storagepool. In order to add a disk from another pool, user would have to edit the guest and add it. Now users can do this in creation time, using pre-configured Template.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/i18n.py | 1 + src/wok/plugins/kimchi/model/templates.py | 21 +++++++++++++++------ src/wok/plugins/kimchi/model/vms.py | 18 ++++++------------ src/wok/plugins/kimchi/vmtemplate.py | 30 ++++++++++++++++-------------- 4 files changed, 38 insertions(+), 32 deletions(-)
diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py index de7962a..b18048c 100644 --- a/src/wok/plugins/kimchi/i18n.py +++ b/src/wok/plugins/kimchi/i18n.py @@ -128,6 +128,7 @@ messages = { "KCHVM0069E": _("Password field must be a string."), "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."),
+ "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."),
As each volume will have its own pool information we should remove the former storage pool information. Otherwise, it will be an useless information.
I agree, and disagree. It is still used; I decided to keep it in order to have compatibility with older template versions. Otherwise, I would have to find a way to update information in objectstore. So, storagepool is used in the code for 2 reasons now: - It holds the DEFAULT pool from templates.conf. If user does not pass a pool when creating a template, then it is used - And, for compatibility
OK. But the default pool does not make sense anymore. It is default for each volume. In the code, you added compatibility logic to update the objectstore. Why can't you do the same in that case? Otherwise, we will have a monster objectstore only for 'compatibility'
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index bac6e47..eae2714 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -147,6 +147,16 @@ class TemplateModel(object): params = session.get('template', name) if overrides: params.update(overrides)
+ if 'storagepool' in params: + libvVMT = LibvirtVMTemplate(params, conn=conn) + poolType = libvVMT._get_storage_type(params['storagepool']) + for i, disk in enumerate(params['disks']): + if disk['pool']['type'] != poolType: + raise InvalidOperation('KCHVM0072E') + else: + params['disks'][i]['pool']['name'] = \ + params['storagepool'] +
The pool is defined for each volume. We should remove the former storage pool information
User is not forced to give each disk 'pool', so kimchi uses the "default" storagepool configured. This information must be recorded somewhere.
return LibvirtVMTemplate(params, False, conn)
def lookup(self, name): @@ -274,8 +284,8 @@ class LibvirtVMTemplate(VMTemplate): raise InvalidParameter("KCHTMPL0007E", {'network': name, 'template': self.name})
- def _get_storage_path(self): - pool = self._storage_validate() + def _get_storage_path(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0]
@@ -285,7 +295,7 @@ class LibvirtVMTemplate(VMTemplate): return xpath_get_text(xml, "/pool/@type")[0]
def _get_volume_path(self, pool, vol): - pool = self._storage_validate() + pool = self._storage_validate(pool) try: return pool.storageVolLookupByName(vol).path() except: @@ -293,12 +303,11 @@ class LibvirtVMTemplate(VMTemplate): 'pool': pool})
def fork_vm_storage(self, vm_uuid): - # Provision storage: - # TODO: Rebase on the storage API once upstream - pool = self._storage_validate() + # Provision storages: vol_list = self.to_volume_list(vm_uuid) try: for v in vol_list: + pool = self._storage_validate(v['pool']) # outgoing text to libvirt, encode('utf-8') pool.createXML(v['xml'].encode('utf-8'), 0) except libvirt.libvirtError as e: diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py index 366f1b4..ebe716e 100644 --- a/src/wok/plugins/kimchi/model/vms.py +++ b/src/wok/plugins/kimchi/model/vms.py @@ -151,28 +151,22 @@ class VMsModel(object): wok_log.error('Error trying to update database with guest ' 'icon information due error: %s', e.message)
- # If storagepool is SCSI, volumes will be LUNs and must be passed by - # the user from UI or manually. - cb('Provisioning storage for new VM') - vol_list = [] - if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid)
+ cb('Provisioning storages for new VM') + vol_list = t.fork_vm_storage(vm_uuid)
graphics = params.get('graphics', {}) stream_protocols = self.caps.libvirt_stream_protocols xml = t.to_vm_xml(name, vm_uuid, libvirt_stream_protocols=stream_protocols, - graphics=graphics, - volumes=vol_list) + graphics=graphics)
cb('Defining new VM') try: conn.defineXML(xml.encode('utf-8')) except libvirt.libvirtError as e: - if t._get_storage_type() not in READONLY_POOL_TYPE: - for v in vol_list: - vol = conn.storageVolLookupByPath(v['path']) - vol.delete(0)
+ for v in vol_list: + vol = conn.storageVolLookupByPath(v['path']) + vol.delete(0) raise OperationFailed("KCHVM0007E", {'name': name, 'err': e.get_error_message()})
Why are not you considering the read-only pools?
Notice above that the old code says:
- if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid)
fork_vm_storage call "to_volume_list", where I moved this condition (see below):
+ # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue
OK
So vol_list will always be a list of physical ".img"s. Maybe the confusion is in the "volume" name associated. "Volumes" related to SCSI and iSCSI (read-only) pools do not need the path to be discovered here.
diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index c7635b0..69cc9b5 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -184,11 +184,6 @@ class VMTemplate(object): return xml
def _get_disks_xml(self, vm_uuid): - # Current implementation just allows to create disk in one single - # storage pool, so we cannot mix the types (scsi volumes vs img file) - storage_type = self._get_storage_type() - storage_path = self._get_storage_path() - base_disk_params = {'type': 'disk', 'disk': 'file', 'bus': self.info['disk_bus']} logical_disk_params = {'format': 'raw'} @@ -199,37 +194,44 @@ class VMTemplate(object): 'format': 'raw', 'bus': 'scsi'}
disks_xml = '' - pool_name = pool_name_from_uri(self.info['storagepool']) for index, disk in enumerate(self.info['disks']): params = dict(base_disk_params) params['format'] = disk['format'] - params.update(locals().get('%s_disk_params' % storage_type, {})) + params.update(locals().get('%s_disk_params' % + disk['pool']['type'], {})) params['index'] = index
volume = disk.get('volume') if volume is not None: - params['path'] = self._get_volume_path(pool_name, volume) + params['path'] = self._get_volume_path(disk['pool']['name'], + volume) else: - volume = "%s-%s.img" % (vm_uuid, params['index']) - params['path'] = os.path.join(storage_path, volume) + img = "%s-%s.img" % (vm_uuid, params['index']) + storage_path = self._get_storage_path(disk['pool']['name']) + params['path'] = os.path.join(storage_path, img)
disks_xml += get_disk_xml(params)[1]
return unicode(disks_xml, 'utf-8')
def to_volume_list(self, vm_uuid): - storage_path = self._get_storage_path() ret = [] for i, d in enumerate(self.info['disks']): + # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue + index = d.get('index', i) volume = "%s-%s.img" % (vm_uuid, index)
+ storage_path = self._get_storage_path(d['pool']['name']) info = {'name': volume, 'capacity': d['size'], 'format': d['format'], - 'path': '%s/%s' % (storage_path, volume)} + 'path': '%s/%s' % (storage_path, volume), + 'pool': d['pool']['name']}
- if 'logical' == self._get_storage_type() or \ + if 'logical' == d['pool']['type'] or \ info['format'] not in ['qcow2', 'raw']: info['allocation'] = info['capacity'] else: @@ -414,7 +416,7 @@ class VMTemplate(object): def fork_vm_storage(self, vm_uuid): pass
- def _get_storage_path(self): + def _get_storage_path(self, pool_uri=None): return ''
def _get_storage_type(self, pool=None):
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

On 30/11/2015 12:09, Aline Manera wrote:
On 30/11/2015 11:42, Rodrigo Trujillo wrote:
On 11/30/2015 09:08 AM, Aline Manera wrote:
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch changes the Kimchi backend in order to support creation of guests from Templates that have multiple disks from different storage pools. Before, it was only possible to create a guest with one, two or more disks from only one storagepool. In order to add a disk from another pool, user would have to edit the guest and add it. Now users can do this in creation time, using pre-configured Template.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/i18n.py | 1 + src/wok/plugins/kimchi/model/templates.py | 21 +++++++++++++++------ src/wok/plugins/kimchi/model/vms.py | 18 ++++++------------ src/wok/plugins/kimchi/vmtemplate.py | 30 ++++++++++++++++-------------- 4 files changed, 38 insertions(+), 32 deletions(-)
diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py index de7962a..b18048c 100644 --- a/src/wok/plugins/kimchi/i18n.py +++ b/src/wok/plugins/kimchi/i18n.py @@ -128,6 +128,7 @@ messages = { "KCHVM0069E": _("Password field must be a string."), "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."),
+ "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."),
As each volume will have its own pool information we should remove the former storage pool information. Otherwise, it will be useless information.
I agree, and disagree. It is still used; I decided to keep it in order to have compatibility with older template versions. Otherwise, I would have to find a way to update the information in the objectstore. So, storagepool is used in the code for 2 reasons now: - It holds the DEFAULT pool from template.conf. If the user does not pass a pool when creating a template, then it is used - And, for compatibility
OK. But the default pool does not make sense anymore. There is now a default defined for each volume.
In the code, you added compatibility logic to update the objectstore. Why can't you do the same in that case? Otherwise, we will have a monster objectstore only for 'compatibility'
You can also update the compatibility script created by Paulo to update the objectstore once when running Kimchi 2.0
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/wok/plugins/kimchi/model/templates.py b/src/wok/plugins/kimchi/model/templates.py index bac6e47..eae2714 100644 --- a/src/wok/plugins/kimchi/model/templates.py +++ b/src/wok/plugins/kimchi/model/templates.py @@ -147,6 +147,16 @@ class TemplateModel(object): params = session.get('template', name) if overrides: params.update(overrides)
+ if 'storagepool' in params: + libvVMT = LibvirtVMTemplate(params, conn=conn) + poolType = libvVMT._get_storage_type(params['storagepool']) + for i, disk in enumerate(params['disks']): + if disk['pool']['type'] != poolType: + raise InvalidOperation('KCHVM0072E') + else: + params['disks'][i]['pool']['name'] = \ + params['storagepool'] +
The pool is defined for each volume. We should remove the former storage pool information
The user is not forced to give each disk a 'pool', so Kimchi uses the "default" storagepool configured. This information must be recorded somewhere.
return LibvirtVMTemplate(params, False, conn)
def lookup(self, name): @@ -274,8 +284,8 @@ class LibvirtVMTemplate(VMTemplate): raise InvalidParameter("KCHTMPL0007E", {'network': name, 'template': self.name})
- def _get_storage_path(self): - pool = self._storage_validate() + def _get_storage_path(self, pool_uri=None): + pool = self._storage_validate(pool_uri) xml = pool.XMLDesc(0) return xpath_get_text(xml, "/pool/target/path")[0]
@@ -285,7 +295,7 @@ class LibvirtVMTemplate(VMTemplate): return xpath_get_text(xml, "/pool/@type")[0]
def _get_volume_path(self, pool, vol): - pool = self._storage_validate() + pool = self._storage_validate(pool) try: return pool.storageVolLookupByName(vol).path() except: @@ -293,12 +303,11 @@ class LibvirtVMTemplate(VMTemplate): 'pool': pool})
def fork_vm_storage(self, vm_uuid): - # Provision storage: - # TODO: Rebase on the storage API once upstream - pool = self._storage_validate() + # Provision storages: vol_list = self.to_volume_list(vm_uuid) try: for v in vol_list: + pool = self._storage_validate(v['pool']) # outgoing text to libvirt, encode('utf-8') pool.createXML(v['xml'].encode('utf-8'), 0) except libvirt.libvirtError as e: diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py index 366f1b4..ebe716e 100644 --- a/src/wok/plugins/kimchi/model/vms.py +++ b/src/wok/plugins/kimchi/model/vms.py @@ -151,28 +151,22 @@ class VMsModel(object): wok_log.error('Error trying to update database with guest ' 'icon information due error: %s', e.message)
- # If storagepool is SCSI, volumes will be LUNs and must be passed by - # the user from UI or manually. - cb('Provisioning storage for new VM') - vol_list = [] - if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid)
+ cb('Provisioning storages for new VM') + vol_list = t.fork_vm_storage(vm_uuid)
graphics = params.get('graphics', {}) stream_protocols = self.caps.libvirt_stream_protocols xml = t.to_vm_xml(name, vm_uuid, libvirt_stream_protocols=stream_protocols, - graphics=graphics, - volumes=vol_list) + graphics=graphics)
cb('Defining new VM') try: conn.defineXML(xml.encode('utf-8')) except libvirt.libvirtError as e: - if t._get_storage_type() not in READONLY_POOL_TYPE: - for v in vol_list: - vol = conn.storageVolLookupByPath(v['path']) - vol.delete(0)
+ for v in vol_list: + vol = conn.storageVolLookupByPath(v['path']) + vol.delete(0) raise OperationFailed("KCHVM0007E", {'name': name, 'err': e.get_error_message()})
Why are you not considering the read-only pools?
Notice above that the old code says:
- if t._get_storage_type() not in ["iscsi", "scsi"]: - vol_list = t.fork_vm_storage(vm_uuid)
fork_vm_storage call "to_volume_list", where I moved this condition (see below):
+ # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue
OK
So vol_list is always a list of physical ".img"s. Maybe the confusion is in the "volume" name associated. "Volumes" related to SCSI and iSCSI (read-only) pools do not need the path to be discovered here.
diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index c7635b0..69cc9b5 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -184,11 +184,6 @@ class VMTemplate(object): return xml
def _get_disks_xml(self, vm_uuid): - # Current implementation just allows to create disk in one single - # storage pool, so we cannot mix the types (scsi volumes vs img file) - storage_type = self._get_storage_type() - storage_path = self._get_storage_path() - base_disk_params = {'type': 'disk', 'disk': 'file', 'bus': self.info['disk_bus']} logical_disk_params = {'format': 'raw'} @@ -199,37 +194,44 @@ class VMTemplate(object): 'format': 'raw', 'bus': 'scsi'}
disks_xml = '' - pool_name = pool_name_from_uri(self.info['storagepool']) for index, disk in enumerate(self.info['disks']): params = dict(base_disk_params) params['format'] = disk['format'] - params.update(locals().get('%s_disk_params' % storage_type, {})) + params.update(locals().get('%s_disk_params' % + disk['pool']['type'], {})) params['index'] = index
volume = disk.get('volume') if volume is not None: - params['path'] = self._get_volume_path(pool_name, volume) + params['path'] = self._get_volume_path(disk['pool']['name'], + volume) else: - volume = "%s-%s.img" % (vm_uuid, params['index']) - params['path'] = os.path.join(storage_path, volume) + img = "%s-%s.img" % (vm_uuid, params['index']) + storage_path = self._get_storage_path(disk['pool']['name']) + params['path'] = os.path.join(storage_path, img)
disks_xml += get_disk_xml(params)[1]
return unicode(disks_xml, 'utf-8')
def to_volume_list(self, vm_uuid): - storage_path = self._get_storage_path() ret = [] for i, d in enumerate(self.info['disks']): + # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs + if d['pool']['type'] in ["iscsi", "scsi"]: + continue + index = d.get('index', i) volume = "%s-%s.img" % (vm_uuid, index)
+ storage_path = self._get_storage_path(d['pool']['name']) info = {'name': volume, 'capacity': d['size'], 'format': d['format'], - 'path': '%s/%s' % (storage_path, volume)} + 'path': '%s/%s' % (storage_path, volume), + 'pool': d['pool']['name']}
- if 'logical' == self._get_storage_type() or \ + if 'logical' == d['pool']['type'] or \ info['format'] not in ['qcow2', 'raw']: info['allocation'] = info['capacity'] else: @@ -414,7 +416,7 @@ class VMTemplate(object): def fork_vm_storage(self, vm_uuid): pass
- def _get_storage_path(self): + def _get_storage_path(self, pool_uri=None): return ''
def _get_storage_type(self, pool=None):
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

- This patch implements necessary changes in frontend to allow users to edit and add/remove extra disks to a chosen Template. - This patch also refactor the UI code in order to avoid lots of backend calls, improving performance. - Template disks data saved in objectstore have new fields: * 'size' is always recorded, even for ISCSI/SCSI types; * 'storagepool type' is always recorded; * example: "disks":[ { "index": 0, "format": "raw", "volume": "unit:0:0:2", "pool": { "name": "/plugins/kimchi/storagepools/test-iscsi", "type": "iscsi" } "size": 7 }, { "index": 1, "format": "qcow2", "pool": { "name": "/plugins/kimchi/storagepools/default", "type": "dir" } "size": 3 } ] Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- .../kimchi/ui/js/src/kimchi.template_edit_main.js | 246 ++++++++++----------- .../kimchi/ui/pages/template-edit.html.tmpl | 5 +- 2 files changed, 123 insertions(+), 128 deletions(-) diff --git a/src/wok/plugins/kimchi/ui/js/src/kimchi.template_edit_main.js b/src/wok/plugins/kimchi/ui/js/src/kimchi.template_edit_main.js index 5b77892..10f11ed 100644 --- a/src/wok/plugins/kimchi/ui/js/src/kimchi.template_edit_main.js +++ b/src/wok/plugins/kimchi/ui/js/src/kimchi.template_edit_main.js @@ -22,7 +22,6 @@ kimchi.template_edit_main = function() { var origNetworks; var templateDiskSize; $('#template-name', templateEditMain).val(kimchi.selectedTemplate); - //templateEditMain.tabs(); $('#edit-template-tabs a[data-toggle="tab"]').on('shown.bs.tab', function (e) { var target = $(this).attr('href'); @@ -32,7 +31,7 @@ kimchi.template_edit_main = function() { }); var initTemplate = function(template) { - origDisks = template.disks; + origDisks = template.disks; origPool = template.storagepool; origNetworks = template.networks; for(var i=0;i<template.disks.length;i++){ @@ -68,134 +67,132 @@ kimchi.template_edit_main = function() { return false; } enableSpice(); + var initStorage = function(result) { - var scsipools = {}; + // Gather storagepools 
data + var storagePoolsInfo = new Object(); + $.each(result, function(index, pool) { + if (pool.state === 'active' && pool.type != 'kimchi-iso') { + if (pool.type === 'iscsi' || pool.type === 'scsi') { + volumes = new Object(); + kimchi.listStorageVolumes(pool.name, function(vols) { + $.each(vols, function(i, vol) { + storagePoolsInfo[pool.name + "/" + vol.name] = { + 'type' : pool.type, + 'volSize': vol.capacity / Math.pow(1024, 3)}; + }); + }, null, true); + } else { + storagePoolsInfo[pool.name] = { 'type' : pool.type }; + } + } + }); + var addStorageItem = function(storageData) { var thisName = storageData.storageName; + // Compatibility with old versions + if (storageData.storageVolume) { + storageData.storageDisk = storagePoolsInfo[thisName].volSize; + } + if (!storageData.storageType) { + storageData.storageType = storagePoolsInfo[thisName].type; + } + var nodeStorage = $.parseHTML(wok.substitute($('#template-storage-pool-tmpl').html(), storageData)); $('.template-tab-body', '#form-template-storage').append(nodeStorage); + var storageRow = '#storageRow' + storageData.storageIndex; + var storageOptions = ''; - var scsiOptions = ''; - $('#selectStorageName').find('option').remove(); - $.each(result, function(index, storageEntities) { - if((storageEntities.state === 'active') && (storageEntities.type != 'kimchi-iso')) { - if(storageEntities.type === 'iscsi' || storageEntities.type === 'scsi') { - kimchi.listStorageVolumes(storageEntities.name, function(currentVolume) { - $.each(currentVolume, function(indexSCSI, scsiEntities) { - var tmpPath = storageEntities.name + '/' + scsiEntities.name; - var isSlected = tmpPath === thisName ? ' selected' : ''; - scsiOptions += '<option' + isSlected + '>' + tmpPath + '</option>'; - }); - $('#selectStorageName').append(scsiOptions); - }, function() {}); - } else { - var isSlected = storageEntities.name === thisName ? 
' selected' : ''; - storageOptions += '<option' + isSlected + '>' + storageEntities.name + '</option>'; - } - } + $.each(storagePoolsInfo, function(poolName, value) { + storageOptions += '<option value="' + poolName + '">' + poolName + '</option>'; }); - $('#selectStorageName').append(storageOptions); - $('select','#form-template-storage').selectpicker(); + + $(storageRow + ' #selectStorageName').append(storageOptions); + $(storageRow + ' #selectStorageName').val(storageData.storageName); + $(storageRow + ' #selectStorageName').selectpicker(); + + if (storageData.storageType === 'iscsi' || storageData.storageType === 'scsi') { + $(storageRow + ' .template-storage-disk').attr('readonly', true).prop('disabled', true); + $(storageRow + ' #diskFormat').val('raw'); + $(storageRow + ' #diskFormat').prop('disabled', true).change(); + } else if (storageData.storageType === 'logical') { + $(storageRow + ' #diskFormat').val('raw'); + $(storageRow + ' #diskFormat').prop('disabled', true).change(); + } // Set disk format if (isImageBasedTemplate()) { - $('#diskFormat').val('qcow2'); - $('#diskFormat').prop('disabled', 'disabled'); + $(storageRow + ' #diskFormat').val('qcow2'); + $(storageRow + ' #diskFormat').prop('disabled', 'disabled'); } else { - $('#diskFormat').val(storageData.storageDiskFormat); - $('#diskFormat').on('change', function() { - $('.template-storage-disk-format').val($(this).val()); + $(storageRow + ' #diskFormat').val(storageData.storageDiskFormat); + $(storageRow + ' #diskFormat').on('change', function() { + $(storageRow + ' .template-storage-disk-format').val($(this).val()); }); } + $(storageRow + ' #diskFormat').selectpicker(); - $('#selectStorageName').change(function() { - var selectedItem = $(this).parent().parent(); - var tempStorageNameFull = $(this).val(); - var tempName = tempStorageNameFull.split('/'); - var tempStorageName = tempName[0]; - $('.template-storage-name').val(tempStorageNameFull); - kimchi.getStoragePool(tempStorageName, 
function(info) { - tempType = info.type; - selectedItem.find('.template-storage-type').val(tempType); - if (tempType === 'iscsi' || tempType === 'scsi') { - kimchi.getStoragePoolVolume(tempStorageName, tempName[tempName.length-1], function(info) { - volSize = info.capacity / Math.pow(1024, 3); - $('.template-storage-disk', selectedItem).attr('readonly', true).val(volSize); - if (!isImageBasedTemplate()) { - $('#diskFormat').val('raw'); - $('#diskFormat').prop('disabled', true).change(); - } - }); - } else if (tempType === 'logical') { - $('.template-storage-disk', selectedItem).attr('readonly', false); - if (!isImageBasedTemplate()) { - $('#diskFormat').val('raw'); - $('#diskFormat').prop('disabled', true).change(); - } - } else { - $('.template-storage-disk', selectedItem).attr('readonly', false); - if ($('#diskFormat').prop('disabled') == true && - !isImageBasedTemplate()) { - $('#diskFormat').val('qcow2'); - $('#diskFormat').prop('disabled', false).change(); - } - } - }); + $('.delete', '#form-template-storage').on( "click",function(event) { + event.preventDefault(); + $(this).parent().parent().remove(); }); - }; - if ((origDisks && origDisks.length) && (origPool && origPool.length)) { - splitPool = origPool.split('/'); - var defaultPool = splitPool[splitPool.length-1]; - var defaultType; - - kimchi.getStoragePool(defaultPool, function(info) { - defaultType = info.type; - $.each(origDisks, function(index, diskEntities) { - var storageNodeData = { - viewMode : '', - editMode : 'hide', - storageName : defaultPool, - storageType : defaultType, - storageDisk : diskEntities.size, - storageDiskFormat : diskEntities.format ? 
diskEntities.format : 'qcow2' + $(storageRow + ' #selectStorageName').change(function() { + var poolType = storagePoolsInfo[$(this).val()].type; + $(storageRow + ' .template-storage-name').val($(this).val()); + $(storageRow + ' .template-storage-type').val(poolType); + if (poolType === 'iscsi' || poolType === 'scsi') { + $(storageRow + ' .template-storage-disk').attr('readonly', true).prop('disabled', true).val(storagePoolsInfo[$(this).val()].volSize); + if (!isImageBasedTemplate()) { + $(storageRow + ' #diskFormat').val('raw').prop('disabled', true).change(); } - - if (diskEntities.volume) { - kimchi.getStoragePoolVolume(defaultPool, diskEntities.volume, function(info) { - var volSize = info.capacity / Math.pow(1024, 3); - var nodeData = storageNodeData - nodeData.storageName = defaultPool + '/' + diskEntities.volume; - nodeData.storageDisk = volSize; - addStorageItem(nodeData); - $('.template-storage-disk').attr('readonly', true); - $('#diskFormat').val('raw'); - $('#diskFormat').prop('disabled', true).change(); - }); - } else if (defaultType === 'logical') { - addStorageItem(storageNodeData); - $('#diskFormat').val('raw'); - $('#diskFormat').prop('disabled', true).change(); - } else { - addStorageItem(storageNodeData); + } else if (poolType === 'logical') { + $(storageRow + ' .template-storage-disk').attr('readonly', false).prop('disabled', false); + if (!isImageBasedTemplate()) { + $(storageRow + ' #diskFormat').val('raw').prop('disabled', true).change(); } - }); + } else { + $(storageRow + ' .template-storage-disk').attr('readonly', false).prop('disabled', false); + if ($(storageRow + ' #diskFormat').prop('disabled') == true && !isImageBasedTemplate()) { + $(storageRow + ' #diskFormat').val('qcow2').prop('disabled', false).change(); + } + } + $(storageRow + ' #diskFormat').selectpicker('refresh'); + }); + }; // End of addStorageItem funtion + + if ((origDisks && origDisks.length) && (origPool && origPool.length)) { + origDisks.sort(function(a, b){return 
a.index-b.index}); + $.each(origDisks, function(index, diskEntities) { + var defaultPool = diskEntities.pool.name ? diskEntities.pool.name.split('/').pop() : origPool.split('/').pop() + var storageNodeData = { + storageIndex : diskEntities.index, + storageName : diskEntities.volume ? defaultPool + '/' + diskEntities.volume : defaultPool, + storageType : diskEntities.pool.type, + storageDisk : diskEntities.size, + storageDiskFormat : diskEntities.format ? diskEntities.format : 'qcow2', + storageVolume : diskEntities.volume + } + addStorageItem(storageNodeData); }); } + var storageID = origDisks.length -1; $('#template-edit-storage-add-button').on("click", function(event) { event.preventDefault(); + storageID = storageID + 1; var storageNodeData = { - viewMode : 'hide', - editMode : '', - storageName : 'null', + storageName : 'default', storageType : 'dir', - storageDisk : '10' + storageDisk : '10', + storageDiskFormat : 'qcow2', + storageIndex : storageID } addStorageItem(storageNodeData); }); }; + var initInterface = function(result) { var networkItemNum = 0; var addInterfaceItem = function(networkData) { @@ -235,6 +232,7 @@ kimchi.template_edit_main = function() { }); }); }; + var initProcessor = function(){ var setCPUValue = function(){ if(!$('#cores').hasClass("invalid-field")&&$('#cores').val()!=""){ @@ -278,36 +276,30 @@ kimchi.template_edit_main = function() { }; kimchi.retrieveTemplate(kimchi.selectedTemplate, initTemplate); - $('#tmpl-edit-button-save').on('click', function() { - var editableFields = [ 'name', 'memory', 'disks', 'graphics']; + var editableFields = [ 'name', 'memory', 'graphics']; var data = {}; - //Fix me: Only support one storage pool now - var storages = $('.template-tab-body .item', '#form-template-storage'); - var tempName = $('.template-storage-name', storages).val(); - var tmpItem = $('#form-template-storage .item'); - tempName = tempName.split('/'); - var tempNameHead = tempName[0]; - var tempNameTail = tempNameHead; - 
if($('.template-storage-type', tmpItem).val() === 'iscsi' || $('.template-storage-type', tmpItem).val() == 'scsi') { - tempNameTail = tempName[tempName.length-1]; - } - tempName = '/plugins/kimchi/storagepools/' + tempNameHead; - data['storagepool'] = tempName; - $.each(editableFields, function(i, field) { - /* Support only 1 disk at this moment */ - if (field == 'disks') { - if($('.template-storage-type', tmpItem).val() === 'iscsi' || $('.template-storage-type', tmpItem).val() == 'scsi') { - origDisks[0]['size'] && delete origDisks[0]['size']; - origDisks[0]['volume'] = tempNameTail; - } else { - origDisks[0]['volume'] && delete origDisks[0]['volume']; - origDisks[0].size = Number($('.template-storage-disk', tmpItem).val()); - } - origDisks[0].format = $('.template-storage-disk-format', tmpItem).val(); - data[field] = origDisks; + var disks = $('.template-tab-body .item', '#form-template-storage'); + var disksForUpdate = new Array(); + $.each(disks, function(index, diskEntity) { + var newDisk = { + 'index' : index, + 'pool' : '/plugins/kimchi/storagepools/' + $(diskEntity).find('.template-storage-name').val(), + 'size' : Number($(diskEntity).find('.template-storage-disk').val()), + 'format' : $(diskEntity).find('.template-storage-disk-format').val() + }; + + var storageType = $(diskEntity).find('.template-storage-type').val(); + if(storageType === 'iscsi' || storageType === 'scsi') { + newDisk['volume'] = newDisk['pool'].split('/').pop(); + newDisk['pool'] = newDisk['pool'].slice(0, newDisk['pool'].lastIndexOf('/')); } - else if (field == 'graphics') { + disksForUpdate.push(newDisk); + }); + data.disks = disksForUpdate; + + $.each(editableFields, function(i, field) { + if (field == 'graphics') { var type = $('#form-template-general [name="' + field + '"]').val(); data[field] = {'type': type}; } diff --git a/src/wok/plugins/kimchi/ui/pages/template-edit.html.tmpl b/src/wok/plugins/kimchi/ui/pages/template-edit.html.tmpl index 59a2cfa..dc6b762 100644 --- 
a/src/wok/plugins/kimchi/ui/pages/template-edit.html.tmpl +++ b/src/wok/plugins/kimchi/ui/pages/template-edit.html.tmpl @@ -156,7 +156,7 @@ kimchi.template_edit_main(); </script> <script id="template-storage-pool-tmpl" type="text/html"> - <div class='item'> + <div id="storageRow{storageIndex}" class='item'> <span class="template-storage-cell storage-pool"> <input class="template-storage-name" value={storageName} type="text" style="display:none" /> <select id="selectStorageName"></select> @@ -182,6 +182,9 @@ <option value="vpc">vpc</option> </select> </span> + <span class="pull-right"> + <button class="delete btn-primary btn"><i class="fa fa-minus-circle"></i> $_("Delete")</button> + </span> </div> </script> <script id="template-interface-tmpl" type="text/html"> -- 2.1.0

This patch fixes problems in test cases after change the way Kimchi updates the disks in the Templates. Now disks have the storagepool assigned in its data. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/mockmodel.py | 5 ++- src/wok/plugins/kimchi/tests/test_model.py | 13 ++++++++ src/wok/plugins/kimchi/tests/test_rest.py | 5 ++- src/wok/plugins/kimchi/tests/test_template.py | 44 ++++++++++++++++++++++----- 4 files changed, 58 insertions(+), 9 deletions(-) diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py index 9186f78..f31a297 100644 --- a/src/wok/plugins/kimchi/mockmodel.py +++ b/src/wok/plugins/kimchi/mockmodel.py @@ -46,6 +46,7 @@ from wok.plugins.kimchi.model import storagevolumes from wok.plugins.kimchi.model.templates import LibvirtVMTemplate from wok.plugins.kimchi.model.users import PAMUsersModel from wok.plugins.kimchi.model.groups import PAMGroupsModel +from wok.plugins.kimchi.utils import pool_name_from_uri from wok.plugins.kimchi.vmtemplate import VMTemplate @@ -72,6 +73,7 @@ class MockModel(Model): # test:///default driver defaults = dict(osinfo.defaults) defaults.update(mockmodel_defaults) + defaults['disks'][0]['pool'] = defaults['storagepool'] osinfo.defaults = dict(defaults) self._mock_vgs = MockVolumeGroups() @@ -248,7 +250,8 @@ class MockModel(Model): def _probe_image(self, path): return ('unknown', 'unknown') - def _get_volume_path(self, pool, vol): + def _get_volume_path(self, pool_uri, vol): + pool = pool_name_from_uri(pool_uri) pool_info = self.storagepool_lookup(pool) if pool_info['type'] == 'scsi': return self._mock_storagevolumes.scsi_volumes[vol]['path'] diff --git a/src/wok/plugins/kimchi/tests/test_model.py b/src/wok/plugins/kimchi/tests/test_model.py index 65d97f2..2d59594 100644 --- a/src/wok/plugins/kimchi/tests/test_model.py +++ b/src/wok/plugins/kimchi/tests/test_model.py @@ -76,6 +76,16 @@ def tearDownModule(): shutil.rmtree(TMP_DIR) +def 
_setDiskPoolDefault(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default' + + +def _setDiskPoolDefaultTest(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default-pool' + + class ModelTests(unittest.TestCase): def setUp(self): self.tmp_store = '/tmp/kimchi-store-test' @@ -1051,6 +1061,9 @@ class ModelTests(unittest.TestCase): 'arch': 'i686' } + _setDiskPoolDefaultTest() + rollback.prependDefer(_setDiskPoolDefault) + inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') diff --git a/src/wok/plugins/kimchi/tests/test_rest.py b/src/wok/plugins/kimchi/tests/test_rest.py index 544f2e6..34348b8 100644 --- a/src/wok/plugins/kimchi/tests/test_rest.py +++ b/src/wok/plugins/kimchi/tests/test_rest.py @@ -220,6 +220,8 @@ class RestTests(unittest.TestCase): vm = json.loads( self.request('/plugins/kimchi/vms/∨м-црdαtеd', req).read() ) + # Memory was hot plugged + params['memory'] += 1024 for key in params.keys(): self.assertEquals(params[key], vm[key]) @@ -935,7 +937,8 @@ class RestTests(unittest.TestCase): ) lun_name = json.loads(resp.read())[0]['name'] - tmpl_params['disks'] = [{'index': 0, 'volume': lun_name}] + tmpl_params['disks'] = [{'index': 0, 'volume': lun_name, + 'pool': tmpl_params['storagepool']}] req = json.dumps(tmpl_params) resp = self.request('/plugins/kimchi/templates', req, 'POST') self.assertEquals(201, resp.status) diff --git a/src/wok/plugins/kimchi/tests/test_template.py b/src/wok/plugins/kimchi/tests/test_template.py index 3f026f3..3caad93 100644 --- a/src/wok/plugins/kimchi/tests/test_template.py +++ b/src/wok/plugins/kimchi/tests/test_template.py @@ -25,7 +25,6 @@ from functools import partial from tests.utils import get_free_port, patch_auth, request, run_server -from wok.plugins.kimchi import osinfo from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.plugins.kimchi.mockmodel import MockModel @@ -37,6 +36,8 @@ port = None ssl_port = None cherrypy_port 
= None +DEFAULT_POOL = u'/plugins/kimchi/storagepools/default-pool' + def setUpModule(): global test_server, model, host, port, ssl_port, cherrypy_port @@ -89,9 +90,11 @@ class TemplateTests(unittest.TestCase): ) self.assertEquals(sorted(tmpl.keys()), sorted(keys)) - # Verify if default disk format was configured - default_disk_format = osinfo.defaults['disks'][0]['format'] - self.assertEquals(tmpl['disks'][0]['format'], default_disk_format) + disk_keys = ['index', 'pool', 'size', 'format'] + disk_pool_keys = ['name', 'type'] + self.assertEquals(sorted(tmpl['disks'][0].keys()), sorted(disk_keys)) + self.assertEquals(sorted(tmpl['disks'][0]['pool'].keys()), + sorted(disk_pool_keys)) # Clone a template resp = self.request('/plugins/kimchi/templates/test/clone', '{}', @@ -212,13 +215,20 @@ class TemplateTests(unittest.TestCase): self.assertEquals(update_tmpl['cdrom'], cdrom_data['cdrom']) # Update disks - disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw'}, - {'index': 1, 'size': 20, 'format': 'raw'}]} + disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw', + 'pool': DEFAULT_POOL}, + {'index': 1, 'size': 20, 'format': 'qcow2', + 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, resp.status) resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} + + disk_data['disks'][1]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks']) # For all supported types, edit the template and check if @@ -227,13 +237,15 @@ class TemplateTests(unittest.TestCase): 'qed', 'raw', 'vmdk', 'vpc'] for disk_type in disk_types: disk_data = {'disks': [{'index': 0, 'format': disk_type, - 'size': 10}]} + 'size': 10, 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, 
resp.status) resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {u'name': DEFAULT_POOL, + u'type': u'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks']) # Update folder @@ -348,6 +360,24 @@ class TemplateTests(unittest.TestCase): 'PUT') self.assertEquals(200, resp.status) + # Test disk template update with different pool + pool_uri = u'/plugins/kimchi/storagepools/kīмсhīUnitTestDirPool' + disk_data = {'disks': [{'size': 5, 'format': 'qcow2', + 'pool': pool_uri}]} + req = json.dumps(disk_data) + resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') + self.assertEquals(200, resp.status) + del(disk_data['disks'][0]['pool']) + disk_data['disks'][0]['index'] = 0 + disk_data['disks'][0]['pool'] = {u'name': pool_uri, + u'type': u'dir'} + tmpl = json.loads( + self.request('/plugins/kimchi/templates/test').read()) + self.assertEquals(sorted(disk_data['disks'][0].keys()), + sorted(tmpl['disks'][0].keys())) + self.assertEquals(sorted(disk_data['disks'][0].values()), + sorted(tmpl['disks'][0].values())) + def test_tmpl_integrity(self): # Create a network and a pool for testing template integrity net = {'name': u'nat-network', 'connection': 'nat'} -- 2.1.0

On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch fixes problems in test cases after changing the way Kimchi updates the disks in the Templates. Now each disk has the storagepool assigned in its data.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/mockmodel.py | 5 ++- src/wok/plugins/kimchi/tests/test_model.py | 13 ++++++++ src/wok/plugins/kimchi/tests/test_rest.py | 5 ++- src/wok/plugins/kimchi/tests/test_template.py | 44 ++++++++++++++++++++++----- 4 files changed, 58 insertions(+), 9 deletions(-)
diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py index 9186f78..f31a297 100644 --- a/src/wok/plugins/kimchi/mockmodel.py +++ b/src/wok/plugins/kimchi/mockmodel.py @@ -46,6 +46,7 @@ from wok.plugins.kimchi.model import storagevolumes from wok.plugins.kimchi.model.templates import LibvirtVMTemplate from wok.plugins.kimchi.model.users import PAMUsersModel from wok.plugins.kimchi.model.groups import PAMGroupsModel +from wok.plugins.kimchi.utils import pool_name_from_uri from wok.plugins.kimchi.vmtemplate import VMTemplate
@@ -72,6 +73,7 @@ class MockModel(Model): # test:///default driver defaults = dict(osinfo.defaults) defaults.update(mockmodel_defaults) + defaults['disks'][0]['pool'] = defaults['storagepool']
The former storagepool information should be removed as each volume has its own pool information and the template.conf updated accordingly. [[disk.0]] # Disk size in GB #size = 10 # Disk format #format = qcow2 # Pool pool = default
osinfo.defaults = dict(defaults)
self._mock_vgs = MockVolumeGroups() @@ -248,7 +250,8 @@ class MockModel(Model): def _probe_image(self, path): return ('unknown', 'unknown')
- def _get_volume_path(self, pool, vol): + def _get_volume_path(self, pool_uri, vol): + pool = pool_name_from_uri(pool_uri) pool_info = self.storagepool_lookup(pool) if pool_info['type'] == 'scsi': return self._mock_storagevolumes.scsi_volumes[vol]['path'] diff --git a/src/wok/plugins/kimchi/tests/test_model.py b/src/wok/plugins/kimchi/tests/test_model.py index 65d97f2..2d59594 100644 --- a/src/wok/plugins/kimchi/tests/test_model.py +++ b/src/wok/plugins/kimchi/tests/test_model.py @@ -76,6 +76,16 @@ def tearDownModule(): shutil.rmtree(TMP_DIR)
+def _setDiskPoolDefault(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default' + + +def _setDiskPoolDefaultTest(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default-pool' + + class ModelTests(unittest.TestCase): def setUp(self): self.tmp_store = '/tmp/kimchi-store-test' @@ -1051,6 +1061,9 @@ class ModelTests(unittest.TestCase): 'arch': 'i686' }
+ _setDiskPoolDefaultTest() + rollback.prependDefer(_setDiskPoolDefault) + inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test')
diff --git a/src/wok/plugins/kimchi/tests/test_rest.py b/src/wok/plugins/kimchi/tests/test_rest.py index 544f2e6..34348b8 100644 --- a/src/wok/plugins/kimchi/tests/test_rest.py +++ b/src/wok/plugins/kimchi/tests/test_rest.py @@ -220,6 +220,8 @@ class RestTests(unittest.TestCase): vm = json.loads( self.request('/plugins/kimchi/vms/∨м-црdαtеd', req).read() ) + # Memory was hot plugged + params['memory'] += 1024 for key in params.keys(): self.assertEquals(params[key], vm[key])
@@ -935,7 +937,8 @@ class RestTests(unittest.TestCase): ) lun_name = json.loads(resp.read())[0]['name']
- tmpl_params['disks'] = [{'index': 0, 'volume': lun_name}] + tmpl_params['disks'] = [{'index': 0, 'volume': lun_name, + 'pool': tmpl_params['storagepool']}] req = json.dumps(tmpl_params) resp = self.request('/plugins/kimchi/templates', req, 'POST') self.assertEquals(201, resp.status) diff --git a/src/wok/plugins/kimchi/tests/test_template.py b/src/wok/plugins/kimchi/tests/test_template.py index 3f026f3..3caad93 100644 --- a/src/wok/plugins/kimchi/tests/test_template.py +++ b/src/wok/plugins/kimchi/tests/test_template.py @@ -25,7 +25,6 @@ from functools import partial
from tests.utils import get_free_port, patch_auth, request, run_server
-from wok.plugins.kimchi import osinfo from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.plugins.kimchi.mockmodel import MockModel
@@ -37,6 +36,8 @@ port = None ssl_port = None cherrypy_port = None
+DEFAULT_POOL = u'/plugins/kimchi/storagepools/default-pool' +
def setUpModule(): global test_server, model, host, port, ssl_port, cherrypy_port @@ -89,9 +90,11 @@ class TemplateTests(unittest.TestCase): ) self.assertEquals(sorted(tmpl.keys()), sorted(keys))
- # Verify if default disk format was configured - default_disk_format = osinfo.defaults['disks'][0]['format'] - self.assertEquals(tmpl['disks'][0]['format'], default_disk_format) + disk_keys = ['index', 'pool', 'size', 'format'] + disk_pool_keys = ['name', 'type'] + self.assertEquals(sorted(tmpl['disks'][0].keys()), sorted(disk_keys)) + self.assertEquals(sorted(tmpl['disks'][0]['pool'].keys()), + sorted(disk_pool_keys))
# Clone a template resp = self.request('/plugins/kimchi/templates/test/clone', '{}', @@ -212,13 +215,20 @@ class TemplateTests(unittest.TestCase): self.assertEquals(update_tmpl['cdrom'], cdrom_data['cdrom'])
# Update disks - disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw'}, - {'index': 1, 'size': 20, 'format': 'raw'}]} + disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw', + 'pool': DEFAULT_POOL}, + {'index': 1, 'size': 20, 'format': 'qcow2', + 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, resp.status) resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} + + disk_data['disks'][1]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks'])
# For all supported types, edit the template and check if @@ -227,13 +237,15 @@ class TemplateTests(unittest.TestCase): 'qed', 'raw', 'vmdk', 'vpc'] for disk_type in disk_types: disk_data = {'disks': [{'index': 0, 'format': disk_type, - 'size': 10}]} + 'size': 10, 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, resp.status)
resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {u'name': DEFAULT_POOL, + u'type': u'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks'])
# Update folder @@ -348,6 +360,24 @@ class TemplateTests(unittest.TestCase): 'PUT') self.assertEquals(200, resp.status)
+ # Test disk template update with different pool + pool_uri = u'/plugins/kimchi/storagepools/kīмсhīUnitTestDirPool' + disk_data = {'disks': [{'size': 5, 'format': 'qcow2', + 'pool': pool_uri}]} + req = json.dumps(disk_data) + resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') + self.assertEquals(200, resp.status) + del(disk_data['disks'][0]['pool']) + disk_data['disks'][0]['index'] = 0 + disk_data['disks'][0]['pool'] = {u'name': pool_uri, + u'type': u'dir'} + tmpl = json.loads( + self.request('/plugins/kimchi/templates/test').read()) + self.assertEquals(sorted(disk_data['disks'][0].keys()), + sorted(tmpl['disks'][0].keys())) + self.assertEquals(sorted(disk_data['disks'][0].values()), + sorted(tmpl['disks'][0].values())) + def test_tmpl_integrity(self): # Create a network and a pool for testing template integrity net = {'name': u'nat-network', 'connection': 'nat'}

comments below :) On 11/30/2015 09:17 AM, Aline Manera wrote:
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch fixes problems in test cases after changing the way Kimchi updates the disks in the Templates. Now disks have the storagepool assigned in their data.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/mockmodel.py | 5 ++- src/wok/plugins/kimchi/tests/test_model.py | 13 ++++++++ src/wok/plugins/kimchi/tests/test_rest.py | 5 ++- src/wok/plugins/kimchi/tests/test_template.py | 44 ++++++++++++++++++++++----- 4 files changed, 58 insertions(+), 9 deletions(-)
diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py index 9186f78..f31a297 100644 --- a/src/wok/plugins/kimchi/mockmodel.py +++ b/src/wok/plugins/kimchi/mockmodel.py @@ -46,6 +46,7 @@ from wok.plugins.kimchi.model import storagevolumes from wok.plugins.kimchi.model.templates import LibvirtVMTemplate from wok.plugins.kimchi.model.users import PAMUsersModel from wok.plugins.kimchi.model.groups import PAMGroupsModel +from wok.plugins.kimchi.utils import pool_name_from_uri from wok.plugins.kimchi.vmtemplate import VMTemplate
@@ -72,6 +73,7 @@ class MockModel(Model): # test:///default driver defaults = dict(osinfo.defaults) defaults.update(mockmodel_defaults) + defaults['disks'][0]['pool'] = defaults['storagepool']
The former storagepool information should be removed as each volume has its own pool information and the template.conf updated accordingly.
[[disk.0]] # Disk size in GB #size = 10
# Disk format #format = qcow2
# Pool pool = default
Oh, I forgot to add this information. But I would keep "storagepool" as the default pool when "pool" is not given. Also, we may need to change the template.conf organization. Today it is: [storagepool] [[disk.0]] [[disk.1]] [[disk.2]] ... It would become: [storagepool] [disks] [[disk.0]] [[disk.1]] [[disk.2]] ... So, if "pool" is not passed inside disk.X, the storagepool value is used
osinfo.defaults = dict(defaults)
self._mock_vgs = MockVolumeGroups() @@ -248,7 +250,8 @@ class MockModel(Model): def _probe_image(self, path): return ('unknown', 'unknown')
- def _get_volume_path(self, pool, vol): + def _get_volume_path(self, pool_uri, vol): + pool = pool_name_from_uri(pool_uri) pool_info = self.storagepool_lookup(pool) if pool_info['type'] == 'scsi': return self._mock_storagevolumes.scsi_volumes[vol]['path'] diff --git a/src/wok/plugins/kimchi/tests/test_model.py b/src/wok/plugins/kimchi/tests/test_model.py index 65d97f2..2d59594 100644 --- a/src/wok/plugins/kimchi/tests/test_model.py +++ b/src/wok/plugins/kimchi/tests/test_model.py @@ -76,6 +76,16 @@ def tearDownModule(): shutil.rmtree(TMP_DIR)
+def _setDiskPoolDefault(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default' + + +def _setDiskPoolDefaultTest(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default-pool' + + class ModelTests(unittest.TestCase): def setUp(self): self.tmp_store = '/tmp/kimchi-store-test' @@ -1051,6 +1061,9 @@ class ModelTests(unittest.TestCase): 'arch': 'i686' }
+ _setDiskPoolDefaultTest() + rollback.prependDefer(_setDiskPoolDefault) + inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test')
diff --git a/src/wok/plugins/kimchi/tests/test_rest.py b/src/wok/plugins/kimchi/tests/test_rest.py index 544f2e6..34348b8 100644 --- a/src/wok/plugins/kimchi/tests/test_rest.py +++ b/src/wok/plugins/kimchi/tests/test_rest.py @@ -220,6 +220,8 @@ class RestTests(unittest.TestCase): vm = json.loads( self.request('/plugins/kimchi/vms/∨м-црdαtеd', req).read() ) + # Memory was hot plugged + params['memory'] += 1024 for key in params.keys(): self.assertEquals(params[key], vm[key])
@@ -935,7 +937,8 @@ class RestTests(unittest.TestCase): ) lun_name = json.loads(resp.read())[0]['name']
- tmpl_params['disks'] = [{'index': 0, 'volume': lun_name}] + tmpl_params['disks'] = [{'index': 0, 'volume': lun_name, + 'pool': tmpl_params['storagepool']}] req = json.dumps(tmpl_params) resp = self.request('/plugins/kimchi/templates', req, 'POST') self.assertEquals(201, resp.status) diff --git a/src/wok/plugins/kimchi/tests/test_template.py b/src/wok/plugins/kimchi/tests/test_template.py index 3f026f3..3caad93 100644 --- a/src/wok/plugins/kimchi/tests/test_template.py +++ b/src/wok/plugins/kimchi/tests/test_template.py @@ -25,7 +25,6 @@ from functools import partial
from tests.utils import get_free_port, patch_auth, request, run_server
-from wok.plugins.kimchi import osinfo from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.plugins.kimchi.mockmodel import MockModel
@@ -37,6 +36,8 @@ port = None ssl_port = None cherrypy_port = None
+DEFAULT_POOL = u'/plugins/kimchi/storagepools/default-pool' +
def setUpModule(): global test_server, model, host, port, ssl_port, cherrypy_port @@ -89,9 +90,11 @@ class TemplateTests(unittest.TestCase): ) self.assertEquals(sorted(tmpl.keys()), sorted(keys))
- # Verify if default disk format was configured - default_disk_format = osinfo.defaults['disks'][0]['format'] - self.assertEquals(tmpl['disks'][0]['format'], default_disk_format) + disk_keys = ['index', 'pool', 'size', 'format'] + disk_pool_keys = ['name', 'type'] + self.assertEquals(sorted(tmpl['disks'][0].keys()), sorted(disk_keys)) + self.assertEquals(sorted(tmpl['disks'][0]['pool'].keys()), + sorted(disk_pool_keys))
# Clone a template resp = self.request('/plugins/kimchi/templates/test/clone', '{}', @@ -212,13 +215,20 @@ class TemplateTests(unittest.TestCase): self.assertEquals(update_tmpl['cdrom'], cdrom_data['cdrom'])
# Update disks - disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw'}, - {'index': 1, 'size': 20, 'format': 'raw'}]} + disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw', + 'pool': DEFAULT_POOL}, + {'index': 1, 'size': 20, 'format': 'qcow2', + 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, resp.status) resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} + + disk_data['disks'][1]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks'])
# For all supported types, edit the template and check if @@ -227,13 +237,15 @@ class TemplateTests(unittest.TestCase): 'qed', 'raw', 'vmdk', 'vpc'] for disk_type in disk_types: disk_data = {'disks': [{'index': 0, 'format': disk_type, - 'size': 10}]} + 'size': 10, 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, resp.status)
resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {u'name': DEFAULT_POOL, + u'type': u'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks'])
# Update folder @@ -348,6 +360,24 @@ class TemplateTests(unittest.TestCase): 'PUT') self.assertEquals(200, resp.status)
+ # Test disk template update with different pool + pool_uri = u'/plugins/kimchi/storagepools/kīмсhīUnitTestDirPool' + disk_data = {'disks': [{'size': 5, 'format': 'qcow2', + 'pool': pool_uri}]} + req = json.dumps(disk_data) + resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') + self.assertEquals(200, resp.status) + del(disk_data['disks'][0]['pool']) + disk_data['disks'][0]['index'] = 0 + disk_data['disks'][0]['pool'] = {u'name': pool_uri, + u'type': u'dir'} + tmpl = json.loads( + self.request('/plugins/kimchi/templates/test').read()) + self.assertEquals(sorted(disk_data['disks'][0].keys()), + sorted(tmpl['disks'][0].keys())) + self.assertEquals(sorted(disk_data['disks'][0].values()), + sorted(tmpl['disks'][0].values())) + def test_tmpl_integrity(self): # Create a network and a pool for testing template integrity net = {'name': u'nat-network', 'connection': 'nat'}
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

On 30/11/2015 11:48, Rodrigo Trujillo wrote:
comments below :)
On 11/30/2015 09:17 AM, Aline Manera wrote:
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
This patch fixes problems in test cases after changing the way Kimchi updates the disks in the Templates. Now disks have the storagepool assigned in their data.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/mockmodel.py | 5 ++- src/wok/plugins/kimchi/tests/test_model.py | 13 ++++++++ src/wok/plugins/kimchi/tests/test_rest.py | 5 ++- src/wok/plugins/kimchi/tests/test_template.py | 44 ++++++++++++++++++++++----- 4 files changed, 58 insertions(+), 9 deletions(-)
diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py index 9186f78..f31a297 100644 --- a/src/wok/plugins/kimchi/mockmodel.py +++ b/src/wok/plugins/kimchi/mockmodel.py @@ -46,6 +46,7 @@ from wok.plugins.kimchi.model import storagevolumes from wok.plugins.kimchi.model.templates import LibvirtVMTemplate from wok.plugins.kimchi.model.users import PAMUsersModel from wok.plugins.kimchi.model.groups import PAMGroupsModel +from wok.plugins.kimchi.utils import pool_name_from_uri from wok.plugins.kimchi.vmtemplate import VMTemplate
@@ -72,6 +73,7 @@ class MockModel(Model): # test:///default driver defaults = dict(osinfo.defaults) defaults.update(mockmodel_defaults) + defaults['disks'][0]['pool'] = defaults['storagepool']
The former storagepool information should be removed as each volume has its own pool information and the template.conf updated accordingly.
[[disk.0]] # Disk size in GB #size = 10
# Disk format #format = qcow2
# Pool pool = default
Oh, I forgot to add this information. But I would keep "storagepool" as the default pool when "pool" is not given.
Also, we may need to change the template.conf organization.
Today it is: [storagepool] [[disk.0]]
[[disk.1]]
[[disk.2]] ...
It would become: [storagepool]
[disks] [[disk.0]] [[disk.1]] [[disk.2]] ...
So, if "pool" is not passed inside disk.X, the storagepool value is used
Nope! The default pool is listed by each volume. It does not make sense to keep the former pool.
osinfo.defaults = dict(defaults)
self._mock_vgs = MockVolumeGroups() @@ -248,7 +250,8 @@ class MockModel(Model): def _probe_image(self, path): return ('unknown', 'unknown')
- def _get_volume_path(self, pool, vol): + def _get_volume_path(self, pool_uri, vol): + pool = pool_name_from_uri(pool_uri) pool_info = self.storagepool_lookup(pool) if pool_info['type'] == 'scsi': return self._mock_storagevolumes.scsi_volumes[vol]['path'] diff --git a/src/wok/plugins/kimchi/tests/test_model.py b/src/wok/plugins/kimchi/tests/test_model.py index 65d97f2..2d59594 100644 --- a/src/wok/plugins/kimchi/tests/test_model.py +++ b/src/wok/plugins/kimchi/tests/test_model.py @@ -76,6 +76,16 @@ def tearDownModule(): shutil.rmtree(TMP_DIR)
+def _setDiskPoolDefault(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default' + + +def _setDiskPoolDefaultTest(): + osinfo.defaults['disks'][0]['pool'] = \ + '/plugins/kimchi/storagepools/default-pool' + + class ModelTests(unittest.TestCase): def setUp(self): self.tmp_store = '/tmp/kimchi-store-test' @@ -1051,6 +1061,9 @@ class ModelTests(unittest.TestCase): 'arch': 'i686' }
+ _setDiskPoolDefaultTest() + rollback.prependDefer(_setDiskPoolDefault) + inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test')
diff --git a/src/wok/plugins/kimchi/tests/test_rest.py b/src/wok/plugins/kimchi/tests/test_rest.py index 544f2e6..34348b8 100644 --- a/src/wok/plugins/kimchi/tests/test_rest.py +++ b/src/wok/plugins/kimchi/tests/test_rest.py @@ -220,6 +220,8 @@ class RestTests(unittest.TestCase): vm = json.loads( self.request('/plugins/kimchi/vms/∨м-црdαtеd', req).read() ) + # Memory was hot plugged + params['memory'] += 1024 for key in params.keys(): self.assertEquals(params[key], vm[key])
@@ -935,7 +937,8 @@ class RestTests(unittest.TestCase): ) lun_name = json.loads(resp.read())[0]['name']
- tmpl_params['disks'] = [{'index': 0, 'volume': lun_name}] + tmpl_params['disks'] = [{'index': 0, 'volume': lun_name, + 'pool': tmpl_params['storagepool']}] req = json.dumps(tmpl_params) resp = self.request('/plugins/kimchi/templates', req, 'POST') self.assertEquals(201, resp.status) diff --git a/src/wok/plugins/kimchi/tests/test_template.py b/src/wok/plugins/kimchi/tests/test_template.py index 3f026f3..3caad93 100644 --- a/src/wok/plugins/kimchi/tests/test_template.py +++ b/src/wok/plugins/kimchi/tests/test_template.py @@ -25,7 +25,6 @@ from functools import partial
from tests.utils import get_free_port, patch_auth, request, run_server
-from wok.plugins.kimchi import osinfo from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.plugins.kimchi.mockmodel import MockModel
@@ -37,6 +36,8 @@ port = None ssl_port = None cherrypy_port = None
+DEFAULT_POOL = u'/plugins/kimchi/storagepools/default-pool' +
def setUpModule(): global test_server, model, host, port, ssl_port, cherrypy_port @@ -89,9 +90,11 @@ class TemplateTests(unittest.TestCase): ) self.assertEquals(sorted(tmpl.keys()), sorted(keys))
- # Verify if default disk format was configured - default_disk_format = osinfo.defaults['disks'][0]['format'] - self.assertEquals(tmpl['disks'][0]['format'], default_disk_format) + disk_keys = ['index', 'pool', 'size', 'format'] + disk_pool_keys = ['name', 'type'] + self.assertEquals(sorted(tmpl['disks'][0].keys()), sorted(disk_keys)) + self.assertEquals(sorted(tmpl['disks'][0]['pool'].keys()), + sorted(disk_pool_keys))
# Clone a template resp = self.request('/plugins/kimchi/templates/test/clone', '{}', @@ -212,13 +215,20 @@ class TemplateTests(unittest.TestCase): self.assertEquals(update_tmpl['cdrom'], cdrom_data['cdrom'])
# Update disks - disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw'}, - {'index': 1, 'size': 20, 'format': 'raw'}]} + disk_data = {'disks': [{'index': 0, 'size': 10, 'format': 'raw', + 'pool': DEFAULT_POOL}, + {'index': 1, 'size': 20, 'format': 'qcow2', + 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, resp.status) resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} + + disk_data['disks'][1]['pool'] = {'name': DEFAULT_POOL, + 'type': 'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks'])
# For all supported types, edit the template and check if @@ -227,13 +237,15 @@ class TemplateTests(unittest.TestCase): 'qed', 'raw', 'vmdk', 'vpc'] for disk_type in disk_types: disk_data = {'disks': [{'index': 0, 'format': disk_type, - 'size': 10}]} + 'size': 10, 'pool': DEFAULT_POOL}]} resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') self.assertEquals(200, resp.status)
resp = self.request(new_tmpl_uri) self.assertEquals(200, resp.status) updated_tmpl = json.loads(resp.read()) + disk_data['disks'][0]['pool'] = {u'name': DEFAULT_POOL, + u'type': u'dir'} self.assertEquals(updated_tmpl['disks'], disk_data['disks'])
# Update folder @@ -348,6 +360,24 @@ class TemplateTests(unittest.TestCase): 'PUT') self.assertEquals(200, resp.status)
+ # Test disk template update with different pool + pool_uri = u'/plugins/kimchi/storagepools/kīмсhīUnitTestDirPool' + disk_data = {'disks': [{'size': 5, 'format': 'qcow2', + 'pool': pool_uri}]} + req = json.dumps(disk_data) + resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') + self.assertEquals(200, resp.status) + del(disk_data['disks'][0]['pool']) + disk_data['disks'][0]['index'] = 0 + disk_data['disks'][0]['pool'] = {u'name': pool_uri, + u'type': u'dir'} + tmpl = json.loads( + self.request('/plugins/kimchi/templates/test').read()) + self.assertEquals(sorted(disk_data['disks'][0].keys()), + sorted(tmpl['disks'][0].keys())) + self.assertEquals(sorted(disk_data['disks'][0].values()), + sorted(tmpl['disks'][0].values())) + def test_tmpl_integrity(self): # Create a network and a pool for testing template integrity net = {'name': u'nat-network', 'connection': 'nat'}
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

Currently, the VCPU count is updated by editing the configuration XML (tag: <vcpu>). However, in some cases, editing that tag will only update the maximum VCPU count, not the current one. For example, if the VM has the following XML tag: <vcpu current='2'>8</vcpu> and the user updates the VCPU value to 10, Kimchi will update the XML tag to: <vcpu current='2'>10</vcpu> Use the libvirt function "setVcpusFlags" to update the current and the maximum VCPU values to the one specified by the user. * This patch also changes vcpu setting when guest is being created. Now 'current' is always set and is the total of cpus from the Template and maxVcpu is the >= 255 or (sockets * cores * threads) in PPC. In x86 it is the value of libvirt getMaxVcpu. * Change NUMA management. Sets all cpus to 0. There is not impact to the guest. - Fixes VM cpu/memory offline update Signed-off-by: Aline Manera <alinefm@linux.vnet.ibm.com> Signed-off-by: Crístian Deives <cristiandeives@gmail.com> Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/i18n.py | 3 + src/wok/plugins/kimchi/mockmodel.py | 52 ++++++-------- src/wok/plugins/kimchi/model/vms.py | 116 ++++++++++++++++++++++++++---- src/wok/plugins/kimchi/tests/test_rest.py | 10 ++- src/wok/plugins/kimchi/vmtemplate.py | 25 ++++++- 5 files changed, 157 insertions(+), 49 deletions(-) diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py index b18048c..2f898c6 100644 --- a/src/wok/plugins/kimchi/i18n.py +++ b/src/wok/plugins/kimchi/i18n.py @@ -129,6 +129,9 @@ messages = { "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."), "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."), + "KCHVM0073E": _("Unable to update the following 
parameters while the VM is offline: %(params)s"), + "KCHVM0074E": _("Unable to update the following parameters while the VM is online: %(params)s"), + "KCHVM0075E": _("Cannot change VCPU value because '%(vm)s' has a topology defined - sockets: %(sockets)s, cores: %(cores)s, threads: %(threads)s."), "KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py index f31a297..31533e6 100644 --- a/src/wok/plugins/kimchi/mockmodel.py +++ b/src/wok/plugins/kimchi/mockmodel.py @@ -21,6 +21,8 @@ import libvirt import lxml.etree as ET import os import time + +from collections import defaultdict from lxml import objectify from lxml.builder import E @@ -61,10 +63,9 @@ storagevolumes.VALID_RAW_CONTENT = ['dos/mbr boot sector', class MockModel(Model): - _mock_vms = {} + _mock_vms = defaultdict(list) _mock_snapshots = {} _XMLDesc = libvirt.virDomain.XMLDesc - _defineXML = libvirt.virConnect.defineXML _undefineDomain = libvirt.virDomain.undefine _libvirt_get_vol_path = LibvirtVMTemplate._get_volume_path @@ -83,7 +84,6 @@ class MockModel(Model): cpuinfo.get_topo_capabilities = MockModel.get_topo_capabilities vmifaces.getDHCPLeases = MockModel.getDHCPLeases - libvirt.virConnect.defineXML = MockModel.domainDefineXML libvirt.virDomain.XMLDesc = MockModel.domainXMLDesc libvirt.virDomain.undefine = MockModel.undefineDomain libvirt.virDomain.attachDeviceFlags = MockModel.attachDeviceFlags @@ -126,7 +126,7 @@ class MockModel(Model): imageinfo.probe_image = self._probe_image def reset(self): - MockModel._mock_vms = {} + MockModel._mock_vms = defaultdict(list) MockModel._mock_snapshots = {} if hasattr(self, 'objstore'): @@ -159,21 +159,14 @@ class MockModel(Model): return ET.fromstring(xml) @staticmethod - def domainDefineXML(conn, xml): - name = 
objectify.fromstring(xml).name.text - try: - dom = conn.lookupByName(name) - if not dom.isActive(): - MockModel._mock_vms[name] = xml - except: - pass - - return MockModel._defineXML(conn, xml) - - @staticmethod def domainXMLDesc(dom, flags=0): - return MockModel._mock_vms.get(dom.name(), - MockModel._XMLDesc(dom, flags)) + xml = MockModel._XMLDesc(dom, flags) + root = objectify.fromstring(xml) + + for dev_xml in MockModel._mock_vms.get(dom.name(), []): + dev = objectify.fromstring(dev_xml) + root.devices.append(dev) + return ET.tostring(root, encoding="utf-8") @staticmethod def undefineDomain(dom): @@ -184,12 +177,7 @@ class MockModel(Model): @staticmethod def attachDeviceFlags(dom, xml, flags=0): - old_xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE) - root = objectify.fromstring(old_xml) - dev = objectify.fromstring(xml) - root.devices.append(dev) - - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8") + MockModel._mock_vms[dom.name()].append(xml) @staticmethod def _get_device_node(dom, xml): @@ -215,16 +203,18 @@ class MockModel(Model): @staticmethod def detachDeviceFlags(dom, xml, flags=0): - root, dev = MockModel._get_device_node(dom, xml) - root.devices.remove(dev) - - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8") + node = ET.fromstring(xml) + xml = ET.tostring(node, encoding="utf-8", pretty_print=True) + if xml in MockModel._mock_vms[dom.name()]: + MockModel._mock_vms[dom.name()].remove(xml) @staticmethod def updateDeviceFlags(dom, xml, flags=0): - root, old_dev = MockModel._get_device_node(dom, xml) - root.devices.replace(old_dev, objectify.fromstring(xml)) - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8") + _, old_dev = MockModel._get_device_node(dom, xml) + old_xml = ET.tostring(old_dev, encoding="utf-8", pretty_print=True) + if old_xml in MockModel._mock_vms[dom.name()]: + MockModel._mock_vms[dom.name()].remove(old_xml) + MockModel._mock_vms[dom.name()].append(xml) @staticmethod def 
volResize(vol, size, flags=0): diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py index ebe716e..4835adb 100644 --- a/src/wok/plugins/kimchi/model/vms.py +++ b/src/wok/plugins/kimchi/model/vms.py @@ -50,6 +50,7 @@ from wok.plugins.kimchi.config import READONLY_POOL_TYPE, get_kimchi_version from wok.plugins.kimchi.kvmusertests import UserTests from wok.plugins.kimchi.osinfo import PPC_MEM_ALIGN from wok.plugins.kimchi.model.config import CapabilitiesModel +from wok.plugins.kimchi.model.cpuinfo import CPUInfoModel from wok.plugins.kimchi.model.featuretests import FeatureTests from wok.plugins.kimchi.model.templates import TemplateModel from wok.plugins.kimchi.model.utils import get_ascii_nonascii_name, get_vm_name @@ -71,10 +72,16 @@ DOM_STATE_MAP = {0: 'nostate', 6: 'crashed', 7: 'pmsuspended'} -VM_STATIC_UPDATE_PARAMS = {'name': './name', - 'cpus': './vcpu'} +VM_STATIC_UPDATE_PARAMS = {'name': './name', 'cpus': './vcpu'} + VM_LIVE_UPDATE_PARAMS = {} +# update parameters which are updatable when the VM is online +VM_ONLINE_UPDATE_PARAMS = ['graphics', 'groups', 'memory', 'users'] +# update parameters which are updatable when the VM is offline +VM_OFFLINE_UPDATE_PARAMS = ['cpus', 'graphics', 'groups', 'memory', 'name', + 'users'] + XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" XPATH_DOMAIN_NAME = '/domain/name' @@ -84,8 +91,10 @@ XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\ XPATH_DOMAIN_MEMORY = '/domain/memory' XPATH_DOMAIN_MEMORY_UNIT = '/domain/memory/@unit' XPATH_DOMAIN_UUID = '/domain/uuid' +XPATH_DOMAIN_DEV_CPU_ID = '/domain/devices/spapr-cpu-socket/@id' XPATH_NUMA_CELL = './cpu/numa/cell' +XPATH_TOPOLOGY = './cpu/topology' # key: VM name; value: lock object vm_locks = {} @@ -98,6 +107,17 @@ class VMsModel(object): self.caps = CapabilitiesModel(**kargs) self.task = TaskModel(**kargs) + def 
_get_host_maxcpu(self): + if os.uname()[4] in ['ppc', 'ppc64', 'ppc64le']: + cpu_model = CPUInfoModel(conn=self.conn) + max_vcpu_val = (cpu_model.cores_available * + cpu_model.threads_per_core) + if max_vcpu_val > 255: + max_vcpu_val = 255 + else: + max_vcpu_val = self.conn.get().getMaxVcpus('kvm') + return max_vcpu_val + def create(self, params): t_name = template_name_from_uri(params['template']) vm_list = self.get_list() @@ -158,7 +178,8 @@ class VMsModel(object): stream_protocols = self.caps.libvirt_stream_protocols xml = t.to_vm_xml(name, vm_uuid, libvirt_stream_protocols=stream_protocols, - graphics=graphics) + graphics=graphics, + max_vcpus=self._get_host_maxcpu()) cb('Defining new VM') try: @@ -225,6 +246,30 @@ class VMModel(object): self.vmsnapshots = cls(**kargs) self.stats = {} + def has_topology(self, dom): + xml = dom.XMLDesc(0) + sockets = xpath_get_text(xml, XPATH_TOPOLOGY + '/@sockets') + cores = xpath_get_text(xml, XPATH_TOPOLOGY + '/@cores') + threads = xpath_get_text(xml, XPATH_TOPOLOGY + '/@threads') + return sockets and cores and threads + + def get_vm_max_sockets(self, dom): + return int(xpath_get_text(dom.XMLDesc(0), + XPATH_TOPOLOGY + '/@sockets')[0]) + + def get_vm_sockets(self, dom): + current_vcpu = dom.vcpusFlags(libvirt.VIR_DOMAIN_AFFECT_CURRENT) + return (current_vcpu / self.get_vm_cores(dom) / + self.get_vm_threads(dom)) + + def get_vm_cores(self, dom): + return int(xpath_get_text(dom.XMLDesc(0), + XPATH_TOPOLOGY + '/@cores')[0]) + + def get_vm_threads(self, dom): + return int(xpath_get_text(dom.XMLDesc(0), + XPATH_TOPOLOGY + '/@threads')[0]) + def update(self, name, params): lock = vm_locks.get(name) if lock is None: @@ -241,8 +286,30 @@ class VMModel(object): 'alignment': str(PPC_MEM_ALIGN)}) dom = self.get_vm(name, self.conn) - vm_name, dom = self._static_vm_update(name, dom, params) + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + ext_params = set(params.keys()) - set(VM_OFFLINE_UPDATE_PARAMS) + if len(ext_params) > 0: + raise 
InvalidParameter('KCHVM0073E', + {'params': ', '.join(ext_params)}) + else: + ext_params = set(params.keys()) - set(VM_ONLINE_UPDATE_PARAMS) + if len(ext_params) > 0: + raise InvalidParameter('KCHVM0074E', + {'params': ', '.join(ext_params)}) + + if 'cpus' in params and DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + # user cannot change vcpu if topology is defined. + curr_vcpu = dom.vcpusFlags(libvirt.VIR_DOMAIN_AFFECT_CURRENT) + if self.has_topology(dom) and curr_vcpu != params['cpus']: + raise InvalidOperation( + 'KCHVM0075E', + {'vm': dom.name(), + 'sockets': self.get_vm_sockets(dom), + 'cores': self.get_vm_cores(dom), + 'threads': self.get_vm_threads(dom)}) + self._live_vm_update(dom, params) + vm_name, dom = self._static_vm_update(name, dom, params) return vm_name def clone(self, name): @@ -707,23 +774,34 @@ class VMModel(object): params['name'], nonascii_name = get_ascii_nonascii_name(name) for key, val in params.items(): + change_numa = True if key in VM_STATIC_UPDATE_PARAMS: if type(val) == int: val = str(val) xpath = VM_STATIC_UPDATE_PARAMS[key] - new_xml = xml_item_update(new_xml, xpath, val) + attrib = None + if key == 'cpus': + if self.has_topology(dom) or dom.isActive(): + change_numa = False + continue + # Update maxvcpu firstly + new_xml = xml_item_update(new_xml, xpath, + str(self._get_host_maxcpu()), + attrib) + # Update current vcpu + attrib = 'current' + new_xml = xml_item_update(new_xml, xpath, val, attrib) # Updating memory and NUMA if necessary, if vm is offline if not dom.isActive(): if 'memory' in params: new_xml = self._update_memory_config(new_xml, params) - elif 'cpus' in params and \ + elif 'cpus' in params and change_numa and \ (xpath_get_text(new_xml, XPATH_NUMA_CELL + '/@memory') != []): - vcpus = params['cpus'] new_xml = xml_item_update( new_xml, XPATH_NUMA_CELL, - value='0-' + str(vcpus - 1) if vcpus > 1 else '0', + value='0', attr='cpus') if 'graphics' in params: @@ -758,6 +836,8 @@ class VMModel(object): raise 
OperationFailed("KCHVM0008E", {'name': vm_name, 'err': e.get_error_message()}) + if name is not None: + vm_name = name return (nonascii_name if nonascii_name is not None else vm_name, dom) def _update_memory_config(self, xml, params): @@ -769,21 +849,20 @@ class VMModel(object): vcpus = params.get('cpus') if numa_mem == []: if vcpus is None: - vcpus = int(xpath_get_text(xml, - VM_STATIC_UPDATE_PARAMS['cpus'])[0]) + vcpus = int(xpath_get_text(xml, 'vcpu')[0]) cpu = root.find('./cpu') if cpu is None: - cpu = get_cpu_xml(vcpus, params['memory'] << 10) + cpu = get_cpu_xml(0, params['memory'] << 10) root.insert(0, ET.fromstring(cpu)) else: - numa_element = get_numa_xml(vcpus, params['memory'] << 10) + numa_element = get_numa_xml(0, params['memory'] << 10) cpu.insert(0, ET.fromstring(numa_element)) else: if vcpus is not None: xml = xml_item_update( xml, XPATH_NUMA_CELL, - value='0-' + str(vcpus - 1) if vcpus > 1 else '0', + value='0', attr='cpus') root = ET.fromstring(xml_item_update(xml, XPATH_NUMA_CELL, str(params['memory'] << 10), @@ -847,6 +926,17 @@ class VMModel(object): return new_xml return ET.tostring(root, encoding="utf-8") + def _get_host_maxcpu(self): + if os.uname()[4] in ['ppc', 'ppc64', 'ppc64le']: + cpu_model = CPUInfoModel(conn=self.conn) + max_vcpu_val = (cpu_model.cores_available * + cpu_model.threads_per_core) + if max_vcpu_val > 255: + max_vcpu_val = 255 + else: + max_vcpu_val = self.conn.get().getMaxVcpus('kvm') + return max_vcpu_val + def _live_vm_update(self, dom, params): self._vm_update_access_metadata(dom, params) if 'memory' in params and dom.isActive(): diff --git a/src/wok/plugins/kimchi/tests/test_rest.py b/src/wok/plugins/kimchi/tests/test_rest.py index 34348b8..89f5d31 100644 --- a/src/wok/plugins/kimchi/tests/test_rest.py +++ b/src/wok/plugins/kimchi/tests/test_rest.py @@ -144,6 +144,10 @@ class RestTests(unittest.TestCase): vm = json.loads(self.request('/plugins/kimchi/vms/vm-1').read()) self.assertEquals('vm-1', vm['name']) + req = 
json.dumps({'cpus': 3}) + resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') + self.assertEquals(200, resp.status) + resp = self.request('/plugins/kimchi/vms/vm-1/start', '{}', 'POST') self.assertEquals(200, resp.status) @@ -155,9 +159,10 @@ class RestTests(unittest.TestCase): resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') self.assertEquals(400, resp.status) - req = json.dumps({'cpus': 3}) + # Unable to do CPU hotplug + req = json.dumps({'cpus': 5}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEquals(400, resp.status) # Check if there is support to memory hotplug, once vm is running resp = self.request('/plugins/kimchi/config/capabilities').read() @@ -171,6 +176,7 @@ class RestTests(unittest.TestCase): req = json.dumps({"graphics": {'passwd': "abcdef"}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') + self.assertEquals(200, resp.status) info = json.loads(resp.read()) self.assertEquals('abcdef', info["graphics"]["passwd"]) self.assertEquals(None, info["graphics"]["passwdValidTo"]) diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index 69cc9b5..08adf4c 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -316,7 +316,7 @@ class VMTemplate(object): cpu_info = self.info.get('cpu_info') if cpu_info is not None: cpu_topo = cpu_info.get('topology') - return get_cpu_xml(self.info.get('cpus'), + return get_cpu_xml(0, self.info.get('memory') << 10, cpu_topo) @@ -329,7 +329,6 @@ class VMTemplate(object): params['qemu-namespace'] = '' params['cdroms'] = '' params['qemu-stream-cmdline'] = '' - params['cpu_info'] = self._get_cpu_xml() params['disks'] = self._get_disks_xml(vm_uuid) params['serial'] = get_serial_xml(params) @@ -340,6 +339,8 @@ class VMTemplate(object): libvirt_stream_protocols = kwargs.get('libvirt_stream_protocols', []) cdrom_xml = self._get_cdrom_xml(libvirt_stream_protocols) + 
max_vcpus = kwargs.get('max_vcpus', 1) + if not urlparse.urlparse(self.info.get('cdrom', "")).scheme in \ libvirt_stream_protocols and \ params.get('iso_stream', False): @@ -362,6 +363,24 @@ class VMTemplate(object): if distro == "IBM_PowerKVM": params['slots'] = 32 + cpu_topo = self.info.get('cpu_info').get('topology') + if (cpu_topo is not None): + sockets = int(max_vcpus / (cpu_topo['cores'] * + cpu_topo['threads'])) + self.info['cpu_info']['topology']['sockets'] = sockets + + # Reduce maxvcpu to fit number of sockets if necessary + total_max_vcpu = sockets * cpu_topo['cores'] * cpu_topo['threads'] + if total_max_vcpu != max_vcpus: + max_vcpus = total_max_vcpu + + params['vcpus'] = "<vcpu current='%s'>%d</vcpu>" % \ + (params['cpus'], max_vcpus) + else: + params['vcpus'] = "<vcpu current='%s'>%d</vcpu>" % \ + (params['cpus'], max_vcpus) + params['cpu_info'] = self._get_cpu_xml() + xml = """ <domain type='%(domain)s'> %(qemu-stream-cmdline)s @@ -369,7 +388,7 @@ class VMTemplate(object): <uuid>%(uuid)s</uuid> <maxMemory slots='%(slots)s' unit='KiB'>%(max_memory)s</maxMemory> <memory unit='MiB'>%(memory)s</memory> - <vcpu>%(cpus)s</vcpu> + %(vcpus)s %(cpu_info)s <os> <type arch='%(arch)s'>hvm</type> -- 2.1.0

I was not able to test this patch as it depends on others. But the code looks good to me. On 30/11/2015 01:29, Rodrigo Trujillo wrote:
Currently, the VCPU count is updated by editing the configuration XML (tag: <vcpu>). However, in some cases, editing that tag will only update the maximum VCPU count, not the current one. For example, if the VM has the following XML tag:
<vcpu current='2'>8</vcpu>
and the user updates the VCPU value to 10, Kimchi will update the XML tag to:
<vcpu current='2'>10</vcpu>
Use the libvirt function "setVcpusFlags" to update the current and the maximum VCPU values to the value specified by the user.
* This patch also changes the VCPU setting when a guest is being created. Now 'current' is always set to the total number of CPUs from the Template. On PPC, maxVcpus is set to (sockets * cores * threads), capped at 255; on x86 it is the value returned by libvirt's getMaxVcpus.
* Change NUMA management. Sets all cpus to 0. There is no impact on the guest.
- Fixes VM cpu/memory offline update
Signed-off-by: Aline Manera <alinefm@linux.vnet.ibm.com> Signed-off-by: Crístian Deives <cristiandeives@gmail.com> Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/wok/plugins/kimchi/i18n.py | 3 + src/wok/plugins/kimchi/mockmodel.py | 52 ++++++-------- src/wok/plugins/kimchi/model/vms.py | 116 ++++++++++++++++++++++++++---- src/wok/plugins/kimchi/tests/test_rest.py | 10 ++- src/wok/plugins/kimchi/vmtemplate.py | 25 ++++++- 5 files changed, 157 insertions(+), 49 deletions(-)
diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py index b18048c..2f898c6 100644 --- a/src/wok/plugins/kimchi/i18n.py +++ b/src/wok/plugins/kimchi/i18n.py @@ -129,6 +129,9 @@ messages = { "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."), "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."), + "KCHVM0073E": _("Unable to update the following parameters while the VM is offline: %(params)s"), + "KCHVM0074E": _("Unable to update the following parameters while the VM is online: %(params)s"), + "KCHVM0075E": _("Cannot change VCPU value because '%(vm)s' has a topology defined - sockets: %(sockets)s, cores: %(cores)s, threads: %(threads)s."),
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py index f31a297..31533e6 100644 --- a/src/wok/plugins/kimchi/mockmodel.py +++ b/src/wok/plugins/kimchi/mockmodel.py @@ -21,6 +21,8 @@ import libvirt import lxml.etree as ET import os import time + +from collections import defaultdict from lxml import objectify from lxml.builder import E
@@ -61,10 +63,9 @@ storagevolumes.VALID_RAW_CONTENT = ['dos/mbr boot sector',
class MockModel(Model): - _mock_vms = {} + _mock_vms = defaultdict(list) _mock_snapshots = {} _XMLDesc = libvirt.virDomain.XMLDesc - _defineXML = libvirt.virConnect.defineXML _undefineDomain = libvirt.virDomain.undefine _libvirt_get_vol_path = LibvirtVMTemplate._get_volume_path
@@ -83,7 +84,6 @@ class MockModel(Model):
cpuinfo.get_topo_capabilities = MockModel.get_topo_capabilities vmifaces.getDHCPLeases = MockModel.getDHCPLeases - libvirt.virConnect.defineXML = MockModel.domainDefineXML libvirt.virDomain.XMLDesc = MockModel.domainXMLDesc libvirt.virDomain.undefine = MockModel.undefineDomain libvirt.virDomain.attachDeviceFlags = MockModel.attachDeviceFlags @@ -126,7 +126,7 @@ class MockModel(Model): imageinfo.probe_image = self._probe_image
def reset(self): - MockModel._mock_vms = {} + MockModel._mock_vms = defaultdict(list) MockModel._mock_snapshots = {}
if hasattr(self, 'objstore'): @@ -159,21 +159,14 @@ class MockModel(Model): return ET.fromstring(xml)
@staticmethod - def domainDefineXML(conn, xml): - name = objectify.fromstring(xml).name.text - try: - dom = conn.lookupByName(name) - if not dom.isActive(): - MockModel._mock_vms[name] = xml - except: - pass - - return MockModel._defineXML(conn, xml) - - @staticmethod def domainXMLDesc(dom, flags=0): - return MockModel._mock_vms.get(dom.name(), - MockModel._XMLDesc(dom, flags)) + xml = MockModel._XMLDesc(dom, flags) + root = objectify.fromstring(xml) + + for dev_xml in MockModel._mock_vms.get(dom.name(), []): + dev = objectify.fromstring(dev_xml) + root.devices.append(dev) + return ET.tostring(root, encoding="utf-8")
@staticmethod def undefineDomain(dom): @@ -184,12 +177,7 @@ class MockModel(Model):
@staticmethod def attachDeviceFlags(dom, xml, flags=0): - old_xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE) - root = objectify.fromstring(old_xml) - dev = objectify.fromstring(xml) - root.devices.append(dev) - - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8") + MockModel._mock_vms[dom.name()].append(xml)
@staticmethod def _get_device_node(dom, xml): @@ -215,16 +203,18 @@ class MockModel(Model):
@staticmethod def detachDeviceFlags(dom, xml, flags=0): - root, dev = MockModel._get_device_node(dom, xml) - root.devices.remove(dev) - - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8") + node = ET.fromstring(xml) + xml = ET.tostring(node, encoding="utf-8", pretty_print=True) + if xml in MockModel._mock_vms[dom.name()]: + MockModel._mock_vms[dom.name()].remove(xml)
@staticmethod def updateDeviceFlags(dom, xml, flags=0): - root, old_dev = MockModel._get_device_node(dom, xml) - root.devices.replace(old_dev, objectify.fromstring(xml)) - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8") + _, old_dev = MockModel._get_device_node(dom, xml) + old_xml = ET.tostring(old_dev, encoding="utf-8", pretty_print=True) + if old_xml in MockModel._mock_vms[dom.name()]: + MockModel._mock_vms[dom.name()].remove(old_xml) + MockModel._mock_vms[dom.name()].append(xml)
@staticmethod def volResize(vol, size, flags=0): diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py index ebe716e..4835adb 100644 --- a/src/wok/plugins/kimchi/model/vms.py +++ b/src/wok/plugins/kimchi/model/vms.py @@ -50,6 +50,7 @@ from wok.plugins.kimchi.config import READONLY_POOL_TYPE, get_kimchi_version from wok.plugins.kimchi.kvmusertests import UserTests from wok.plugins.kimchi.osinfo import PPC_MEM_ALIGN from wok.plugins.kimchi.model.config import CapabilitiesModel +from wok.plugins.kimchi.model.cpuinfo import CPUInfoModel from wok.plugins.kimchi.model.featuretests import FeatureTests from wok.plugins.kimchi.model.templates import TemplateModel from wok.plugins.kimchi.model.utils import get_ascii_nonascii_name, get_vm_name @@ -71,10 +72,16 @@ DOM_STATE_MAP = {0: 'nostate', 6: 'crashed', 7: 'pmsuspended'}
-VM_STATIC_UPDATE_PARAMS = {'name': './name', - 'cpus': './vcpu'} +VM_STATIC_UPDATE_PARAMS = {'name': './name', 'cpus': './vcpu'} + VM_LIVE_UPDATE_PARAMS = {}
+# update parameters which are updatable when the VM is online +VM_ONLINE_UPDATE_PARAMS = ['graphics', 'groups', 'memory', 'users'] +# update parameters which are updatable when the VM is offline +VM_OFFLINE_UPDATE_PARAMS = ['cpus', 'graphics', 'groups', 'memory', 'name', + 'users'] + XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" XPATH_DOMAIN_NAME = '/domain/name' @@ -84,8 +91,10 @@ XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\ XPATH_DOMAIN_MEMORY = '/domain/memory' XPATH_DOMAIN_MEMORY_UNIT = '/domain/memory/@unit' XPATH_DOMAIN_UUID = '/domain/uuid' +XPATH_DOMAIN_DEV_CPU_ID = '/domain/devices/spapr-cpu-socket/@id'
XPATH_NUMA_CELL = './cpu/numa/cell' +XPATH_TOPOLOGY = './cpu/topology'
# key: VM name; value: lock object vm_locks = {} @@ -98,6 +107,17 @@ class VMsModel(object): self.caps = CapabilitiesModel(**kargs) self.task = TaskModel(**kargs)
+ def _get_host_maxcpu(self): + if os.uname()[4] in ['ppc', 'ppc64', 'ppc64le']: + cpu_model = CPUInfoModel(conn=self.conn) + max_vcpu_val = (cpu_model.cores_available * + cpu_model.threads_per_core) + if max_vcpu_val > 255: + max_vcpu_val = 255 + else: + max_vcpu_val = self.conn.get().getMaxVcpus('kvm') + return max_vcpu_val + def create(self, params): t_name = template_name_from_uri(params['template']) vm_list = self.get_list() @@ -158,7 +178,8 @@ class VMsModel(object): stream_protocols = self.caps.libvirt_stream_protocols xml = t.to_vm_xml(name, vm_uuid, libvirt_stream_protocols=stream_protocols, - graphics=graphics) + graphics=graphics, + max_vcpus=self._get_host_maxcpu())
cb('Defining new VM') try: @@ -225,6 +246,30 @@ class VMModel(object): self.vmsnapshots = cls(**kargs) self.stats = {}
+ def has_topology(self, dom): + xml = dom.XMLDesc(0) + sockets = xpath_get_text(xml, XPATH_TOPOLOGY + '/@sockets') + cores = xpath_get_text(xml, XPATH_TOPOLOGY + '/@cores') + threads = xpath_get_text(xml, XPATH_TOPOLOGY + '/@threads') + return sockets and cores and threads + + def get_vm_max_sockets(self, dom): + return int(xpath_get_text(dom.XMLDesc(0), + XPATH_TOPOLOGY + '/@sockets')[0]) + + def get_vm_sockets(self, dom): + current_vcpu = dom.vcpusFlags(libvirt.VIR_DOMAIN_AFFECT_CURRENT) + return (current_vcpu / self.get_vm_cores(dom) / + self.get_vm_threads(dom)) + + def get_vm_cores(self, dom): + return int(xpath_get_text(dom.XMLDesc(0), + XPATH_TOPOLOGY + '/@cores')[0]) + + def get_vm_threads(self, dom): + return int(xpath_get_text(dom.XMLDesc(0), + XPATH_TOPOLOGY + '/@threads')[0]) + def update(self, name, params): lock = vm_locks.get(name) if lock is None: @@ -241,8 +286,30 @@ class VMModel(object): 'alignment': str(PPC_MEM_ALIGN)})
dom = self.get_vm(name, self.conn) - vm_name, dom = self._static_vm_update(name, dom, params) + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + ext_params = set(params.keys()) - set(VM_OFFLINE_UPDATE_PARAMS) + if len(ext_params) > 0: + raise InvalidParameter('KCHVM0073E', + {'params': ', '.join(ext_params)}) + else: + ext_params = set(params.keys()) - set(VM_ONLINE_UPDATE_PARAMS) + if len(ext_params) > 0: + raise InvalidParameter('KCHVM0074E', + {'params': ', '.join(ext_params)}) + + if 'cpus' in params and DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + # user cannot change vcpu if topology is defined. + curr_vcpu = dom.vcpusFlags(libvirt.VIR_DOMAIN_AFFECT_CURRENT) + if self.has_topology(dom) and curr_vcpu != params['cpus']: + raise InvalidOperation( + 'KCHVM0075E', + {'vm': dom.name(), + 'sockets': self.get_vm_sockets(dom), + 'cores': self.get_vm_cores(dom), + 'threads': self.get_vm_threads(dom)}) + self._live_vm_update(dom, params) + vm_name, dom = self._static_vm_update(name, dom, params) return vm_name
def clone(self, name): @@ -707,23 +774,34 @@ class VMModel(object): params['name'], nonascii_name = get_ascii_nonascii_name(name)
for key, val in params.items(): + change_numa = True if key in VM_STATIC_UPDATE_PARAMS: if type(val) == int: val = str(val) xpath = VM_STATIC_UPDATE_PARAMS[key] - new_xml = xml_item_update(new_xml, xpath, val) + attrib = None + if key == 'cpus': + if self.has_topology(dom) or dom.isActive(): + change_numa = False + continue + # Update maxvcpu firstly + new_xml = xml_item_update(new_xml, xpath, + str(self._get_host_maxcpu()), + attrib) + # Update current vcpu + attrib = 'current' + new_xml = xml_item_update(new_xml, xpath, val, attrib)
# Updating memory and NUMA if necessary, if vm is offline if not dom.isActive(): if 'memory' in params: new_xml = self._update_memory_config(new_xml, params) - elif 'cpus' in params and \ + elif 'cpus' in params and change_numa and \ (xpath_get_text(new_xml, XPATH_NUMA_CELL + '/@memory') != []): - vcpus = params['cpus'] new_xml = xml_item_update( new_xml, XPATH_NUMA_CELL, - value='0-' + str(vcpus - 1) if vcpus > 1 else '0', + value='0', attr='cpus')
if 'graphics' in params: @@ -758,6 +836,8 @@ class VMModel(object):
raise OperationFailed("KCHVM0008E", {'name': vm_name, 'err': e.get_error_message()}) + if name is not None: + vm_name = name return (nonascii_name if nonascii_name is not None else vm_name, dom)
def _update_memory_config(self, xml, params): @@ -769,21 +849,20 @@ class VMModel(object): vcpus = params.get('cpus') if numa_mem == []: if vcpus is None: - vcpus = int(xpath_get_text(xml, - VM_STATIC_UPDATE_PARAMS['cpus'])[0]) + vcpus = int(xpath_get_text(xml, 'vcpu')[0]) cpu = root.find('./cpu') if cpu is None: - cpu = get_cpu_xml(vcpus, params['memory'] << 10) + cpu = get_cpu_xml(0, params['memory'] << 10) root.insert(0, ET.fromstring(cpu)) else: - numa_element = get_numa_xml(vcpus, params['memory'] << 10) + numa_element = get_numa_xml(0, params['memory'] << 10) cpu.insert(0, ET.fromstring(numa_element)) else: if vcpus is not None: xml = xml_item_update( xml, XPATH_NUMA_CELL, - value='0-' + str(vcpus - 1) if vcpus > 1 else '0', + value='0', attr='cpus') root = ET.fromstring(xml_item_update(xml, XPATH_NUMA_CELL, str(params['memory'] << 10), @@ -847,6 +926,17 @@ class VMModel(object): return new_xml return ET.tostring(root, encoding="utf-8")
+ def _get_host_maxcpu(self): + if os.uname()[4] in ['ppc', 'ppc64', 'ppc64le']: + cpu_model = CPUInfoModel(conn=self.conn) + max_vcpu_val = (cpu_model.cores_available * + cpu_model.threads_per_core) + if max_vcpu_val > 255: + max_vcpu_val = 255 + else: + max_vcpu_val = self.conn.get().getMaxVcpus('kvm') + return max_vcpu_val + def _live_vm_update(self, dom, params): self._vm_update_access_metadata(dom, params) if 'memory' in params and dom.isActive(): diff --git a/src/wok/plugins/kimchi/tests/test_rest.py b/src/wok/plugins/kimchi/tests/test_rest.py index 34348b8..89f5d31 100644 --- a/src/wok/plugins/kimchi/tests/test_rest.py +++ b/src/wok/plugins/kimchi/tests/test_rest.py @@ -144,6 +144,10 @@ class RestTests(unittest.TestCase): vm = json.loads(self.request('/plugins/kimchi/vms/vm-1').read()) self.assertEquals('vm-1', vm['name'])
+ req = json.dumps({'cpus': 3}) + resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') + self.assertEquals(200, resp.status) + resp = self.request('/plugins/kimchi/vms/vm-1/start', '{}', 'POST') self.assertEquals(200, resp.status)
@@ -155,9 +159,10 @@ class RestTests(unittest.TestCase): resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') self.assertEquals(400, resp.status)
- req = json.dumps({'cpus': 3}) + # Unable to do CPU hotplug + req = json.dumps({'cpus': 5}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEquals(400, resp.status)
# Check if there is support to memory hotplug, once vm is running resp = self.request('/plugins/kimchi/config/capabilities').read() @@ -171,6 +176,7 @@ class RestTests(unittest.TestCase):
req = json.dumps({"graphics": {'passwd': "abcdef"}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') + self.assertEquals(200, resp.status) info = json.loads(resp.read()) self.assertEquals('abcdef', info["graphics"]["passwd"]) self.assertEquals(None, info["graphics"]["passwdValidTo"]) diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py index 69cc9b5..08adf4c 100644 --- a/src/wok/plugins/kimchi/vmtemplate.py +++ b/src/wok/plugins/kimchi/vmtemplate.py @@ -316,7 +316,7 @@ class VMTemplate(object): cpu_info = self.info.get('cpu_info') if cpu_info is not None: cpu_topo = cpu_info.get('topology') - return get_cpu_xml(self.info.get('cpus'), + return get_cpu_xml(0, self.info.get('memory') << 10, cpu_topo)
@@ -329,7 +329,6 @@ class VMTemplate(object): params['qemu-namespace'] = '' params['cdroms'] = '' params['qemu-stream-cmdline'] = '' - params['cpu_info'] = self._get_cpu_xml() params['disks'] = self._get_disks_xml(vm_uuid) params['serial'] = get_serial_xml(params)
@@ -340,6 +339,8 @@ class VMTemplate(object): libvirt_stream_protocols = kwargs.get('libvirt_stream_protocols', []) cdrom_xml = self._get_cdrom_xml(libvirt_stream_protocols)
+ max_vcpus = kwargs.get('max_vcpus', 1) + if not urlparse.urlparse(self.info.get('cdrom', "")).scheme in \ libvirt_stream_protocols and \ params.get('iso_stream', False): @@ -362,6 +363,24 @@ class VMTemplate(object): if distro == "IBM_PowerKVM": params['slots'] = 32
+ cpu_topo = self.info.get('cpu_info').get('topology') + if (cpu_topo is not None): + sockets = int(max_vcpus / (cpu_topo['cores'] * + cpu_topo['threads'])) + self.info['cpu_info']['topology']['sockets'] = sockets + + # Reduce maxvcpu to fit number of sockets if necessary + total_max_vcpu = sockets * cpu_topo['cores'] * cpu_topo['threads'] + if total_max_vcpu != max_vcpus: + max_vcpus = total_max_vcpu + + params['vcpus'] = "<vcpu current='%s'>%d</vcpu>" % \ + (params['cpus'], max_vcpus) + else: + params['vcpus'] = "<vcpu current='%s'>%d</vcpu>" % \ + (params['cpus'], max_vcpus) + params['cpu_info'] = self._get_cpu_xml() + xml = """ <domain type='%(domain)s'> %(qemu-stream-cmdline)s @@ -369,7 +388,7 @@ class VMTemplate(object): <uuid>%(uuid)s</uuid> <maxMemory slots='%(slots)s' unit='KiB'>%(max_memory)s</maxMemory> <memory unit='MiB'>%(memory)s</memory> - <vcpu>%(cpus)s</vcpu> + %(vcpus)s %(cpu_info)s <os> <type arch='%(arch)s'>hvm</type>
participants (2)
-
Aline Manera
-
Rodrigo Trujillo