[Kimchi-devel] [PATCH 5/5 - v4] Update VCPU by using libvirt function
Aline Manera
alinefm at linux.vnet.ibm.com
Mon Nov 30 11:20:38 UTC 2015
I was not able to test this patch as it depends on others.
But the code looks good to me.
On 30/11/2015 01:29, Rodrigo Trujillo wrote:
> Currently, the VCPU count is updated by editing the configuration XML
> (tag: <vcpu>). However, in some cases, editing that tag will only update
> the maximum VCPU count, not the current one. For example, if the VM has
> the following XML tag:
>
> <vcpu current='2'>8</vcpu>
>
> and the user updates the VCPU value to 10, Kimchi will update the
> XML tag to:
>
> <vcpu current='2'>10</vcpu>
>
> Use the libvirt function "setVcpusFlags" to update both the current and
> the maximum VCPU counts to the value specified by the user.
>
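[Editor's note: a minimal sketch of the setVcpusFlags call described above, using
libvirt-python. The connection URI and the guest name 'myguest' are placeholders,
and this is an illustration of the libvirt API, not the patch code itself.]

    import libvirt

    conn = libvirt.open('qemu:///system')
    dom = conn.lookupByName('myguest')
    new_vcpus = 10

    # Update the persistent maximum vcpu count first
    # (the same order the patch uses when rewriting the XML).
    dom.setVcpusFlags(new_vcpus,
                      libvirt.VIR_DOMAIN_AFFECT_CONFIG |
                      libvirt.VIR_DOMAIN_VCPU_MAXIMUM)

    # Then update the persistent current vcpu count.
    dom.setVcpusFlags(new_vcpus, libvirt.VIR_DOMAIN_AFFECT_CONFIG)

    conn.close()

[A running guest would additionally need VIR_DOMAIN_AFFECT_LIVE, which is
consistent with the patch only accepting the 'cpus' parameter while the VM
is offline.]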
> * This patch also changes the vcpu settings used when a guest is
> created. Now 'current' is always set to the total number of cpus
> from the Template, and maxVcpu is (sockets * cores * threads),
> capped at 255, on PPC. On x86 it is the value returned by libvirt's
> getMaxVcpus.
>
> * Change NUMA management: the NUMA cell 'cpus' attribute is now
> always set to '0'. This has no impact on the guest.
>
> - Fixes VM cpu/memory offline update
>
> Signed-off-by: Aline Manera <alinefm at linux.vnet.ibm.com>
> Signed-off-by: Crístian Deives <cristiandeives at gmail.com>
> Signed-off-by: Jose Ricardo Ziviani <joserz at linux.vnet.ibm.com>
> Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo at linux.vnet.ibm.com>
> ---
> src/wok/plugins/kimchi/i18n.py | 3 +
> src/wok/plugins/kimchi/mockmodel.py | 52 ++++++--------
> src/wok/plugins/kimchi/model/vms.py | 116 ++++++++++++++++++++++++++----
> src/wok/plugins/kimchi/tests/test_rest.py | 10 ++-
> src/wok/plugins/kimchi/vmtemplate.py | 25 ++++++-
> 5 files changed, 157 insertions(+), 49 deletions(-)
>
> diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py
> index b18048c..2f898c6 100644
> --- a/src/wok/plugins/kimchi/i18n.py
> +++ b/src/wok/plugins/kimchi/i18n.py
> @@ -129,6 +129,9 @@ messages = {
> "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."),
> "KCHVM0071E": _("Memory value %(mem)s must be aligned to %(alignment)sMiB."),
> "KCHVM0072E": _("Template given has multiple disks assigned to different types of storage pools, conflicting with storage pool provided."),
> + "KCHVM0073E": _("Unable to update the following parameters while the VM is offline: %(params)s"),
> + "KCHVM0074E": _("Unable to update the following parameters while the VM is online: %(params)s"),
> + "KCHVM0075E": _("Cannot change VCPU value because '%(vm)s' has a topology defined - sockets: %(sockets)s, cores: %(cores)s, threads: %(threads)s."),
>
> "KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."),
> "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."),
> diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py
> index f31a297..31533e6 100644
> --- a/src/wok/plugins/kimchi/mockmodel.py
> +++ b/src/wok/plugins/kimchi/mockmodel.py
> @@ -21,6 +21,8 @@ import libvirt
> import lxml.etree as ET
> import os
> import time
> +
> +from collections import defaultdict
> from lxml import objectify
> from lxml.builder import E
>
> @@ -61,10 +63,9 @@ storagevolumes.VALID_RAW_CONTENT = ['dos/mbr boot sector',
>
>
> class MockModel(Model):
> - _mock_vms = {}
> + _mock_vms = defaultdict(list)
> _mock_snapshots = {}
> _XMLDesc = libvirt.virDomain.XMLDesc
> - _defineXML = libvirt.virConnect.defineXML
> _undefineDomain = libvirt.virDomain.undefine
> _libvirt_get_vol_path = LibvirtVMTemplate._get_volume_path
>
> @@ -83,7 +84,6 @@ class MockModel(Model):
>
> cpuinfo.get_topo_capabilities = MockModel.get_topo_capabilities
> vmifaces.getDHCPLeases = MockModel.getDHCPLeases
> - libvirt.virConnect.defineXML = MockModel.domainDefineXML
> libvirt.virDomain.XMLDesc = MockModel.domainXMLDesc
> libvirt.virDomain.undefine = MockModel.undefineDomain
> libvirt.virDomain.attachDeviceFlags = MockModel.attachDeviceFlags
> @@ -126,7 +126,7 @@ class MockModel(Model):
> imageinfo.probe_image = self._probe_image
>
> def reset(self):
> - MockModel._mock_vms = {}
> + MockModel._mock_vms = defaultdict(list)
> MockModel._mock_snapshots = {}
>
> if hasattr(self, 'objstore'):
> @@ -159,21 +159,14 @@ class MockModel(Model):
> return ET.fromstring(xml)
>
> @staticmethod
> - def domainDefineXML(conn, xml):
> - name = objectify.fromstring(xml).name.text
> - try:
> - dom = conn.lookupByName(name)
> - if not dom.isActive():
> - MockModel._mock_vms[name] = xml
> - except:
> - pass
> -
> - return MockModel._defineXML(conn, xml)
> -
> - @staticmethod
> def domainXMLDesc(dom, flags=0):
> - return MockModel._mock_vms.get(dom.name(),
> - MockModel._XMLDesc(dom, flags))
> + xml = MockModel._XMLDesc(dom, flags)
> + root = objectify.fromstring(xml)
> +
> + for dev_xml in MockModel._mock_vms.get(dom.name(), []):
> + dev = objectify.fromstring(dev_xml)
> + root.devices.append(dev)
> + return ET.tostring(root, encoding="utf-8")
>
> @staticmethod
> def undefineDomain(dom):
> @@ -184,12 +177,7 @@ class MockModel(Model):
>
> @staticmethod
> def attachDeviceFlags(dom, xml, flags=0):
> - old_xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
> - root = objectify.fromstring(old_xml)
> - dev = objectify.fromstring(xml)
> - root.devices.append(dev)
> -
> - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8")
> + MockModel._mock_vms[dom.name()].append(xml)
>
> @staticmethod
> def _get_device_node(dom, xml):
> @@ -215,16 +203,18 @@ class MockModel(Model):
>
> @staticmethod
> def detachDeviceFlags(dom, xml, flags=0):
> - root, dev = MockModel._get_device_node(dom, xml)
> - root.devices.remove(dev)
> -
> - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8")
> + node = ET.fromstring(xml)
> + xml = ET.tostring(node, encoding="utf-8", pretty_print=True)
> + if xml in MockModel._mock_vms[dom.name()]:
> + MockModel._mock_vms[dom.name()].remove(xml)
>
> @staticmethod
> def updateDeviceFlags(dom, xml, flags=0):
> - root, old_dev = MockModel._get_device_node(dom, xml)
> - root.devices.replace(old_dev, objectify.fromstring(xml))
> - MockModel._mock_vms[dom.name()] = ET.tostring(root, encoding="utf-8")
> + _, old_dev = MockModel._get_device_node(dom, xml)
> + old_xml = ET.tostring(old_dev, encoding="utf-8", pretty_print=True)
> + if old_xml in MockModel._mock_vms[dom.name()]:
> + MockModel._mock_vms[dom.name()].remove(old_xml)
> + MockModel._mock_vms[dom.name()].append(xml)
>
> @staticmethod
> def volResize(vol, size, flags=0):
> diff --git a/src/wok/plugins/kimchi/model/vms.py b/src/wok/plugins/kimchi/model/vms.py
> index ebe716e..4835adb 100644
> --- a/src/wok/plugins/kimchi/model/vms.py
> +++ b/src/wok/plugins/kimchi/model/vms.py
> @@ -50,6 +50,7 @@ from wok.plugins.kimchi.config import READONLY_POOL_TYPE, get_kimchi_version
> from wok.plugins.kimchi.kvmusertests import UserTests
> from wok.plugins.kimchi.osinfo import PPC_MEM_ALIGN
> from wok.plugins.kimchi.model.config import CapabilitiesModel
> +from wok.plugins.kimchi.model.cpuinfo import CPUInfoModel
> from wok.plugins.kimchi.model.featuretests import FeatureTests
> from wok.plugins.kimchi.model.templates import TemplateModel
> from wok.plugins.kimchi.model.utils import get_ascii_nonascii_name, get_vm_name
> @@ -71,10 +72,16 @@ DOM_STATE_MAP = {0: 'nostate',
> 6: 'crashed',
> 7: 'pmsuspended'}
>
> -VM_STATIC_UPDATE_PARAMS = {'name': './name',
> - 'cpus': './vcpu'}
> +VM_STATIC_UPDATE_PARAMS = {'name': './name', 'cpus': './vcpu'}
> +
> VM_LIVE_UPDATE_PARAMS = {}
>
> +# update parameters which are updatable when the VM is online
> +VM_ONLINE_UPDATE_PARAMS = ['graphics', 'groups', 'memory', 'users']
> +# update parameters which are updatable when the VM is offline
> +VM_OFFLINE_UPDATE_PARAMS = ['cpus', 'graphics', 'groups', 'memory', 'name',
> + 'users']
> +
> XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file"
> XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']"
> XPATH_DOMAIN_NAME = '/domain/name'
> @@ -84,8 +91,10 @@ XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\
> XPATH_DOMAIN_MEMORY = '/domain/memory'
> XPATH_DOMAIN_MEMORY_UNIT = '/domain/memory/@unit'
> XPATH_DOMAIN_UUID = '/domain/uuid'
> +XPATH_DOMAIN_DEV_CPU_ID = '/domain/devices/spapr-cpu-socket/@id'
>
> XPATH_NUMA_CELL = './cpu/numa/cell'
> +XPATH_TOPOLOGY = './cpu/topology'
>
> # key: VM name; value: lock object
> vm_locks = {}
> @@ -98,6 +107,17 @@ class VMsModel(object):
> self.caps = CapabilitiesModel(**kargs)
> self.task = TaskModel(**kargs)
>
> + def _get_host_maxcpu(self):
> + if os.uname()[4] in ['ppc', 'ppc64', 'ppc64le']:
> + cpu_model = CPUInfoModel(conn=self.conn)
> + max_vcpu_val = (cpu_model.cores_available *
> + cpu_model.threads_per_core)
> + if max_vcpu_val > 255:
> + max_vcpu_val = 255
> + else:
> + max_vcpu_val = self.conn.get().getMaxVcpus('kvm')
> + return max_vcpu_val
> +
> def create(self, params):
> t_name = template_name_from_uri(params['template'])
> vm_list = self.get_list()
> @@ -158,7 +178,8 @@ class VMsModel(object):
> stream_protocols = self.caps.libvirt_stream_protocols
> xml = t.to_vm_xml(name, vm_uuid,
> libvirt_stream_protocols=stream_protocols,
> - graphics=graphics)
> + graphics=graphics,
> + max_vcpus=self._get_host_maxcpu())
>
> cb('Defining new VM')
> try:
> @@ -225,6 +246,30 @@ class VMModel(object):
> self.vmsnapshots = cls(**kargs)
> self.stats = {}
>
> + def has_topology(self, dom):
> + xml = dom.XMLDesc(0)
> + sockets = xpath_get_text(xml, XPATH_TOPOLOGY + '/@sockets')
> + cores = xpath_get_text(xml, XPATH_TOPOLOGY + '/@cores')
> + threads = xpath_get_text(xml, XPATH_TOPOLOGY + '/@threads')
> + return sockets and cores and threads
> +
> + def get_vm_max_sockets(self, dom):
> + return int(xpath_get_text(dom.XMLDesc(0),
> + XPATH_TOPOLOGY + '/@sockets')[0])
> +
> + def get_vm_sockets(self, dom):
> + current_vcpu = dom.vcpusFlags(libvirt.VIR_DOMAIN_AFFECT_CURRENT)
> + return (current_vcpu / self.get_vm_cores(dom) /
> + self.get_vm_threads(dom))
> +
> + def get_vm_cores(self, dom):
> + return int(xpath_get_text(dom.XMLDesc(0),
> + XPATH_TOPOLOGY + '/@cores')[0])
> +
> + def get_vm_threads(self, dom):
> + return int(xpath_get_text(dom.XMLDesc(0),
> + XPATH_TOPOLOGY + '/@threads')[0])
> +
> def update(self, name, params):
> lock = vm_locks.get(name)
> if lock is None:
> @@ -241,8 +286,30 @@ class VMModel(object):
> 'alignment': str(PPC_MEM_ALIGN)})
>
> dom = self.get_vm(name, self.conn)
> - vm_name, dom = self._static_vm_update(name, dom, params)
> + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
> + ext_params = set(params.keys()) - set(VM_OFFLINE_UPDATE_PARAMS)
> + if len(ext_params) > 0:
> + raise InvalidParameter('KCHVM0073E',
> + {'params': ', '.join(ext_params)})
> + else:
> + ext_params = set(params.keys()) - set(VM_ONLINE_UPDATE_PARAMS)
> + if len(ext_params) > 0:
> + raise InvalidParameter('KCHVM0074E',
> + {'params': ', '.join(ext_params)})
> +
> + if 'cpus' in params and DOM_STATE_MAP[dom.info()[0]] == 'shutoff':
> + # user cannot change vcpu if topology is defined.
> + curr_vcpu = dom.vcpusFlags(libvirt.VIR_DOMAIN_AFFECT_CURRENT)
> + if self.has_topology(dom) and curr_vcpu != params['cpus']:
> + raise InvalidOperation(
> + 'KCHVM0075E',
> + {'vm': dom.name(),
> + 'sockets': self.get_vm_sockets(dom),
> + 'cores': self.get_vm_cores(dom),
> + 'threads': self.get_vm_threads(dom)})
> +
> self._live_vm_update(dom, params)
> + vm_name, dom = self._static_vm_update(name, dom, params)
> return vm_name
>
> def clone(self, name):
> @@ -707,23 +774,34 @@ class VMModel(object):
> params['name'], nonascii_name = get_ascii_nonascii_name(name)
>
> for key, val in params.items():
> + change_numa = True
> if key in VM_STATIC_UPDATE_PARAMS:
> if type(val) == int:
> val = str(val)
> xpath = VM_STATIC_UPDATE_PARAMS[key]
> - new_xml = xml_item_update(new_xml, xpath, val)
> + attrib = None
> + if key == 'cpus':
> + if self.has_topology(dom) or dom.isActive():
> + change_numa = False
> + continue
> + # Update maxvcpu firstly
> + new_xml = xml_item_update(new_xml, xpath,
> + str(self._get_host_maxcpu()),
> + attrib)
> + # Update current vcpu
> + attrib = 'current'
> + new_xml = xml_item_update(new_xml, xpath, val, attrib)
>
> # Updating memory and NUMA if necessary, if vm is offline
> if not dom.isActive():
> if 'memory' in params:
> new_xml = self._update_memory_config(new_xml, params)
> - elif 'cpus' in params and \
> + elif 'cpus' in params and change_numa and \
> (xpath_get_text(new_xml, XPATH_NUMA_CELL + '/@memory') != []):
> - vcpus = params['cpus']
> new_xml = xml_item_update(
> new_xml,
> XPATH_NUMA_CELL,
> - value='0-' + str(vcpus - 1) if vcpus > 1 else '0',
> + value='0',
> attr='cpus')
>
> if 'graphics' in params:
> @@ -758,6 +836,8 @@ class VMModel(object):
>
> raise OperationFailed("KCHVM0008E", {'name': vm_name,
> 'err': e.get_error_message()})
> + if name is not None:
> + vm_name = name
> return (nonascii_name if nonascii_name is not None else vm_name, dom)
>
> def _update_memory_config(self, xml, params):
> @@ -769,21 +849,20 @@ class VMModel(object):
> vcpus = params.get('cpus')
> if numa_mem == []:
> if vcpus is None:
> - vcpus = int(xpath_get_text(xml,
> - VM_STATIC_UPDATE_PARAMS['cpus'])[0])
> + vcpus = int(xpath_get_text(xml, 'vcpu')[0])
> cpu = root.find('./cpu')
> if cpu is None:
> - cpu = get_cpu_xml(vcpus, params['memory'] << 10)
> + cpu = get_cpu_xml(0, params['memory'] << 10)
> root.insert(0, ET.fromstring(cpu))
> else:
> - numa_element = get_numa_xml(vcpus, params['memory'] << 10)
> + numa_element = get_numa_xml(0, params['memory'] << 10)
> cpu.insert(0, ET.fromstring(numa_element))
> else:
> if vcpus is not None:
> xml = xml_item_update(
> xml,
> XPATH_NUMA_CELL,
> - value='0-' + str(vcpus - 1) if vcpus > 1 else '0',
> + value='0',
> attr='cpus')
> root = ET.fromstring(xml_item_update(xml, XPATH_NUMA_CELL,
> str(params['memory'] << 10),
> @@ -847,6 +926,17 @@ class VMModel(object):
> return new_xml
> return ET.tostring(root, encoding="utf-8")
>
> + def _get_host_maxcpu(self):
> + if os.uname()[4] in ['ppc', 'ppc64', 'ppc64le']:
> + cpu_model = CPUInfoModel(conn=self.conn)
> + max_vcpu_val = (cpu_model.cores_available *
> + cpu_model.threads_per_core)
> + if max_vcpu_val > 255:
> + max_vcpu_val = 255
> + else:
> + max_vcpu_val = self.conn.get().getMaxVcpus('kvm')
> + return max_vcpu_val
> +
> def _live_vm_update(self, dom, params):
> self._vm_update_access_metadata(dom, params)
> if 'memory' in params and dom.isActive():
> diff --git a/src/wok/plugins/kimchi/tests/test_rest.py b/src/wok/plugins/kimchi/tests/test_rest.py
> index 34348b8..89f5d31 100644
> --- a/src/wok/plugins/kimchi/tests/test_rest.py
> +++ b/src/wok/plugins/kimchi/tests/test_rest.py
> @@ -144,6 +144,10 @@ class RestTests(unittest.TestCase):
> vm = json.loads(self.request('/plugins/kimchi/vms/vm-1').read())
> self.assertEquals('vm-1', vm['name'])
>
> + req = json.dumps({'cpus': 3})
> + resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT')
> + self.assertEquals(200, resp.status)
> +
> resp = self.request('/plugins/kimchi/vms/vm-1/start', '{}', 'POST')
> self.assertEquals(200, resp.status)
>
> @@ -155,9 +159,10 @@ class RestTests(unittest.TestCase):
> resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT')
> self.assertEquals(400, resp.status)
>
> - req = json.dumps({'cpus': 3})
> + # Unable to do CPU hotplug
> + req = json.dumps({'cpus': 5})
> resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT')
> - self.assertEquals(200, resp.status)
> + self.assertEquals(400, resp.status)
>
> # Check if there is support to memory hotplug, once vm is running
> resp = self.request('/plugins/kimchi/config/capabilities').read()
> @@ -171,6 +176,7 @@ class RestTests(unittest.TestCase):
>
> req = json.dumps({"graphics": {'passwd': "abcdef"}})
> resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT')
> + self.assertEquals(200, resp.status)
> info = json.loads(resp.read())
> self.assertEquals('abcdef', info["graphics"]["passwd"])
> self.assertEquals(None, info["graphics"]["passwdValidTo"])
> diff --git a/src/wok/plugins/kimchi/vmtemplate.py b/src/wok/plugins/kimchi/vmtemplate.py
> index 69cc9b5..08adf4c 100644
> --- a/src/wok/plugins/kimchi/vmtemplate.py
> +++ b/src/wok/plugins/kimchi/vmtemplate.py
> @@ -316,7 +316,7 @@ class VMTemplate(object):
> cpu_info = self.info.get('cpu_info')
> if cpu_info is not None:
> cpu_topo = cpu_info.get('topology')
> - return get_cpu_xml(self.info.get('cpus'),
> + return get_cpu_xml(0,
> self.info.get('memory') << 10,
> cpu_topo)
>
> @@ -329,7 +329,6 @@ class VMTemplate(object):
> params['qemu-namespace'] = ''
> params['cdroms'] = ''
> params['qemu-stream-cmdline'] = ''
> - params['cpu_info'] = self._get_cpu_xml()
> params['disks'] = self._get_disks_xml(vm_uuid)
> params['serial'] = get_serial_xml(params)
>
> @@ -340,6 +339,8 @@ class VMTemplate(object):
> libvirt_stream_protocols = kwargs.get('libvirt_stream_protocols', [])
> cdrom_xml = self._get_cdrom_xml(libvirt_stream_protocols)
>
> + max_vcpus = kwargs.get('max_vcpus', 1)
> +
> if not urlparse.urlparse(self.info.get('cdrom', "")).scheme in \
> libvirt_stream_protocols and \
> params.get('iso_stream', False):
> @@ -362,6 +363,24 @@ class VMTemplate(object):
> if distro == "IBM_PowerKVM":
> params['slots'] = 32
>
> + cpu_topo = self.info.get('cpu_info').get('topology')
> + if (cpu_topo is not None):
> + sockets = int(max_vcpus / (cpu_topo['cores'] *
> + cpu_topo['threads']))
> + self.info['cpu_info']['topology']['sockets'] = sockets
> +
> + # Reduce maxvcpu to fit number of sockets if necessary
> + total_max_vcpu = sockets * cpu_topo['cores'] * cpu_topo['threads']
> + if total_max_vcpu != max_vcpus:
> + max_vcpus = total_max_vcpu
> +
> + params['vcpus'] = "<vcpu current='%s'>%d</vcpu>" % \
> + (params['cpus'], max_vcpus)
> + else:
> + params['vcpus'] = "<vcpu current='%s'>%d</vcpu>" % \
> + (params['cpus'], max_vcpus)
> + params['cpu_info'] = self._get_cpu_xml()
> +
> xml = """
> <domain type='%(domain)s'>
> %(qemu-stream-cmdline)s
> @@ -369,7 +388,7 @@ class VMTemplate(object):
> <uuid>%(uuid)s</uuid>
> <maxMemory slots='%(slots)s' unit='KiB'>%(max_memory)s</maxMemory>
> <memory unit='MiB'>%(memory)s</memory>
> - <vcpu>%(cpus)s</vcpu>
> + %(vcpus)s
> %(cpu_info)s
> <os>
> <type arch='%(arch)s'>hvm</type>