[Kimchi-devel] [PATCH 5/6 - V2] [Memory HotPlug] Fix VM offline memory update and fix slots assignment
Rodrigo Trujillo
rodrigo.trujillo at linux.vnet.ibm.com
Thu May 28 13:59:24 UTC 2015
This patch changes the current memory update process, i.e. the static update
allowed when the guest is offline. The update now creates the new XML elements
that allow memory to be hotplugged later, if necessary (MAXMEMORY, CPU, NUMA,
NODE, etc.). It also introduces checks to avoid errors if the user assigns
more memory than the host allows.
The memory device slot count and assignment were changed to avoid errors,
such as a negative or zero slot count. The slot count is now updated as the
static memory is changed.
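For illustration only (not part of the patch), the sketch below mirrors
the new slot and maxMemory/NUMA logic, assuming host and guest memory are
given in MiB; the helper name build_hotplug_elements and its arguments are
invented for the example:

    from lxml import etree
    from lxml.builder import E

    def build_hotplug_elements(host_mem_mib, guest_mem_mib, vcpus):
        # One slot per remaining GiB between host and guest memory
        slots = (host_mem_mib - guest_mem_mib) // 1024
        if slots < 0:
            raise ValueError("guest memory is higher than the host maximum")
        elif slots == 0:
            slots = 1  # libvirt requires at least one slot

        max_memory = E.maxMemory(str(host_mem_mib * 1024), unit='KiB',
                                 slots=str(slots))
        numa = E.numa(E.cell(id='0',
                             cpus='0-%d' % (vcpus - 1) if vcpus > 1 else '0',
                             memory=str(guest_mem_mib * 1024), unit='KiB'))
        return max_memory, numa

    # e.g. 8 GiB host, 2 GiB guest, 2 vCPUs -> 6 slots
    for elem in build_hotplug_elements(8192, 2048, 2):
        print(etree.tostring(elem, pretty_print=True))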
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo at linux.vnet.ibm.com>
---
src/kimchi/i18n.py | 1 +
src/kimchi/model/vms.py | 87 ++++++++++++++++++++++++++++++++++++++----------
src/kimchi/vmtemplate.py | 11 ++++++
3 files changed, 82 insertions(+), 17 deletions(-)
diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py
index e6e00b8..1779597 100644
--- a/src/kimchi/i18n.py
+++ b/src/kimchi/i18n.py
@@ -111,6 +111,7 @@ messages = {
"KCHVM0038E": _("Unable to suspend VM '%(name)s'. Details: %(err)s"),
"KCHVM0039E": _("Cannot resume VM '%(name)s' because it is not paused."),
"KCHVM0040E": _("Unable to resume VM '%(name)s'. Details: %(err)s"),
+ "KCHVM0041E": _("Memory assigned is higher then the maximum allowed in the host."),
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."),
"KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."),
diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py
index dc7f91f..5834a65 100644
--- a/src/kimchi/model/vms.py
+++ b/src/kimchi/model/vms.py
@@ -59,8 +59,7 @@ DOM_STATE_MAP = {0: 'nostate',
7: 'pmsuspended'}
VM_STATIC_UPDATE_PARAMS = {'name': './name',
- 'cpus': './vcpu',
- 'memory': './memory'}
+ 'cpus': './vcpu'}
VM_LIVE_UPDATE_PARAMS = {}
XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file"
@@ -73,6 +72,8 @@ XPATH_DOMAIN_MEMORY = '/domain/memory'
XPATH_DOMAIN_MEMORY_UNIT = '/domain/memory/@unit'
XPATH_DOMAIN_UUID = '/domain/uuid'
+XPATH_NUMA_CELL = './cpu/numa/cell'
+
class VMsModel(object):
def __init__(self, **kargs):
@@ -95,12 +96,9 @@ class VMsModel(object):
vm_overrides['storagepool'] = pool_uri
vm_overrides['fc_host_support'] = self.caps.fc_host_support
- # Setting maxMemory and slots parameter values
- # getInfo memory value comes in MiB, so dividing by 1024 integer,
- # gives the interger maximum number of slots to use in chunks of
- # 1 GB
+ # Set maxMemory of the VM, which will be equal to the host memory.
+ # Host memory comes in MiB, so convert it to KiB
vm_overrides['max_memory'] = self.conn.get().getInfo()[1] * 1024
- vm_overrides['slots'] = self.conn.get().getInfo()[1] / 1024
t = TemplateModel.get_template(t_name, self.objstore, self.conn,
vm_overrides)
@@ -659,15 +657,15 @@ class VMModel(object):
for key, val in params.items():
if key in VM_STATIC_UPDATE_PARAMS:
- if key == 'memory':
- # Libvirt saves memory in KiB. Retrieved xml has memory
- # in KiB too, so new valeu must be in KiB here
- val = val * 1024
if type(val) == int:
val = str(val)
xpath = VM_STATIC_UPDATE_PARAMS[key]
new_xml = xml_item_update(new_xml, xpath, val)
+ # Update memory and add NUMA elements if necessary
+ if 'memory' in params:
+ new_xml = self._update_memory(new_xml, params)
+
if 'graphics' in params:
new_xml = self._update_graphics(dom, new_xml, params)
@@ -695,12 +693,7 @@ class VMModel(object):
# Undefine old vm, only if name is going to change
dom.undefine()
- root = ET.fromstring(new_xml)
- currentMem = root.find('.currentMemory')
- if currentMem is not None:
- root.remove(currentMem)
-
- dom = conn.defineXML(ET.tostring(root, encoding="utf-8"))
+ dom = conn.defineXML(new_xml)
if 'name' in params:
self._redefine_snapshots(dom, snapshots_info)
except libvirt.libvirtError as e:
@@ -712,6 +705,66 @@ class VMModel(object):
'err': e.get_error_message()})
return dom
+ def _update_memory(self, xml, params):
+ # Check whether NUMA memory is already configured; if not, check
+ # whether the CPU element (topology) is already configured, then
+ # add the NUMA element as appropriate
+ root = ET.fromstring(xml)
+ numa_mem = xpath_get_text(xml, XPATH_NUMA_CELL + '/@memory')
+ if numa_mem == []:
+ # xpath_get_text() returns a list of strings, so take the first
+ # entry and convert it to an integer
+ vcpus = int(xpath_get_text(xml, VM_STATIC_UPDATE_PARAMS['cpus'])[0])
+ numa_element = E.numa(E.cell(
+ id='0',
+ cpus='0-' + str(vcpus - 1) if vcpus > 1 else '0',
+ memory=str(params['memory'] << 10),
+ unit='KiB'))
+ cpu = root.find('./cpu')
+ if cpu is None:
+ root.insert(0, E.cpu(numa_element))
+ else:
+ cpu.insert(0, numa_element)
+ else:
+ root = ET.fromstring(xml_item_update(xml, XPATH_NUMA_CELL,
+ str(params['memory'] << 10),
+ attr='memory'))
+
+ # Remove currentMemory, automatically set later by libvirt
+ currentMem = root.find('.currentMemory')
+ if currentMem is not None:
+ root.remove(currentMem)
+
+ memory = root.find('.memory')
+ # Update <memory>, or replace it with <maxMemory> when hotplug is supported
+ if not self.caps.mem_hotplug_support:
+ if memory is not None:
+ memory.text = str(params['memory'] << 10)
+ else:
+ if memory is not None:
+ root.remove(memory)
+ maxMem = root.find('.maxMemory')
+ host_mem = self.conn.get().getInfo()[1]
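+ # host_mem and params['memory'] are both in MiB; each slot holds 1 GiB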
+ slots = (host_mem - params['memory']) / 1024
+ # Libvirt requires at least one slot
+ if slots < 0:
+ raise OperationFailed("KCHVM0041E")
+ elif slots == 0:
+ slots = 1
+ if maxMem is None:
+ max_mem_xml = E.maxMemory(
+ str(host_mem * 1024),
+ unit='KiB',
+ slots=str(slots))
+ root.insert(0, max_mem_xml)
+ new_xml = ET.tostring(root, encoding="utf-8")
+ else:
+ # Update slots only
+ new_xml = xml_item_update(ET.tostring(root, encoding="utf-8"),
+ './maxMemory',
+ str(slots),
+ attr='slots')
+ return new_xml
+ return ET.tostring(root, encoding="utf-8")
+
def _live_vm_update(self, dom, params):
self._vm_update_access_metadata(dom, params)
diff --git a/src/kimchi/vmtemplate.py b/src/kimchi/vmtemplate.py
index a20098d..00de7c2 100644
--- a/src/kimchi/vmtemplate.py
+++ b/src/kimchi/vmtemplate.py
@@ -316,6 +316,17 @@ class VMTemplate(object):
else:
params['cdroms'] = cdrom_xml
+ # Set the maximum number of slots to avoid errors when hotplugging memory.
+ # The number of slots is the number of 1 GiB chunks that fit in the host
+ # max_memory minus the memory already assigned to the VM
+ params['slots'] = ((params['max_memory'] / 1024) -
+ params['memory']) / 1024
+
+ if params['slots'] < 0:
+ raise OperationFailed("KCHVM0041E")
+ elif params['slots'] == 0:
+ params['slots'] = 1
+
xml = """
<domain type='%(domain)s'>
%(qemu-stream-cmdline)s
--
2.1.0