This patch changes the backend in order to allow users to edit the maximum
memory tag. It changes the memory API to return and receive a nested object
(see the example below the change list):
'memory': {'current': XXX, 'maxmemory': YYY}.
Other changes include:
- the maxMemory XML tag is not created by default anymore;
- the maxMemory XML tag is only created if the user passes maxmemory;
- if memory and maxmemory are equal, then the maxMemory tag is removed;
- keeps the 1 TiB limit on memory and max memory;
- adds tests for the JSON API and updates the documentation.
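For illustration (values are examples only), an update of the VM resource,
e.g. PUT /vms/<vm-name>, and the corresponding lookup now use the nested
format, with both values in MiB:

    {"memory": {"current": 1024, "maxmemory": 4096}}

When maxmemory is larger than current, the backend adds a maxMemory element
with hotplug slots to the guest XML; when both values are equal, the element
is dropped.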
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com>
---
API.json | 22 +++++-
docs/API.md | 11 ++-
model/vms.py | 235 ++++++++++++++++++++++++++++++++++-------------------------
3 files changed, 161 insertions(+), 107 deletions(-)
diff --git a/API.json b/API.json
index fcde123..9f478e3 100644
--- a/API.json
+++ b/API.json
@@ -310,10 +310,24 @@
},
"cpu_info": { "$ref":
"#/kimchitype/cpu_info" },
"memory": {
- "description": "The new amount (MB) of memory for the
VM",
- "type": "integer",
- "minimum": 512,
- "error": "KCHTMPL0013E"
+ "description": "New guest memory and maximum memory
values",
+ "type": "object",
+ "properties": {
+ "current": {
+ "description": "The new amount (MB) of current
memory for the VM",
+ "type": "integer",
+ "minimum": 512,
+ "error": "KCHTMPL0013E"
+ },
+ "maxmemory": {
+ "description": "Maximum amount (MB) of memory
that a VM can reach when memory devices are live attached (memory hotplug)",
+ "type": "integer",
+ "minimum": 512,
+ "error": "KCHTMPL0013E"
+ }
+ },
+ "additionalProperties": false,
+ "error": "KCHTMPL0030E"
}
},
"additionalProperties": false
diff --git a/docs/API.md b/docs/API.md
index 33d51ac..7cb7952 100644
--- a/docs/API.md
+++ b/docs/API.md
@@ -116,7 +116,10 @@ server.
writes across all virtual disks (kb/s).
* io_throughput_peak: The highest recent value of 'io_throughput'.
* uuid: UUID of the VM.
- * memory: The amount of memory assigned to the VM (in MB)
+ * memory: The memory parameters of the VM in the unit of MiB.
+ * current: The amount of memory that is assigned to the VM.
+        * maxmemory: The maximum total of memory that the VM can have. Amount
+          over current will be used exclusively for memory hotplug
* cpu_info: CPU-specific information.
* vcpus: The number of CPUs assigned to the VM
* maxvcpus: The maximum number of CPUs that can be assigned to the VM
@@ -148,8 +151,10 @@ server.
* name: New name for this VM (only applied for shutoff VM)
* users: New list of system users.
* groups: New list of system groups.
- * memory: New amount of memory (MB) for this VM (if VM is running, new
- value will take effect in next reboot)
+    * memory: New memory parameters of the VM in the unit of MiB.
+      Provide one or both.
+ * current: New amount of memory that will be assigned to the VM.
+ * maxmemory: New maximum total of memory that the VM can have.
* graphics: A dict to show detail of VM graphics.
* passwd *(optional)*: console password. When omitted a random password
      will be generated.
diff --git a/model/vms.py b/model/vms.py
index 400ef46..6b3578f 100644
--- a/model/vms.py
+++ b/model/vms.py
@@ -97,6 +97,7 @@ XPATH_NAME = './name'
XPATH_NUMA_CELL = './cpu/numa/cell'
XPATH_TOPOLOGY = './cpu/topology'
XPATH_VCPU = './vcpu'
+XPATH_MAX_MEMORY = './maxMemory'
# key: VM name; value: lock object
vm_locks = {}
@@ -251,10 +252,12 @@ class VMModel(object):
vm_locks[name] = lock
with lock:
- # make sure memory is alingned in 256MiB in PowerPC
+        # make sure memory and maxmemory are aligned to 256MiB on PowerPC
distro, _, _ = platform.linux_distribution()
if 'memory' in params and distro == "IBM_PowerKVM":
- if params['memory'] % PPC_MEM_ALIGN != 0:
+ memory = params['memory'].get('current')
+ maxmem = params['memory'].get('maxmemory')
+ if memory % PPC_MEM_ALIGN != 0:
raise InvalidParameter('KCHVM0071E',
{'param': "Memory",
'mem': str(memory),
@@ -736,8 +739,9 @@ class VMModel(object):
# Update name
name = params.get('name')
nonascii_name = None
+ state = DOM_STATE_MAP[dom.info()[0]]
+
if name is not None:
- state = DOM_STATE_MAP[dom.info()[0]]
if state != 'shutoff':
                msg_args = {'name': vm_name, 'new_name': params['name']}
raise InvalidParameter("KCHVM0003E", msg_args)
@@ -775,8 +779,13 @@ class VMModel(object):
# topology is being undefined: remove it
new_xml = xml_item_remove(new_xml, XPATH_TOPOLOGY)
+        # <maxMemory> can only be changed offline, by updating the guest XML
+        if ("memory" in params) and ('maxmemory' in params['memory']) and\
+ (state != 'shutoff'):
+ raise InvalidParameter("KCHVM0077E")
+
# Updating memory and NUMA if necessary, if vm is offline
- if 'memory' in params and not dom.isActive():
+ if not dom.isActive() and 'memory' in params:
new_xml = self._update_memory_config(new_xml, params)
if 'graphics' in params:
@@ -816,103 +825,122 @@ class VMModel(object):
return (nonascii_name if nonascii_name is not None else vm_name, dom)
def _update_memory_config(self, xml, params):
- # Checks if NUMA memory is already configured, if not, checks if CPU
- # element is already configured (topology). Then add NUMA element as
- # apropriated
root = ET.fromstring(xml)
- numa_mem = xpath_get_text(xml, XPATH_NUMA_CELL + '/@memory')
- maxvcpus = params.get('cpu_info', {}).get('maxvcpus')
- if numa_mem == []:
- if maxvcpus is None:
- maxvcpus = int(xpath_get_text(xml, XPATH_VCPU)[0])
- cpu = root.find('./cpu')
- if cpu is None:
- cpu = get_cpu_xml(0, params['memory'] << 10)
- root.insert(0, ET.fromstring(cpu))
+ # MiB to KiB
+ hostMem = self.conn.get().getInfo()[1] << 10
+ newMem = (params['memory'].get('current', 0)) << 10
+ newMaxMem = (params['memory'].get('maxmemory', 0)) << 10
+
+ if (not newMem):
+ newMem = int(xpath_get_text(xml, XPATH_DOMAIN_MEMORY)[0])
+
+ # Check if the host supports max memory amount requested
+ if newMaxMem > MAX_MEM_LIM:
+ raise InvalidParameter('KCHVM0076E')
+ elif newMaxMem > hostMem:
+ raise InvalidParameter('KCHVM0075E',
+ {'memHost': str(hostMem)})
+
+ def _get_slots(mem, maxMem):
+ slots = (maxMem - mem) >> 10 >> 10
+            # Libvirt does not accept slots <= 1
+ if slots < 0:
+ raise OperationFailed("KCHTMPL0031E",
+ {'mem': str(mem >> 10),
+ 'maxmem': str(maxMem >> 10)})
+ elif slots == 0:
+ slots = 1
+
+ # max 32 slots on Power
+ distro, _, _ = platform.linux_distribution()
+ if distro == "IBM_PowerKVM" and slots > 32:
+ slots = 32
+ return slots
+ # End of _get_slots
+
+        # There is an issue in libvirt/QEMU where the guest does not start
+        # if memory and max memory are the same. So max memory is removed by
+        # default and only added when the user explicitly provides it,
+        # intending to do memory hotplug.
+ maxMemTag = root.find('.maxMemory')
+ if newMaxMem:
+ # Conditions:
+ if (maxMemTag is None) and (newMem != newMaxMem):
+ # Creates the maxMemory tag
+ slots = _get_slots(newMem, newMaxMem)
+ max_mem_xml = E.maxMemory(
+ str(newMaxMem),
+ unit='Kib',
+ slots=str(slots))
+ root.insert(0, max_mem_xml)
+ elif (maxMemTag is None) and (newMem == newMaxMem):
+ # Nothing to do
+ pass
+ elif (maxMemTag is not None) and (newMem != newMaxMem):
+ # Just update value in max memory tag
+ maxMemTag.text = str(newMaxMem)
+ maxMemTag.set('slots', str(_get_slots(newMem, newMaxMem)))
+ elif (maxMemTag is not None) and (newMem == newMaxMem):
+ # Remove the tag
+ root.remove(maxMemTag)
+
+ # Update memory, if necessary
+ if 'current' in params['memory']:
+ # Remove currentMemory, automatically set later by libvirt, with
+ # memory value
+ currentMem = root.find('.currentMemory')
+ if currentMem is not None:
+ root.remove(currentMem)
+
+ memory = root.find('.memory')
+            # If host/guest does not support memory hotplug, update the
+            # <memory> tag directly; otherwise libvirt sets it from the
+            # NUMA configuration
+ if not self.caps.mem_hotplug_support:
+ if memory is not None:
+ memory.text = str(newMem)
else:
- numa_element = get_numa_xml(0, params['memory'] << 10)
- cpu.insert(0, ET.fromstring(numa_element))
+ if memory is not None:
+ # Libvirt is going to set the value automatically with
+ # the value configured in NUMA tag
+ root.remove(memory)
+
+ if maxMemTag is not None:
+ if ((not newMaxMem) and (newMem == int(maxMemTag.text))):
+ root.remove(maxMemTag)
+ elif ((not newMaxMem) and (newMem != int(maxMemTag.text))):
+ maxMemTag.set('slots',
+ str(_get_slots(newMem, int(maxMemTag.text))))
+
+        # Set NUMA parameters, creating the configuration if it does not exist
+        # If the <cpu> tag does not exist, create it
+ cpu = root.find('./cpu')
+ if cpu is None:
+ cpu = get_cpu_xml(0, newMem)
+ root.insert(0, ET.fromstring(cpu))
else:
- if maxvcpus is not None:
- xml = xml_item_update(
- xml,
- XPATH_NUMA_CELL,
- value='0',
- attr='cpus')
- root = ET.fromstring(xml_item_update(xml, XPATH_NUMA_CELL,
-                                                  str(params['memory'] << 10),
- attr='memory'))
-
- # Remove currentMemory, automatically set later by libvirt
- currentMem = root.find('.currentMemory')
- if currentMem is not None:
- root.remove(currentMem)
-
- memory = root.find('.memory')
- # Update/Adds maxMemory accordingly
- if not self.caps.mem_hotplug_support:
- if memory is not None:
- memory.text = str(params['memory'] << 10)
- else:
- if memory is not None:
- root.remove(memory)
-
- def _get_slots(maxMem):
- slots = (maxMem - params['memory']) >> 10
- # Libvirt does not accepts slots <= 1
- if slots < 0:
- raise OperationFailed("KCHVM0041E",
- {'maxmem': str(maxMem)})
- elif slots == 0:
- slots = 1
-
- distro, _, _ = platform.linux_distribution()
- if distro == "IBM_PowerKVM":
- # max 32 slots on Power
- if slots > 32:
- slots = 32
- return slots
- # End of _get_slots
-
- def _get_newMaxMem():
- # Setting max memory to 4x memory requested, host total memory,
- # or 1 TB. This should avoid problems with live migration
- newMaxMem = MAX_MEM_LIM
- hostMem = self.conn.get().getInfo()[1] << 10
- if hostMem < newMaxMem:
- newMaxMem = hostMem
- mem = params.get('memory', 0)
- if (mem != 0) and (((mem * 4) << 10) < newMaxMem):
- newMaxMem = (mem * 4) << 10
-
- distro, _, _ = platform.linux_distribution()
- if distro == "IBM_PowerKVM":
- # max memory 256MiB alignment
- newMaxMem -= (newMaxMem % 256)
- return newMaxMem
-
- maxMem = root.find('.maxMemory')
- if maxMem is not None:
- root.remove(maxMem)
-
- # Setting maxMemory
- newMaxMem = _get_newMaxMem()
- slots = _get_slots(newMaxMem >> 10)
- max_mem_xml = E.maxMemory(
- str(newMaxMem),
- unit='Kib',
- slots=str(slots))
- root.insert(0, max_mem_xml)
-
- # Setting memory hard limit to max_memory + 1GiB
- memtune = root.find('memtune')
- if memtune is not None:
- hl = memtune.find('hard_limit')
- if hl is not None:
- memtune.remove(hl)
- memtune.insert(0, E.hard_limit(str(newMaxMem + 1048576),
- unit='Kib'))
-
+ # Does NUMA tag exist ?
+ numaTag = cpu.find('./numa')
+ if numaTag is None:
+ numa_element = get_numa_xml(0, newMem)
+ cpu.insert(0, ET.fromstring(numa_element))
+ else:
+ cellTag = cpu.find('./numa/cell')
+ cellTag.set('memory', str(newMem))
+
+ # Setting memory hard limit to max_memory + 1GiB
+ memtune = root.find('memtune')
+ if memtune is not None:
+ hl = memtune.find('hard_limit')
+ if hl is not None:
+ memtune.remove(hl)
+ memLimit = newMaxMem
+ if (not memLimit):
+ try:
+ memLimit = int(maxMemTag.text)
+ except:
+ memLimit = newMem
+ memtune.insert(0, E.hard_limit(str(memLimit + 1048576),
+ unit='Kib'))
return ET.tostring(root, encoding="utf-8")
def _update_cpu_info(self, new_xml, dom, new_info):
@@ -965,7 +993,7 @@ class VMModel(object):
raise OperationFailed('KCHVM0042E', {'name': dom.name()})
# Memory live update must be done in chunks of 1024 Mib or 1Gib
- new_mem = params['memory']
+ new_mem = params['memory']['current']
old_mem = int(xpath_get_text(xml, XPATH_DOMAIN_MEMORY)[0]) >> 10
if new_mem < old_mem:
raise OperationFailed('KCHVM0043E')
@@ -1195,11 +1223,18 @@ class VMModel(object):
proc.join(1)
self._serial_procs.remove(proc)
+ # Get max memory, or return "memory" if not set
+ maxmemory = xpath_get_text(xml, XPATH_MAX_MEMORY)
+ if len(maxmemory) > 0:
+ maxmemory = convert_data_size(maxmemory[0], 'KiB', 'MiB')
+ else:
+ maxmemory = memory
+
return {'name': name,
'state': state,
'stats': res,
'uuid': dom.UUIDString(),
- 'memory': memory,
+                'memory': {'current': memory, 'maxmemory': maxmemory},
'cpu_info': cpu_info,
'screenshot': screenshot,
'icon': icon,
--
2.1.0