[PATCH] Multiple changes in guest maxMemory management
by Rodrigo Trujillo
This patch includes 3 changes:
- Limit max_memory to 1 TB
- Set max_memory to 4 * guest memory
- Set the memory "hard_limit" tag to (maxMemory + 1 GiB)
All changes are related to issues found with memory in PCI passthrough
or during Live Migration tests.
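In short, the new maxMemory is the smallest of 1 TB, four times the requested
guest memory, and host physical memory (kept 256 MiB aligned on IBM_PowerKVM),
and the memtune hard_limit becomes that value plus 1 GiB. A minimal sketch of
that selection (values in KiB; the names below are illustrative only, the real
code is in the diff that follows):

MAX_MEM_LIM = 1 << 30           # 1 TB expressed in KiB
PPC_ALIGN_KIB = 256 << 10       # 256 MiB expressed in KiB

def pick_max_memory(requested_mib, host_kib, on_powerkvm=False):
    # Smallest of: 1 TB, 4x the requested memory, host physical memory.
    max_mem = min(MAX_MEM_LIM, host_kib)
    if requested_mib:
        max_mem = min(max_mem, (requested_mib * 4) << 10)
    if on_powerkvm:
        # Power requires the value to stay 256 MiB aligned.
        max_mem -= max_mem % PPC_ALIGN_KIB
    hard_limit = max_mem + (1024 << 10)   # maxMemory + 1 GiB
    return max_mem, hard_limit

For example, a 2048 MiB guest on a 64 GiB host would get maxMemory = 8388608
KiB (8 GiB) and hard_limit = 9437184 KiB (9 GiB).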
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo(a)linux.vnet.ibm.com>
---
model/vms.py | 98 ++++++++++++++++++++++++++++++++++-------------------------
osinfo.py | 11 ++++++-
vmtemplate.py | 13 ++++++++
3 files changed, 79 insertions(+), 43 deletions(-)
diff --git a/model/vms.py b/model/vms.py
index 4835adb..47dd0ce 100644
--- a/model/vms.py
+++ b/model/vms.py
@@ -57,6 +57,7 @@ from wok.plugins.kimchi.model.utils import get_ascii_nonascii_name, get_vm_name
from wok.plugins.kimchi.model.utils import get_metadata_node
from wok.plugins.kimchi.model.utils import remove_metadata_node
from wok.plugins.kimchi.model.utils import set_metadata_node
+from wok.plugins.kimchi.osinfo import MAX_MEM_LIM
from wok.plugins.kimchi.screenshot import VMScreenshot
from wok.plugins.kimchi.utils import template_name_from_uri
from wok.plugins.kimchi.xmlutils.cpu import get_cpu_xml, get_numa_xml
@@ -881,49 +882,62 @@ class VMModel(object):
else:
if memory is not None:
root.remove(memory)
+
+ def _get_slots(maxMem):
+ slots = (maxMem - params['memory']) >> 10
+ # Libvirt does not accept slots <= 1
+ if slots < 0:
+ raise OperationFailed("KCHVM0041E")
+ elif slots == 0:
+ slots = 1
+
+ distro, _, _ = platform.linux_distribution()
+ if distro == "IBM_PowerKVM":
+ # max 32 slots on Power
+ if slots > 32:
+ slots = 32
+ return slots
+ # End of _get_slots
+
+ def _get_newMaxMem():
+ # Set max memory to the smallest of: 4x the requested memory,
+ # host total memory, or 1 TB. This avoids problems with live migration
+ newMaxMem = MAX_MEM_LIM
+ hostMem = self.conn.get().getInfo()[1] << 10
+ if hostMem < newMaxMem:
+ newMaxMem = hostMem
+ mem = params.get('memory', 0)
+ if (mem != 0) and (((mem * 4) << 10) < newMaxMem):
+ newMaxMem = (mem * 4) << 10
+
+ distro, _, _ = platform.linux_distribution()
+ if distro == "IBM_PowerKVM":
+ # max memory 256 MiB alignment (newMaxMem is in KiB)
+ newMaxMem -= (newMaxMem % (PPC_MEM_ALIGN << 10))
+ return newMaxMem
+
maxMem = root.find('.maxMemory')
- host_mem = self.conn.get().getInfo()[1]
- slots = (host_mem - params['memory']) >> 10
- # Libvirt does not accepts slots <= 1
- if slots < 0:
- raise OperationFailed("KCHVM0041E")
- elif slots == 0:
- slots = 1
-
- force_max_mem_update = False
- distro, _, _ = platform.linux_distribution()
- if distro == "IBM_PowerKVM":
- # max memory 256MiB alignment
- host_mem -= (host_mem % PPC_MEM_ALIGN)
- # force max memory update if it exists but it's wrong.
- if maxMem is not None and\
- int(maxMem.text) != (host_mem << 10):
- force_max_mem_update = True
-
- # max 32 slots on Power
- if slots > 32:
- slots = 32
-
- if maxMem is None:
- max_mem_xml = E.maxMemory(
- str(host_mem << 10),
- unit='Kib',
- slots=str(slots))
- root.insert(0, max_mem_xml)
- new_xml = ET.tostring(root, encoding="utf-8")
- else:
- # Update slots only
- new_xml = xml_item_update(ET.tostring(root, encoding="utf-8"),
- './maxMemory',
- str(slots),
- attr='slots')
-
- if force_max_mem_update:
- new_xml = xml_item_update(new_xml,
- './maxMemory',
- str(host_mem << 10))
-
- return new_xml
+ if maxMem is not None:
+ root.remove(maxMem)
+
+ # Setting maxMemory
+ newMaxMem = _get_newMaxMem()
+ slots = _get_slots(newMaxMem >> 10)
+ max_mem_xml = E.maxMemory(
+ str(newMaxMem),
+ unit='Kib',
+ slots=str(slots))
+ root.insert(0, max_mem_xml)
+
+ # Setting memory hard limit to max_memory + 1 GiB
+ memtune = root.find('memtune')
+ if memtune is not None:
+ hl = memtune.find('hard_limit')
+ if hl is not None:
+ memtune.remove(hl)
+ memtune.insert(0, E.hard_limit(str(newMaxMem + 1048576),
+ unit='Kib'))
+
return ET.tostring(root, encoding="utf-8")
def _get_host_maxcpu(self):
diff --git a/osinfo.py b/osinfo.py
index 1891398..33757a3 100644
--- a/osinfo.py
+++ b/osinfo.py
@@ -88,6 +88,9 @@ modern_version_bases = {'x86': {'debian': '6.0', 'ubuntu': '7.10',
icon_available_distros = [icon[5:-4] for icon in glob.glob1('%s/images/'
% PluginPaths('kimchi').ui_dir, 'icon-*.png')]
+# Max memory 1TB, in KiB
+MAX_MEM_LIM = 1073741824
+
def _get_arch():
for arch, sub_archs in SUPPORTED_ARCHS.iteritems():
@@ -199,7 +202,9 @@ def lookup(distro, version):
params['os_version'] = version
arch = _get_arch()
- # Setting maxMemory of the VM, which will be equal total Host memory in Kib
+ # Setting maxMemory of the VM, which will be the smallest of:
+ # 1 TB, (Template Memory * 4), or Host Physical Memory.
+ # Here, we return 1 TB or the aligned Host Physical Memory.
if hasattr(psutil, 'virtual_memory'):
params['max_memory'] = psutil.virtual_memory().total >> 10
else:
@@ -212,6 +217,10 @@ def lookup(distro, version):
alignment = params['max_memory'] % (PPC_MEM_ALIGN << 10)
params['max_memory'] -= alignment
+ # Setting limit to 1TB
+ if params['max_memory'] > MAX_MEM_LIM:
+ params['max_memory'] = MAX_MEM_LIM
+
if distro in modern_version_bases[arch]:
if LooseVersion(version) >= LooseVersion(
modern_version_bases[arch][distro]):
diff --git a/vmtemplate.py b/vmtemplate.py
index b90f221..b6e9431 100644
--- a/vmtemplate.py
+++ b/vmtemplate.py
@@ -352,6 +352,13 @@ class VMTemplate(object):
else:
params['cdroms'] = cdrom_xml
+ # To avoid problems with live migration, set maxMemory of the VM
+ # to the smallest of:
+ # 1 TB, (Template Memory * 4), or Host Physical Memory.
+ tmp_max_mem = (params['memory'] << 10) * 4
+ if tmp_max_mem < params['max_memory']:
+ params['max_memory'] = tmp_max_mem
+
# Setting maximum number of slots to avoid errors when hotplug memory
# Number of slots are the numbers of chunks of 1GB that fit inside
# the max_memory of the host minus memory assigned to the VM. It
@@ -367,6 +374,9 @@ class VMTemplate(object):
if distro == "IBM_PowerKVM":
params['slots'] = 32
+ # set a hard limit of max_memory + 1 GiB
+ params['hard_limit'] = params['max_memory'] + (1024 << 10)
+
cpu_topo = self.info.get('cpu_info').get('topology')
if (cpu_topo is not None):
sockets = int(max_vcpus / (cpu_topo['cores'] *
@@ -390,6 +400,9 @@ class VMTemplate(object):
%(qemu-stream-cmdline)s
<name>%(name)s</name>
<uuid>%(uuid)s</uuid>
+ <memtune>
+ <hard_limit unit='KiB'>%(hard_limit)s</hard_limit>
+ </memtune>
<maxMemory slots='%(slots)s' unit='KiB'>%(max_memory)s</maxMemory>
<memory unit='MiB'>%(memory)s</memory>
%(vcpus)s
--
2.1.0
[Wok 0/2] RPM build fixes
by Lucio Correia
Lucio Correia (2):
Fix version error in RPM building
Remove unnecessary kimchi references
Makefile.am | 6 +-----
contrib/wok.spec.suse.in | 8 --------
2 files changed, 1 insertion(+), 13 deletions(-)
--
1.9.1
[PATCH] [Kimchi] Bugfix 778: Add volume freezes screen
by Socorro Stoppler
This patch fixes the screen freezing when adding a storage volume
via file upload. The problem was that the wrong item was being
retrieved from the array, since the path had changed.
Socorro Stoppler (1):
Fix screen freezing when upload file for storage volume
ui/js/src/kimchi.storage_main.js | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--
1.9.1
[PATCH] Issue #753: Avoid store max_memory in Templates records
by Rodrigo Trujillo
When Kimchi creates a new template, it records the value of
max_memory. This value depends on the host memory and the guest memory
assigned or updated. If the value remains in the objectstore, Kimchi
will always use it, which is wrong, since host total memory or guest
memory can change, and max_memory should be updated accordingly.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo(a)linux.vnet.ibm.com>
---
model/templates.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/model/templates.py b/model/templates.py
index 84cdd02..eb72ea1 100644
--- a/model/templates.py
+++ b/model/templates.py
@@ -95,6 +95,9 @@ class TemplatesModel(object):
with self.objstore as session:
if name in session.get_list('template'):
raise InvalidOperation("KCHTMPL0001E", {'name': name})
+ # It is not necessary to store max_memory in the Template record
+ if 'max_memory' in t.info:
+ del t.info['max_memory']
session.store('template', name, t.info,
get_kimchi_version())
except InvalidOperation:
--
2.1.0
[PATCH] [Kimchi 0/2] Issue 793: Disable Libvirt hot-plugging support for multi-function adapters
by Jose Ricardo Ziviani
Libvirt does not yet support hot-plug of multi-function devices (but it will). So this patchset disables hot-plugging for such devices.
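There is no diff in this cover letter, so purely as an illustration (the
function names and XML handling below are assumptions, not the actual
model/host.py change), one way to flag a multi-function PCI device from
libvirt node-device XML could be:

import libvirt
from xml.etree import ElementTree as ET

def _pci_address(dev):
    # Return (domain, bus, slot, function) from a nodedev XML,
    # or None if the device is not a PCI device.
    cap = ET.fromstring(dev.XMLDesc(0)).find("capability[@type='pci']")
    if cap is None:
        return None
    return tuple(cap.find(tag).text
                 for tag in ('domain', 'bus', 'slot', 'function'))

def is_multifunction(conn, name):
    # A device is multi-function if another PCI function shares its
    # domain/bus/slot address.
    addr = _pci_address(conn.nodeDeviceLookupByName(name))
    others = [_pci_address(d) for d in conn.listAllDevices(0)]
    return len([a for a in others if a and a[:3] == addr[:3]]) > 1

A caller would pass an open libvirt connection and a node-device name such
as 'pci_0000_01_00_0'.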
Jose Ricardo Ziviani (2):
Add multi-function field in PCI information
Disable hotplugging buttons of multi-function devices
model/host.py | 27 ++++++++++++++++++++++++++-
model/vmhostdevs.py | 3 ++-
ui/js/src/kimchi.guest_edit_main.js | 3 +++
3 files changed, 31 insertions(+), 2 deletions(-)
--
1.9.1
[PATCH 00/10] FVT testcases base framework
by archus@linux.vnet.ibm.com
From: Archana Singh <archus(a)linux.vnet.ibm.com>
Patch adding an FVT testcase base framework that can be used across plugins.
The changes are:
1) Added fvt package inside tests directory.
2) Added config file inside fvt package to have session details.
3) restapilib.py to have common classes/methods for REST API calls.
4) fvt_base.py, a base test class that takes care of the common setup
and teardown required for any fvt test case, such as creating/destroying
the session using the config file and restapilib.py (see the sketch
after this list).
5) run_tests.sh.in script to install all the dependencies and to run all FVT.
6) Makefile changes to add 'make check-fvt' for running all the FVT.
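As a rough sketch only, assuming a requests-based session and a 'main'
config section (none of these names are the series' actual code), the base
class described in item 4 could look like:

import unittest

import requests
from ConfigParser import ConfigParser   # Python 2, as used in the rest of the tree


class FVTBase(unittest.TestCase):
    """Common setup/teardown shared by all FVT test cases."""

    @classmethod
    def setUpClass(cls):
        conf = ConfigParser()
        conf.read('tests/fvt/config')
        cls.base_url = 'https://%s:%s' % (conf.get('main', 'host'),
                                          conf.get('main', 'port'))
        cls.session = requests.Session()
        cls.session.verify = False
        # Create the authenticated session once per test class.
        cls.session.post(cls.base_url + '/login',
                         data={'username': conf.get('main', 'username'),
                               'password': conf.get('main', 'password')})

    @classmethod
    def tearDownClass(cls):
        # Destroy the session created in setUpClass().
        cls.session.close()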
Archana Singh (10):
Package for functional verification testcases
Wok level config file to have sections required for functional
verification test common across plugins.
Lists all dependencies for fvt testcases.
Common classes/methods for API calls as per config file
configuration.
Base test class, takes care of common actions required for any FVT
test case like creating/destroying the session, authorization using
the wok-level fvt config file, creating the JSON validator and logging.
Installs all the dependencies from requirements.txt in a python
virtualenv and runs all the FVT test cases.
Makefile needed to build and run fvt.
Added fvt as subdirs and check-fvt to run fvt testcases using
make.
Added FVT Makefile in AC_CONFIG_FILES list.
Added check-fvt to run FVT testcases using make and venv dir to be
cleaned.
Makefile.am | 8 +-
configure.ac | 1 +
tests/Makefile.am | 6 +
tests/fvt/Makefile.am | 43 +++
tests/fvt/__init__.py | 18 ++
tests/fvt/config | 7 +
tests/fvt/fvt_base.py | 92 ++++++
tests/fvt/requirements.txt | 23 ++
tests/fvt/restapilib.py | 760 +++++++++++++++++++++++++++++++++++++++++++++
tests/fvt/run_tests.sh.in | 92 ++++++
10 files changed, 1048 insertions(+), 2 deletions(-)
create mode 100644 tests/fvt/Makefile.am
create mode 100644 tests/fvt/__init__.py
create mode 100644 tests/fvt/config
create mode 100644 tests/fvt/fvt_base.py
create mode 100644 tests/fvt/requirements.txt
create mode 100644 tests/fvt/restapilib.py
create mode 100755 tests/fvt/run_tests.sh.in
--
2.1.0
[PATCH] Adding Admin New Custom Backup History
by sguimaraes943@gmail.com
From: samhenri <samuel.guimaraes(a)eldorado.org.br>
Andre Teodoro (1):
Adding Admin New Custom Backup history
ui/css/src/modules/_administration.scss | 54 +++++++++++++++++++++++++++++++-
ui/js/host-admin.js | Bin 34874 -> 35478 bytes
ui/pages/host-admin.html.tmpl | 31 ++++++++++++------
3 files changed, 74 insertions(+), 11 deletions(-)
--
1.9.3
[PATCH 0/2] Media Queries and Repositories modals
by sguimaraes943@gmail.com
From: samhenri <samuel.guimaraes(a)eldorado.org.br>
The first patch adds support for media queries in Gingerbase.
The second patch adds wok-radio and wok-checkboxes to the modal windows. It also fixes the "Cancel" button that wasn't working.
Regards,
Samuel
samhenri (2):
Adding Media Queries support to Gingerbase
Fixed Repository Add and Edit modals
ui/css/src/modules/_host.scss | 113 +++++++++++++++++++++++++++++++------
ui/pages/host-dashboard.html.tmpl | 50 ++++++++--------
ui/pages/repository-add.html.tmpl | 8 +--
ui/pages/repository-edit.html.tmpl | 17 +++---
4 files changed, 133 insertions(+), 55 deletions(-)
--
1.9.3
[PATCH] [Wok] Create utils method get_task_id()
by pvital@linux.vnet.ibm.com
From: Paulo Vital <pvital(a)linux.vnet.ibm.com>
The task_id, used when storing AsyncTasks in the objectstore, is a global
variable. This patch creates a simple method to return the value of
this variable.
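For illustration only (the task URI, callback, and objstore below are
placeholders, not code from this patch), a caller could then do:

from wok.utils import add_task, get_task_id

def sample_task(cb, opaque):
    # Trivial task body used only for this example.
    cb('OK', True)

# 'objstore' stands for the caller's object store instance.
add_task('/tasks/example', sample_task, objstore)
latest_id = get_task_id()   # id of the AsyncTask that add_task just created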
Signed-off-by: Paulo Vital <pvital(a)linux.vnet.ibm.com>
---
src/wok/utils.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/src/wok/utils.py b/src/wok/utils.py
index 74c521f..8c8973a 100644
--- a/src/wok/utils.py
+++ b/src/wok/utils.py
@@ -48,6 +48,11 @@ def get_next_task_id():
return task_id
+def get_task_id():
+ global task_id
+ return task_id
+
+
def add_task(target_uri, fn, objstore, opaque=None):
id = get_next_task_id()
AsyncTask(id, target_uri, fn, objstore, opaque)
--
2.5.0