[PATCH v2 0/5] Clone virtual machine

This is the difference between this and the previous patchset (v1): - The function "StorageVolumes.create(volume_path)" was rewritten as "StorageVolume.clone(pool, name)". - Patch 2 has been updated to fix the test error (introduced by a commit pushed later). - The function "get_next_clone_name" was moved to the module "kimchi.utils" and was generified (i.e. it can handle different resources other than VMs, such as storage volumes) Crístian Viana (5): Add model function to wait for task Clean up test pool directories Clone storage volume Clone virtual machine Add tests and mockmodel for the cloning feature docs/API.md | 10 ++ src/kimchi/control/storagevolumes.py | 1 + src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 5 + src/kimchi/mockmodel.py | 86 ++++++++++- src/kimchi/model/storagevolumes.py | 97 +++++++++++- src/kimchi/model/tasks.py | 28 ++++ src/kimchi/model/vms.py | 280 ++++++++++++++++++++++++++++++++++- src/kimchi/utils.py | 41 +++++ tests/test_model.py | 81 ++++++++-- tests/test_rest.py | 54 +++++++ 11 files changed, 669 insertions(+), 15 deletions(-) -- 1.9.3

Sometimes we need to wait for a task to finish. Instead of writing the waiting loop every time, we can now use the function "wait". Add the function "TaskModel.wait" which waits for a task to finish for a maximum amount of time (in seconds). If the task finishes before the timeout expires, the function returns successfully; otherwise, an exception is raised. Also, update calls to an already existing function which does the same thing in the test code, and replace its usages with this new function. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/i18n.py | 1 + src/kimchi/model/tasks.py | 28 ++++++++++++++++++++++++++++ tests/test_model.py | 19 +++++++++---------- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index f789259..10408bf 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -35,6 +35,7 @@ messages = { "KCHASYNC0001E": _("Datastore is not initiated in the model object."), "KCHASYNC0002E": _("Unable to start task due error: %(err)s"), + "KCHASYNC0003E": _("Timeout of %(seconds)s seconds expired while running task '%(task)s."), "KCHAUTH0001E": _("Authentication failed for user '%(username)s'. [Error code: %(code)s]"), "KCHAUTH0002E": _("You are not authorized to access Kimchi"), diff --git a/src/kimchi/model/tasks.py b/src/kimchi/model/tasks.py index f25bcbf..61bc2f3 100644 --- a/src/kimchi/model/tasks.py +++ b/src/kimchi/model/tasks.py @@ -18,6 +18,11 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +import time + +from kimchi.exception import TimeoutExpired + + class TasksModel(object): def __init__(self, **kargs): self.objstore = kargs['objstore'] @@ -34,3 +39,26 @@ class TaskModel(object): def lookup(self, id): with self.objstore as session: return session.get('task', str(id)) + + def wait(self, id, timeout=10): + """Wait for a task until it stops running (successfully or due to + an error). 
If the Task finishes its execution before <timeout>, this + function returns normally; otherwise an exception is raised. + + Parameters: + id -- The Task ID. + timeout -- The maximum time, in seconds, that this function should wait + for the Task. If the Task runs for more than <timeout>, + "TimeoutExpired" is raised. + """ + for i in range(0, timeout): + with self.objstore as session: + task = session.get('task', str(id)) + + if task['status'] != 'running': + return + + time.sleep(1) + + raise TimeoutExpired('KCHASYNC0003E', {'seconds': timeout, + 'task': task['target_uri']}) diff --git a/tests/test_model.py b/tests/test_model.py index 3177fd4..d8c904e 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -45,7 +45,6 @@ from kimchi.iscsi import TargetClient from kimchi.model import model from kimchi.rollbackcontext import RollbackContext from kimchi.utils import add_task -from utils import wait_task invalid_repository_urls = ['www.fedora.org', # missing protocol @@ -130,7 +129,7 @@ class ModelTests(unittest.TestCase): 'format': 'qcow2'} task_id = inst.storagevolumes_create('default', params)['id'] rollback.prependDefer(inst.storagevolume_delete, 'default', vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) self.assertEquals('finished', inst.task_lookup(task_id)['status']) vol_path = inst.storagevolume_lookup('default', vol)['path'] @@ -291,7 +290,7 @@ class ModelTests(unittest.TestCase): 'format': 'qcow2'} task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) vm_name = 'kimchi-cdrom' params = {'name': 'test', 'disks': [], 'cdrom': self.kimchi_iso} @@ -570,7 +569,7 @@ class ModelTests(unittest.TestCase): params['name'] = vol task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) 
self.assertEquals('finished', inst.task_lookup(task_id)['status']) fd, path = tempfile.mkstemp(dir=path) @@ -611,7 +610,7 @@ class ModelTests(unittest.TestCase): taskid = task_response['id'] vol_name = task_response['target_uri'].split('/')[-1] self.assertEquals('COPYING', vol_name) - wait_task(inst.task_lookup, taskid, timeout=60) + inst.task_wait(taskid, timeout=60) self.assertEquals('finished', inst.task_lookup(taskid)['status']) vol_path = os.path.join(args['path'], vol_name) self.assertTrue(os.path.isfile(vol_path)) @@ -1156,7 +1155,7 @@ class ModelTests(unittest.TestCase): inst = model.Model('test:///default', objstore_loc=self.tmp_store) taskid = add_task('', quick_op, inst.objstore, 'Hello') - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals(1, taskid) self.assertEquals('finished', inst.task_lookup(taskid)['status']) self.assertEquals('Hello', inst.task_lookup(taskid)['message']) @@ -1167,12 +1166,12 @@ class ModelTests(unittest.TestCase): self.assertEquals(2, taskid) self.assertEquals('running', inst.task_lookup(taskid)['status']) self.assertEquals('OK', inst.task_lookup(taskid)['message']) - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals('failed', inst.task_lookup(taskid)['status']) self.assertEquals('It was not meant to be', inst.task_lookup(taskid)['message']) taskid = add_task('', abnormal_op, inst.objstore, {}) - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals('Exception raised', inst.task_lookup(taskid)['message']) self.assertEquals('failed', inst.task_lookup(taskid)['status']) @@ -1180,7 +1179,7 @@ class ModelTests(unittest.TestCase): taskid = add_task('', continuous_ops, inst.objstore, {'result': True}) self.assertEquals('running', inst.task_lookup(taskid)['status']) - wait_task(inst.task_lookup, taskid, timeout=10) + inst.task_wait(taskid, timeout=10) self.assertEquals('finished', inst.task_lookup(taskid)['status']) # This wrapper function is needed due to 
the new backend messaging in @@ -1286,7 +1285,7 @@ class ModelTests(unittest.TestCase): task = inst.debugreports_create({'name': reportName}) rollback.prependDefer(inst.debugreport_delete, tmp_name) taskid = task['id'] - wait_task(inst.task_lookup, taskid, timeout) + inst.task_wait(taskid, timeout) self.assertEquals('finished', inst.task_lookup(taskid)['status'], "It is not necessary an error. " -- 1.9.3

During tests, some storage pools are created locally and their directories are left behind in the system. The pool itself is removed from libvirt, but the directory remains. Remove the test storage pools directories after the tests are finished. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- tests/test_model.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_model.py b/tests/test_model.py index d8c904e..bd41c79 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -492,6 +492,8 @@ class ModelTests(unittest.TestCase): num = len(pools) + 1 inst.storagepools_create(poolDef) + if poolDef['type'] == 'dir': + rollback.prependDefer(shutil.rmtree, poolDef['path']) rollback.prependDefer(inst.storagepool_delete, name) pools = inst.storagepools_get_list() @@ -548,6 +550,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) self.assertRaises(InvalidOperation, inst.storagevolumes_get_list, @@ -657,6 +660,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) inst.template_update('test', params) @@ -1373,6 +1377,8 @@ class ModelTests(unittest.TestCase): 'path': '/tmp/kimchi-images', 'type': 'kimchi-iso'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, '/tmp/kimchi-images') + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_deactivate, args['name']) time.sleep(1) -- 1.9.3

In order to clone VMs, we will need to be able to clone storage volumes. Add a new method to clone a storage volume, e.g.: POST /storagepools/<pool-name>/storagevolumes/<volume-name>/clone {'pool': 'another-pool', 'name': 'new-name'} This method uses the parameters 'pool' and 'name' which specify the destination storage pool and the volume name. If 'pool' is omitted, the new volume will be cloned to the same pool as the original volume; if 'name' is omitted, a default value will be used. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 3 ++ src/kimchi/control/storagevolumes.py | 1 + src/kimchi/i18n.py | 1 + src/kimchi/mockmodel.py | 45 ++++++++++++++++- src/kimchi/model/storagevolumes.py | 97 +++++++++++++++++++++++++++++++++++- src/kimchi/utils.py | 41 +++++++++++++++ tests/test_model.py | 18 +++++++ tests/test_rest.py | 23 +++++++++ 8 files changed, 226 insertions(+), 3 deletions(-) diff --git a/docs/API.md b/docs/API.md index 9c06f85..b80dbe7 100644 --- a/docs/API.md +++ b/docs/API.md @@ -478,6 +478,9 @@ A interface represents available network interface on VM. * size: resize the total space which can be used to store data The unit is MBytes * wipe: Wipe a Storage Volume +* clone: Clone a Storage Volume. + * pool: The name of the destination pool (optional). + * name: The new storage volume name (optional). 
### Collection: Interfaces diff --git a/src/kimchi/control/storagevolumes.py b/src/kimchi/control/storagevolumes.py index 79170ee..9f5fcea 100644 --- a/src/kimchi/control/storagevolumes.py +++ b/src/kimchi/control/storagevolumes.py @@ -47,6 +47,7 @@ class StorageVolume(Resource): self.uri_fmt = '/storagepools/%s/storagevolumes/%s' self.resize = self.generate_action_handler('resize', ['size']) self.wipe = self.generate_action_handler('wipe') + self.clone = self.generate_action_handler_task('clone') @property def data(self): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 10408bf..2aa6d5e 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -207,6 +207,7 @@ messages = { "KCHVOL0020E": _("Storage volume capacity must be an integer number."), "KCHVOL0021E": _("Storage volume URL must be http://, https://, ftp:// or ftps://."), "KCHVOL0022E": _("Unable to access file %(url)s. Please, check it."), + "KCHVOL0023E": _("Unable to clone storage volume '%(name)s' in pool '%(pool)s'. 
Details: %(err)s"), "KCHIFACE0001E": _("Interface %(name)s does not exist"), diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index 7163f8d..ffddb78 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -52,8 +52,8 @@ from kimchi.model.storageservers import STORAGE_SERVERS from kimchi.model.utils import get_vm_name from kimchi.objectstore import ObjectStore from kimchi.screenshot import VMScreenshot -from kimchi.utils import pool_name_from_uri, validate_repo_url -from kimchi.utils import template_name_from_uri +from kimchi.utils import get_next_clone_name, pool_name_from_uri +from kimchi.utils import validate_repo_url, template_name_from_uri from kimchi.vmtemplate import VMTemplate @@ -588,6 +588,47 @@ class MockModel(object): cb('OK', True) + def storagevolume_clone(self, pool, name, new_pool=None, new_name=None): + if new_name is None: + base, ext = os.path.splitext(name) + new_name = get_next_clone_name(self.vms_get_list(), base, ext) + + if new_pool is None: + new_pool = pool + + params = {'name': name, + 'pool': pool, + 'new_name': new_name, + 'new_pool': new_pool} + taskid = self.add_task('/storagepools/%s/storagevolumes/%s' % + (new_pool, new_name), + self._storagevolume_clone_task, params) + return self.task_lookup(taskid) + + def _storagevolume_clone_task(self, cb, params): + try: + vol_name = params['name'].decode('utf-8') + pool_name = params['pool'].decode('utf-8') + new_vol_name = params['new_name'].decode('utf-8') + new_pool_name = params['new_pool'].decode('utf-8') + + orig_pool = self._get_storagepool(pool_name) + orig_vol = self._get_storagevolume(pool_name, vol_name) + + new_vol = copy.deepcopy(orig_vol) + new_vol.info['name'] = new_vol_name + new_vol.info['path'] = os.path.join(orig_pool.info['path'], + new_vol_name) + + new_pool = self._get_storagepool(new_pool_name) + new_pool._volumes[new_vol_name] = new_vol + except (KeyError, NotFoundError), e: + raise OperationFailed('KCHVOL0023E', + {'name': vol_name, 'pool': 
pool_name, + 'err': e.message}) + + cb('OK', True) + def storagevolume_lookup(self, pool, name): if self._get_storagepool(pool).info['state'] != 'active': raise InvalidOperation("KCHVOL0005E", {'pool': pool, diff --git a/src/kimchi/model/storagevolumes.py b/src/kimchi/model/storagevolumes.py index 9ff43e6..d610059 100644 --- a/src/kimchi/model/storagevolumes.py +++ b/src/kimchi/model/storagevolumes.py @@ -18,9 +18,11 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import contextlib +import lxml.etree as ET import os import time import urllib2 +from lxml.builder import E import libvirt @@ -31,7 +33,7 @@ from kimchi.isoinfo import IsoImage from kimchi.model.storagepools import StoragePoolModel from kimchi.model.tasks import TaskModel from kimchi.model.vms import VMsModel, VMModel -from kimchi.utils import add_task, kimchi_log +from kimchi.utils import add_task, get_next_clone_name, kimchi_log from kimchi.xmlutils.disk import get_vm_disk_info, get_vm_disks from kimchi.xmlutils.utils import xpath_get_text @@ -238,6 +240,8 @@ class StorageVolumeModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] + self.task = TaskModel(**kargs) + self.storagevolumes = StorageVolumesModel(**kargs) def _get_storagevolume(self, poolname, name): pool = StoragePoolModel.get_storagepool(poolname, self.conn) @@ -345,6 +349,97 @@ class StorageVolumeModel(object): raise OperationFailed("KCHVOL0011E", {'name': name, 'err': e.get_error_message()}) + def clone(self, pool, name, new_pool=None, new_name=None): + """Clone a storage volume. + + Arguments: + pool -- The name of the original pool. + name -- The name of the original volume. + new_pool -- The name of the destination pool (optional). If omitted, + the new volume will be created on the same pool as the + original one. + new_name -- The name of the new volume (optional). If omitted, a new + value based on the original volume's name will be used. 
+ + Return: + A Task running the clone operation. + """ + pool = pool.decode('utf-8') + name = name.decode('utf-8') + + # the same pool will be used if no pool is specified + if new_pool is None: + new_pool = pool + + # a default name based on the original name will be used if no name + # is specified + if new_name is None: + base, ext = os.path.splitext(name) + new_name = get_next_clone_name(self.storagevolumes.get_list(pool), + base, ext) + + params = {'pool': pool, + 'name': name, + 'new_pool': new_pool, + 'new_name': new_name} + taskid = add_task(u'/storagepools/%s/storagevolumes/%s' % + (pool, new_name), self._clone_task, self.objstore, + params) + return self.task.lookup(taskid) + + def _clone_task(self, cb, params): + """Asynchronous function which performs the clone operation. + + This function copies all the data inside the original volume into the + new one. + + Arguments: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "pool": The name of the original pool. + "name": The name of the original volume. + "new_pool": The name of the destination pool. + "new_name": The name of the new volume. 
+ """ + orig_pool_name = params['pool'].decode('utf-8') + orig_vol_name = params['name'].decode('utf-8') + new_pool_name = params['new_pool'].decode('utf-8') + new_vol_name = params['new_name'].decode('utf-8') + + try: + cb('setting up volume cloning') + orig_vir_vol = self._get_storagevolume(orig_pool_name, + orig_vol_name) + orig_vol = self.lookup(orig_pool_name, orig_vol_name) + new_vir_pool = StoragePoolModel.get_storagepool(new_pool_name, + self.conn) + + cb('building volume XML') + root_elem = E.volume() + root_elem.append(E.name(new_vol_name)) + root_elem.append(E.capacity(unicode(orig_vol['capacity']), + unit='bytes')) + target_elem = E.target() + target_elem.append(E.format(type=orig_vol['format'])) + root_elem.append(target_elem) + new_vol_xml = ET.tostring(root_elem, encoding='utf-8', + pretty_print=True) + + cb('cloning volume') + new_vir_pool.createXMLFrom(new_vol_xml, orig_vir_vol, 0) + except (InvalidOperation, NotFoundError, libvirt.libvirtError), e: + raise OperationFailed('KCHVOL0023E', + {'name': orig_vol_name, + 'pool': orig_pool_name, + 'err': e.get_error_message()}) + + cb('adding volume to the object store') + new_vol_id = '%s:%s' % (new_pool_name, new_vol_name) + with self.objstore as session: + session.store('storagevolume', new_vol_id, {'ref_cnt': 0}) + + cb('OK', True) + class IsoVolumesModel(object): def __init__(self, **kargs): diff --git a/src/kimchi/utils.py b/src/kimchi/utils.py index 0977b9f..68415dc 100644 --- a/src/kimchi/utils.py +++ b/src/kimchi/utils.py @@ -310,3 +310,44 @@ def validate_repo_url(url): raise InvalidParameter("KCHUTILS0001E", {'uri': url}) else: raise InvalidParameter("KCHREPOS0002E") + + +def get_next_clone_name(all_names, basename, name_suffix=''): + """Find the next available name for a cloned resource. + + If any resource named "<basename>-clone-<number><name_suffix>" is found + in "all_names", use the maximum "number" + 1; else, use 1. + + Arguments: + all_names -- All existing names for the resource type. 
This list will + be used to make sure the new name won't conflict with + existing names. + basename -- The name of the original resource. + name_suffix -- The resource name suffix (optional). This parameter + exist so that a resource named "foo.img" gets the name + "foo-clone-1.img" instead of "foo.img-clone-1". If this parameter + is used, the suffix should not be present in "basename". + + Return: + A UTF-8 string in the format "<basename>-clone-<number><name_suffix>". + """ + re_group_num = 'num' + + re_expr = u'%s-clone-(?P<%s>\d+)' % (basename, re_group_num) + if name_suffix != '': + re_expr = u'%s-%s' % (re_expr, name_suffix) + + max_num = 0 + re_compiled = re.compile(re_expr) + + for n in all_names: + match = re_compiled.match(n) + if match is not None: + max_num = max(max_num, int(match.group(re_group_num))) + + # increments the maximum "clone number" found + new_name = u'%s-clone-%d' % (basename, max_num + 1) + if name_suffix != '': + new_name = new_name + name_suffix + + return new_name diff --git a/tests/test_model.py b/tests/test_model.py index bd41c79..32777b1 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -623,6 +623,24 @@ class ModelTests(unittest.TestCase): cp_content = cp_file.read() self.assertEquals(vol_content, cp_content) + # clone the volume created above + task = inst.storagevolume_clone(pool, vol_name) + taskid = task['id'] + cloned_vol_name = task['target_uri'].split('/')[-1] + inst.task_wait(taskid) + self.assertEquals('finished', inst.task_lookup(taskid)['status']) + rollback.prependDefer(inst.storagevolume_delete, pool, + cloned_vol_name) + + orig_vol = inst.storagevolume_lookup(pool, vol_name) + cloned_vol = inst.storagevolume_lookup(pool, cloned_vol_name) + + self.assertNotEquals(orig_vol['path'], cloned_vol['path']) + del orig_vol['path'] + del cloned_vol['path'] + + self.assertEquals(orig_vol, cloned_vol) + @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_template_storage_customise(self): 
inst = model.Model(objstore_loc=self.tmp_store) diff --git a/tests/test_rest.py b/tests/test_rest.py index 9bc930f..3cf9e2b 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -1047,6 +1047,29 @@ class RestTests(unittest.TestCase): resp = self.request('/storagepools/pool-1/storagevolumes/%s' % vol_name, '{}', 'GET') self.assertEquals(200, resp.status) + vol = json.loads(resp.read()) + + # clone the volume created above + resp = self.request('/storagepools/pool-1/storagevolumes/%s/clone' % + vol_name, {}, 'POST') + self.assertEquals(202, resp.status) + task = json.loads(resp.read()) + cloned_vol_name = task['target_uri'].split('/')[-1] + wait_task(self._task_lookup, task['id']) + task = json.loads(self.request('/tasks/%s' % task['id']).read()) + self.assertEquals('finished', task['status']) + resp = self.request('/storagepools/pool-1/storagevolumes/%s' % + cloned_vol_name, '{}', 'GET') + self.assertEquals(200, resp.status) + cloned_vol = json.loads(resp.read()) + + self.assertNotEquals(vol['name'], cloned_vol['name']) + del vol['name'] + del cloned_vol['name'] + self.assertNotEquals(vol['path'], cloned_vol['path']) + del vol['path'] + del cloned_vol['path'] + self.assertEquals(vol, cloned_vol) # Now remove the StoragePool from mock model self._delete_pool('pool-1') -- 1.9.3

The new command "POST /vms/<vm-name>/clone" will create a new virtual machine based on <vm-name>. The new VM will have the exact same settings, except the name, UUID, MAC address and disk paths; those values will be generated automatically. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 7 ++ src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 3 + src/kimchi/model/vms.py | 280 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 289 insertions(+), 2 deletions(-) diff --git a/docs/API.md b/docs/API.md index b80dbe7..9b866f3 100644 --- a/docs/API.md +++ b/docs/API.md @@ -133,6 +133,13 @@ the following general conventions: risk of data loss caused by reset without the guest OS shutdown. * connect: Prepare the connection for spice or vnc +* clone: Create a new VM identical to this VM. The new VM's name, UUID and + network MAC addresses will be generated automatically. Each existing + disks will be copied to a new volume in the same storage pool. If + there is no available space on that storage pool to hold the new + volume, it will be created on the pool 'default'. This action returns + a Task. + ### Sub-resource: Virtual Machine Screenshot **URI:** /vms/*:name*/screenshot diff --git a/src/kimchi/control/vms.py b/src/kimchi/control/vms.py index 88d8a81..a1589ef 100644 --- a/src/kimchi/control/vms.py +++ b/src/kimchi/control/vms.py @@ -46,6 +46,7 @@ class VM(Resource): self.shutdown = self.generate_action_handler('shutdown') self.reset = self.generate_action_handler('reset') self.connect = self.generate_action_handler('connect') + self.clone = self.generate_action_handler_task('clone') @property def data(self): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 2aa6d5e..e823f2b 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -102,6 +102,9 @@ messages = { "KCHVM0030E": _("Unable to get access metadata of virtual machine %(name)s. 
Details: %(err)s"), "KCHVM0031E": _("The guest console password must be a string."), "KCHVM0032E": _("The life time for the guest console password must be a number."), + "KCHVM0033E": _("Virtual machine '%(name)s' must be stopped before cloning it."), + "KCHVM0034E": _("Insufficient disk space to clone virtual machine '%(name)s'"), + "KCHVM0035E": _("Unable to clone VM '%(name)s'. Details: %(err)s"), "KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index f2e4ae3..8c1f581 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -30,17 +30,20 @@ from xml.etree import ElementTree import libvirt from cherrypy.process.plugins import BackgroundTask -from kimchi import vnc +from kimchi import model, vnc from kimchi.config import READONLY_POOL_TYPE from kimchi.exception import InvalidOperation, InvalidParameter from kimchi.exception import NotFoundError, OperationFailed from kimchi.model.config import CapabilitiesModel +from kimchi.model.tasks import TaskModel from kimchi.model.templates import TemplateModel from kimchi.model.utils import get_vm_name from kimchi.model.utils import get_metadata_node from kimchi.model.utils import set_metadata_node +from kimchi.rollbackcontext import RollbackContext from kimchi.screenshot import VMScreenshot -from kimchi.utils import import_class, kimchi_log, run_setfacl_set_attr +from kimchi.utils import add_task, get_next_clone_name, import_class +from kimchi.utils import kimchi_log, run_setfacl_set_attr from kimchi.utils import template_name_from_uri from kimchi.xmlutils.utils import xpath_get_text, xml_item_update @@ -63,6 +66,15 @@ VM_LIVE_UPDATE_PARAMS = {} stats = {} +XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" +XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" 
+XPATH_DOMAIN_NAME = '/domain/name' +XPATH_DOMAIN_MAC = "/domain/devices/interface[@type='network']/mac/@address" +XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\ + "mac[@address='%s']" +XPATH_DOMAIN_UUID = '/domain/uuid' + + class VMsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] @@ -251,6 +263,11 @@ class VMModel(object): self.vmscreenshot = VMScreenshotModel(**kargs) self.users = import_class('kimchi.model.host.UsersModel')(**kargs) self.groups = import_class('kimchi.model.host.GroupsModel')(**kargs) + self.vms = VMsModel(**kargs) + self.task = TaskModel(**kargs) + self.storagepool = model.storagepools.StoragePoolModel(**kargs) + self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs) + self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs) def update(self, name, params): dom = self.get_vm(name, self.conn) @@ -258,6 +275,265 @@ class VMModel(object): self._live_vm_update(dom, params) return dom.name().decode('utf-8') + def clone(self, name): + """Clone a virtual machine based on an existing one. + + The new virtual machine will have the exact same configuration as the + original VM, except for the name, UUID, MAC addresses and disks. The + name will have the form "<name>-clone-<number>", with <number> starting + at 1; the UUID will be generated randomly; the MAC addresses will be + generated randomly with no conflicts within the original and the new + VM; and the disks will be new volumes [mostly] on the same storage + pool, with the same content as the original disks. The storage pool + 'default' will always be used when cloning SCSI and iSCSI disks and + when the original storage pool cannot hold the new volume. 
+ + An exception will be raised if the virtual machine <name> is not + shutoff, if there is no available space to copy a new volume to the + storage pool 'default' (when there was also no space to copy it to the + original storage pool) and if one of the virtual machine's disks belong + to a storage pool not supported by Kimchi. + + Parameters: + name -- The name of the existing virtual machine to be cloned. + + Return: + A Task running the clone operation. + """ + name = name.decode('utf-8') + + # VM must be shutoff in order to clone it + info = self.lookup(name) + if info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + # this name will be used as the Task's 'target_uri' so it needs to be + # defined now. + new_name = get_next_clone_name(self.vms.get_list(), name) + + # create a task with the actual clone function + taskid = add_task(u'/vms/%s' % new_name, self._clone_task, + self.objstore, + {'name': name, 'new_name': new_name}) + + return self.task.lookup(taskid) + + def _clone_task(self, cb, params): + """Asynchronous function which performs the clone operation. + + Parameters: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "name": the name of the original VM. + "new_name": the name of the new VM. 
+ """ + name = params['name'] + new_name = params['new_name'] + vir_conn = self.conn.get() + + # fetch base XML + cb('reading source VM XML') + try: + vir_dom = vir_conn.lookupByName(name) + flags = libvirt.VIR_DOMAIN_XML_SECURE + xml = vir_dom.XMLDesc(flags).decode('utf-8') + except libvirt.libvirtError, e: + raise OperationFailed('KCHVM0035E', {'name': name, + 'err': e.message}) + + # update UUID + cb('updating VM UUID') + old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + new_uuid = unicode(uuid.uuid4()) + xml = xml_item_update(xml, './uuid', new_uuid) + + # update MAC addresses + cb('updating VM MAC addresses') + xml = self._clone_update_mac_addresses(xml) + + with RollbackContext() as rollback: + # copy disks + cb('copying VM disks') + xml = self._clone_update_disks(xml, rollback) + + # update objstore entry + cb('updating object store') + self._clone_update_objstore(old_uuid, new_uuid, rollback) + + # update name + cb('updating VM name') + xml = xml_item_update(xml, './name', new_name) + + # create new guest + cb('defining new VM') + try: + vir_conn.defineXML(xml) + except libvirt.libvirtError, e: + raise OperationFailed('KCHVM0035E', {'name': name, + 'err': e.message}) + + rollback.commitAll() + + cb('OK', True) + + @staticmethod + def _clone_update_mac_addresses(xml): + """Update the MAC addresses with new values in the XML descriptor of a + cloning domain. + + The new MAC addresses will be generated randomly, and their values are + guaranteed to be distinct from the ones in the original VM. + + Arguments: + xml -- The XML descriptor of the original domain. + + Return: + The XML descriptor <xml> with the new MAC addresses instead of the + old ones. + """ + old_macs = xpath_get_text(xml, XPATH_DOMAIN_MAC) + new_macs = [] + + for mac in old_macs: + while True: + new_mac = model.vmifaces.VMIfacesModel.random_mac() + # make sure the new MAC doesn't conflict with the original VM + # and with the new values on the new VM. 
+ if new_mac not in (old_macs + new_macs): + new_macs.append(new_mac) + break + + xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac, + new_mac, 'address') + + return xml + + def _clone_update_disks(self, xml, rollback): + """Clone disks from a virtual machine. The disks are copied as new + volumes and the new VM's XML is updated accordingly. + + Arguments: + xml -- The XML descriptor of the original VM + new value for + "/domain/uuid". + rollback -- A rollback context so the new volumes can be removed if an + error occurs during the cloning operation. + + Return: + The XML descriptor <xml> with the new disk paths instead of the + old ones. + """ + # the UUID will be used to create the disk paths + uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK) + + vir_conn = self.conn.get() + + for i, path in enumerate(all_paths): + try: + vir_orig_vol = vir_conn.storageVolLookupByPath(path) + vir_pool = vir_orig_vol.storagePoolLookupByVolume() + + orig_pool_name = vir_pool.name().decode('utf-8') + orig_vol_name = vir_orig_vol.name().decode('utf-8') + except libvirt.libvirtError, e: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise OperationFailed('KCHVM0035E', {'name': domain_name, + 'err': e.message}) + + orig_pool = self.storagepool.lookup(orig_pool_name) + orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) + + new_pool_name = orig_pool_name + new_pool = orig_pool + + if orig_pool['type'] in ['dir', 'netfs', 'logical']: + # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold + # a new volume with the same size, the pool 'default' should + # be used + if orig_vol['capacity'] > orig_pool['available']: + kimchi_log.warning('storage pool \'%s\' doesn\'t have ' + 'enough free space to store image ' + '\'%s\'; falling back to \'default\'', + orig_pool_name, path) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # ...and if even the pool 
'default' cannot hold a new + # volume, raise an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', + {'name': domain_name}) + + elif orig_pool['type'] in ['scsi', 'iscsi']: + # SCSI and iSCSI always fall back to the storage pool 'default' + kimchi_log.warning('cannot create new volume for clone in ' + 'storage pool \'%s\'; falling back to ' + '\'default\'', orig_pool_name) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # if the pool 'default' cannot hold a new volume, raise + # an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', {'name': domain_name}) + + else: + # unexpected storage pool type + raise InvalidOperation('KCHPOOL0014E', + {'type': orig_pool['type']}) + + # new volume name: <UUID>-<loop-index>.<original extension> + # e.g. 1234-5678-9012-3456-0.img + ext = os.path.splitext(path)[1] + new_vol_name = u'%s-%d%s' % (uuid, i, ext) + task = self.storagevolume.clone(orig_pool_name, orig_vol_name, + new_name=new_vol_name) + self.task.wait(task['id'], 3600) # 1 h + + # get the new volume path and update the XML descriptor + new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name) + xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path, + new_vol['path'], 'file') + + # remove the new volume should an error occur later + rollback.prependDefer(self.storagevolume.delete, new_pool_name, + new_vol_name) + + return xml + + def _clone_update_objstore(self, old_uuid, new_uuid, rollback): + """Update Kimchi's object store with the cloning VM. + + Arguments: + old_uuid -- The UUID of the original VM. + new_uuid -- The UUID of the new, cloned VM. + rollback -- A rollback context so the object store entry can be removed + if an error occurs during the cloning operation. 
+ """ + with self.objstore as session: + try: + vm = session.get('vm', old_uuid) + icon = vm['icon'] + session.store('vm', new_uuid, {'icon': icon}) + except NotFoundError: + # if we cannot find an object store entry for the original VM, + # don't store one with an empty value. + pass + else: + # we need to define a custom function to prepend to the + # rollback context because the object store session needs to be + # opened and closed correctly (i.e. "prependDefer" only + # accepts one command at a time but we need more than one to + # handle an object store). + def _rollback_objstore(): + with self.objstore as session_rb: + session_rb.delete('vm', new_uuid, ignore_missing=True) + + # remove the new object store entry should an error occur later + rollback.prependDefer(_rollback_objstore) + def _build_access_elem(self, users, groups): access = E.access() for user in users: -- 1.9.3

The function "vm_clone" is now implemented on the mockmodel with a similar implementation as the real model; the biggest difference is that it doesn't deal with the different storage pool details. Also, new tests were added. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/mockmodel.py | 41 +++++++++++++++++++++++++++++++++++++++++ tests/test_model.py | 38 ++++++++++++++++++++++++++++++++++++++ tests/test_rest.py | 31 +++++++++++++++++++++++++++++++ 3 files changed, 110 insertions(+) diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index ffddb78..626ef35 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -181,6 +181,47 @@ class MockModel(object): def vm_connect(self, name): pass + def vm_clone(self, name): + vm = self._mock_vms[name] + if vm.info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + new_name = get_next_clone_name(self.vms_get_list(), name) + + taskid = self.add_task(u'/vms/%s' % new_name, self._do_clone, + {'name': name, 'new_name': new_name}) + return self.task_lookup(taskid) + + def _do_clone(self, cb, params): + name = params['name'] + new_name = params['new_name'] + + vm = self._mock_vms[name] + new_vm = copy.deepcopy(vm) + + new_uuid = unicode(uuid.uuid4()) + + new_vm.name = new_name + new_vm.info['name'] = new_name + new_vm.uuid = new_uuid + new_vm.info['uuid'] = new_uuid + + for mac, iface in new_vm.ifaces.items(): + new_mac = MockVMIface.get_mac() + iface.info['mac'] = new_mac + new_vm.ifaces[new_mac] = iface + + storage_names = new_vm.storagedevices.keys() + for i, storage_name in enumerate(storage_names): + storage = new_vm.storagedevices[storage_name] + basename, ext = os.path.splitext(storage.info['path']) + new_path = u'%s-%d%s' % (basename, i, ext) + new_vm.storagedevices[storage_name].path = new_path + + self._mock_vms[new_name] = new_vm + + cb('OK', True) + def vms_create(self, params): t_name = template_name_from_uri(params['template']) name = 
get_vm_name(params.get('name'), t_name, self._mock_vms.keys()) diff --git a/tests/test_model.py b/tests/test_model.py index 32777b1..f4d842f 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -1255,6 +1255,44 @@ class ModelTests(unittest.TestCase): self.assertEquals(vms, sorted(vms, key=unicode.lower)) + def test_vm_clone(self): + inst = model.Model('test:///default', objstore_loc=self.tmp_store) + + all_vm_names = inst.vms_get_list() + name = all_vm_names[0] + + original_vm = inst.vm_lookup(name) + + # the VM 'test' should be running by now, so we can't clone it yet + self.assertRaises(InvalidParameter, inst.vm_clone, name) + + with RollbackContext() as rollback: + inst.vm_poweroff(name) + rollback.prependDefer(inst.vm_start, name) + + task = inst.vm_clone(name) + clone_name = task['target_uri'].split('/')[-1] + rollback.prependDefer(inst.vm_delete, clone_name) + inst.task_wait(task['id']) + + # update the original VM info because its state has changed + original_vm = inst.vm_lookup(name) + clone_vm = inst.vm_lookup(clone_name) + + self.assertNotEqual(original_vm['name'], clone_vm['name']) + self.assertTrue(re.match(u'%s-clone-\d+' % original_vm['name'], + clone_vm['name'])) + del original_vm['name'] + del clone_vm['name'] + + self.assertNotEqual(original_vm['uuid'], clone_vm['uuid']) + del original_vm['uuid'] + del clone_vm['uuid'] + + # compare all VM settings except the ones already compared + # (and removed) above (i.e. 
'name' and 'uuid') + self.assertEquals(original_vm, clone_vm) + def test_use_test_host(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) diff --git a/tests/test_rest.py b/tests/test_rest.py index 3cf9e2b..6770647 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -22,6 +22,7 @@ import base64 import json import os import random +import re import requests import shutil import time @@ -341,6 +342,10 @@ class RestTests(unittest.TestCase): self.assertEquals(200, resp.status) self.assertTrue(resp.getheader('Content-type').startswith('image')) + # Clone a running VM + resp = self.request('/vms/test-vm/clone', '{}', 'POST') + self.assertEquals(400, resp.status) + # Force poweroff the VM resp = self.request('/vms/test-vm/poweroff', '{}', 'POST') vm = json.loads(self.request('/vms/test-vm').read()) @@ -351,6 +356,32 @@ class RestTests(unittest.TestCase): resp = self.request('/vms', req, 'POST') self.assertEquals(400, resp.status) + # Clone a VM + resp = self.request('/vms/test-vm/clone', '{}', 'POST') + self.assertEquals(202, resp.status) + task = json.loads(resp.read()) + wait_task(self._task_lookup, task['id']) + task = json.loads(self.request('/tasks/%s' % task['id'], '{}').read()) + self.assertEquals('finished', task['status']) + clone_vm_name = task['target_uri'].split('/')[-1] + self.assertTrue(re.match(u'test-vm-clone-\d+', clone_vm_name)) + + resp = self.request('/vms/test-vm', '{}') + original_vm_info = json.loads(resp.read()) + resp = self.request('/vms/%s' % clone_vm_name, '{}') + self.assertEquals(200, resp.status) + clone_vm_info = json.loads(resp.read()) + + self.assertNotEqual(original_vm_info['name'], clone_vm_info['name']) + del original_vm_info['name'] + del clone_vm_info['name'] + + self.assertNotEqual(original_vm_info['uuid'], clone_vm_info['uuid']) + del original_vm_info['uuid'] + del clone_vm_info['uuid'] + + self.assertEquals(original_vm_info, clone_vm_info) + # Delete the VM resp = self.request('/vms/test-vm', '{}', 
'DELETE') self.assertEquals(204, resp.status) -- 1.9.3
participants (2)
-
Aline Manera
-
Crístian Viana