[PATCH 0/8] Clone virtual machines

Crístian Viana (8): Make function "randomMAC" public Allow updating XML attribute in "xml_item_update" Render different types of data in generate_action_handler Add model function to wait for task Clean up test pool directories Create storage volume based on an existing volume Clone virtual machines Add tests and mockmodel for the cloning feature docs/API.md | 8 + src/kimchi/API.json | 5 + src/kimchi/control/base.py | 37 +++-- src/kimchi/control/host.py | 18 +-- src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 4 + src/kimchi/mockmodel.py | 102 ++++++++++++- src/kimchi/model/storagevolumes.py | 73 ++++++++- src/kimchi/model/tasks.py | 28 ++++ src/kimchi/model/vmifaces.py | 18 +-- src/kimchi/model/vms.py | 297 ++++++++++++++++++++++++++++++++++++- src/kimchi/xmlutils/utils.py | 7 +- tests/test_model.py | 83 +++++++++-- tests/test_rest.py | 54 +++++++ 14 files changed, 682 insertions(+), 53 deletions(-) -- 1.9.3

A function from a different module might require a new random MAC so the function "randomMAC", defined inside another function in VMIfacesModel, should be declared in a public scope. Move the definition of "randomMAC" to outside its outer function in order to make it public. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/model/vmifaces.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/kimchi/model/vmifaces.py b/src/kimchi/model/vmifaces.py index 66b3827..721bf14 100644 --- a/src/kimchi/model/vmifaces.py +++ b/src/kimchi/model/vmifaces.py @@ -38,13 +38,6 @@ class VMIfacesModel(object): return macs def create(self, vm, params): - def randomMAC(): - mac = [0x52, 0x54, 0x00, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) - conn = self.conn.get() networks = conn.listNetworks() + conn.listDefinedNetworks() @@ -59,11 +52,10 @@ class VMIfacesModel(object): macs = (iface.mac.get('address') for iface in self.get_vmifaces(vm, self.conn)) - mac = randomMAC() while True: + mac = VMIfacesModel.random_mac() if mac not in macs: break - mac = randomMAC() children = [E.mac(address=mac)] ("network" in params.keys() and @@ -86,6 +78,14 @@ class VMIfacesModel(object): return root.devices.findall("interface") + @staticmethod + def random_mac(): + mac = [0x52, 0x54, 0x00, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: u'%02x' % x, mac)) + class VMIfaceModel(object): def __init__(self, **kargs): -- 1.9.3

Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com> On 11/02/2014 11:05 PM, Crístian Viana wrote:
A function from a different module might require a new random MAC so the function "randomMAC", defined inside another function in VMIfacesModel, should be declared in a public scope.
Move the definition of "randomMAC" to outside its outer function in order to make it public.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/model/vmifaces.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/kimchi/model/vmifaces.py b/src/kimchi/model/vmifaces.py index 66b3827..721bf14 100644 --- a/src/kimchi/model/vmifaces.py +++ b/src/kimchi/model/vmifaces.py @@ -38,13 +38,6 @@ class VMIfacesModel(object): return macs
def create(self, vm, params): - def randomMAC(): - mac = [0x52, 0x54, 0x00, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) - conn = self.conn.get() networks = conn.listNetworks() + conn.listDefinedNetworks()
@@ -59,11 +52,10 @@ class VMIfacesModel(object): macs = (iface.mac.get('address') for iface in self.get_vmifaces(vm, self.conn))
- mac = randomMAC() while True: + mac = VMIfacesModel.random_mac() if mac not in macs: break - mac = randomMAC()
children = [E.mac(address=mac)] ("network" in params.keys() and @@ -86,6 +78,14 @@ class VMIfacesModel(object):
return root.devices.findall("interface")
+ @staticmethod + def random_mac(): + mac = [0x52, 0x54, 0x00, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: u'%02x' % x, mac)) +
class VMIfaceModel(object): def __init__(self, **kargs):

The function "xml_item_update" only updates the content of any given tag, filtered by an XPath expression. However, one might want to update one of the tag's attributes instead of its content. Add an optional parameter to "xml_item_update" which sets the attribute name to be updated; if not set, the function will behave as before, i.e. update the tag's content. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/xmlutils/utils.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/kimchi/xmlutils/utils.py b/src/kimchi/xmlutils/utils.py index bfbe0c1..4863bb5 100644 --- a/src/kimchi/xmlutils/utils.py +++ b/src/kimchi/xmlutils/utils.py @@ -27,10 +27,13 @@ def xpath_get_text(xml, expr): return res -def xml_item_update(xml, xpath, value): +def xml_item_update(xml, xpath, value, attr=None): root = ET.fromstring(xml) item = root.find(xpath) - item.text = value + if attr is None: + item.text = value + else: + item.set(attr, value) return ET.tostring(root, encoding="utf-8") -- 1.9.3

Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com> On 11/02/2014 11:05 PM, Crístian Viana wrote:
The function "xml_item_update" only updates the content of any given tag, filtered by an XPath expression. However, one might want to update one of the tag's attributes instead of its content.
Add an optional parameter to "xml_item_update" which sets the attribute name to be updated; if not set, the function will behave as before, i.e. update the tag's content.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/xmlutils/utils.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/src/kimchi/xmlutils/utils.py b/src/kimchi/xmlutils/utils.py index bfbe0c1..4863bb5 100644 --- a/src/kimchi/xmlutils/utils.py +++ b/src/kimchi/xmlutils/utils.py @@ -27,10 +27,13 @@ def xpath_get_text(xml, expr): return res
-def xml_item_update(xml, xpath, value): +def xml_item_update(xml, xpath, value, attr=None): root = ET.fromstring(xml) item = root.find(xpath) - item.text = value + if attr is None: + item.text = value + else: + item.set(attr, value) return ET.tostring(root, encoding="utf-8")

The function "generate_action_handler" creates a function which handles a string being returned by the underlying action function. However, in some cases, another type of element might be returned. The action "swupdate", for example, doesn't use "generate_action_handler" because it returns a Task instead of a string. Make the function "generate_action_handler" more modular so it can process different types of data. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/control/base.py | 37 +++++++++++++++++++++++++++---------- src/kimchi/control/host.py | 18 +++--------------- 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/src/kimchi/control/base.py b/src/kimchi/control/base.py index 1eb10bd..60db1df 100644 --- a/src/kimchi/control/base.py +++ b/src/kimchi/control/base.py @@ -65,6 +65,29 @@ class Resource(object): raise cherrypy.HTTPRedirect(self.uri_fmt % tuple(uri_params), code) def generate_action_handler(self, action_name, action_args=None): + def _render_element(self, ident): + self._redirect(ident) + uri_params = [] + for arg in self.model_args: + if arg is None: + arg = '' + uri_params.append(urllib2.quote(arg.encode('utf-8'), + safe="")) + raise internal_redirect(self.uri_fmt % tuple(uri_params)) + + return self._generate_action_handler_base(action_name, _render_element, + action_args) + + def generate_action_handler_task(self, action_name, action_args=None): + def _render_task(self, task): + cherrypy.response.status = 202 + return kimchi.template.render('Task', task) + + return self._generate_action_handler_base(action_name, _render_task, + action_args) + + def _generate_action_handler_base(self, action_name, render_fn, + action_args=None): def wrapper(*args, **kwargs): validate_method(('POST'), self.role_key, self.admin_methods) try: @@ -76,16 +99,10 @@ class Resource(object): if action_args is not None: request = parse_request() model_args.extend(request[key] for key in action_args) - fn = getattr(self.model, 
model_fn(self, action_name)) - ident = fn(*model_args) - self._redirect(ident) - uri_params = [] - for arg in self.model_args: - if arg is None: - arg = '' - uri_params.append(urllib2.quote(arg.encode('utf-8'), - safe="")) - raise internal_redirect(self.uri_fmt % tuple(uri_params)) + + action_fn = getattr(self.model, model_fn(self, action_name)) + action_result = action_fn(*model_args) + return render_fn(self, action_result) except MissingParameter, e: raise cherrypy.HTTPError(400, e.message) except InvalidParameter, e: diff --git a/src/kimchi/control/host.py b/src/kimchi/control/host.py index 7bcae72..4362da7 100644 --- a/src/kimchi/control/host.py +++ b/src/kimchi/control/host.py @@ -17,12 +17,9 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -import cherrypy - from kimchi.control.base import Collection, Resource, SimpleCollection -from kimchi.control.utils import UrlSubNode, validate_method -from kimchi.exception import OperationFailed, NotFoundError -from kimchi.template import render +from kimchi.control.utils import UrlSubNode +from kimchi.exception import NotFoundError @UrlSubNode('host', True) @@ -41,16 +38,7 @@ class Host(Resource): self.repositories = Repositories(self.model) self.users = Users(self.model) self.groups = Groups(self.model) - - @cherrypy.expose - def swupdate(self): - validate_method(('POST'), self.role_key, self.admin_methods) - try: - task = self.model.host_swupdate() - cherrypy.response.status = 202 - return render("Task", task) - except OperationFailed, e: - raise cherrypy.HTTPError(500, e.message) + self.swupdate = self.generate_action_handler_task('swupdate') @property def data(self): -- 1.9.3

Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com> On 11/02/2014 11:05 PM, Crístian Viana wrote:
The function "generate_action_handler" creates a function which handles a string being returned by the underlying action function. However, in some cases, another type of element might be returned. The action "swupdate", for example, doesn't use "generate_action_handler" because it returns a Task instead of a string.
Make the function "generate_action_handler" more modular so it can process different types of data.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/control/base.py | 37 +++++++++++++++++++++++++++---------- src/kimchi/control/host.py | 18 +++--------------- 2 files changed, 30 insertions(+), 25 deletions(-)
diff --git a/src/kimchi/control/base.py b/src/kimchi/control/base.py index 1eb10bd..60db1df 100644 --- a/src/kimchi/control/base.py +++ b/src/kimchi/control/base.py @@ -65,6 +65,29 @@ class Resource(object): raise cherrypy.HTTPRedirect(self.uri_fmt % tuple(uri_params), code)
def generate_action_handler(self, action_name, action_args=None): + def _render_element(self, ident): + self._redirect(ident) + uri_params = [] + for arg in self.model_args: + if arg is None: + arg = '' + uri_params.append(urllib2.quote(arg.encode('utf-8'), + safe="")) + raise internal_redirect(self.uri_fmt % tuple(uri_params)) + + return self._generate_action_handler_base(action_name, _render_element, + action_args) + + def generate_action_handler_task(self, action_name, action_args=None): + def _render_task(self, task): + cherrypy.response.status = 202 + return kimchi.template.render('Task', task) + + return self._generate_action_handler_base(action_name, _render_task, + action_args) + + def _generate_action_handler_base(self, action_name, render_fn, + action_args=None): def wrapper(*args, **kwargs): validate_method(('POST'), self.role_key, self.admin_methods) try: @@ -76,16 +99,10 @@ class Resource(object): if action_args is not None: request = parse_request() model_args.extend(request[key] for key in action_args) - fn = getattr(self.model, model_fn(self, action_name)) - ident = fn(*model_args) - self._redirect(ident) - uri_params = [] - for arg in self.model_args: - if arg is None: - arg = '' - uri_params.append(urllib2.quote(arg.encode('utf-8'), - safe="")) - raise internal_redirect(self.uri_fmt % tuple(uri_params)) + + action_fn = getattr(self.model, model_fn(self, action_name)) + action_result = action_fn(*model_args) + return render_fn(self, action_result) except MissingParameter, e: raise cherrypy.HTTPError(400, e.message) except InvalidParameter, e: diff --git a/src/kimchi/control/host.py b/src/kimchi/control/host.py index 7bcae72..4362da7 100644 --- a/src/kimchi/control/host.py +++ b/src/kimchi/control/host.py @@ -17,12 +17,9 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-import cherrypy - from kimchi.control.base import Collection, Resource, SimpleCollection -from kimchi.control.utils import UrlSubNode, validate_method -from kimchi.exception import OperationFailed, NotFoundError -from kimchi.template import render +from kimchi.control.utils import UrlSubNode +from kimchi.exception import NotFoundError
@UrlSubNode('host', True) @@ -41,16 +38,7 @@ class Host(Resource): self.repositories = Repositories(self.model) self.users = Users(self.model) self.groups = Groups(self.model) - - @cherrypy.expose - def swupdate(self): - validate_method(('POST'), self.role_key, self.admin_methods) - try: - task = self.model.host_swupdate() - cherrypy.response.status = 202 - return render("Task", task) - except OperationFailed, e: - raise cherrypy.HTTPError(500, e.message) + self.swupdate = self.generate_action_handler_task('swupdate')
@property def data(self):

Sometimes we need to wait for a task to finish. Instead of writing the waiting loop everytime, we can now use the function "wait". Add the function "TaskModel.wait" which waits for a task to finish for a maximum ammount of time (in seconds). If the task finishes before the timeout expires, the function returns successfully; otherwise, an exception is raised. Also, update calls to an already existing function which does the same thing in the test code, and replace its usages with this new function. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/i18n.py | 1 + src/kimchi/model/tasks.py | 28 ++++++++++++++++++++++++++++ tests/test_model.py | 19 +++++++++---------- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index f789259..10408bf 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -35,6 +35,7 @@ messages = { "KCHASYNC0001E": _("Datastore is not initiated in the model object."), "KCHASYNC0002E": _("Unable to start task due error: %(err)s"), + "KCHASYNC0003E": _("Timeout of %(seconds)s seconds expired while running task '%(task)s."), "KCHAUTH0001E": _("Authentication failed for user '%(username)s'. [Error code: %(code)s]"), "KCHAUTH0002E": _("You are not authorized to access Kimchi"), diff --git a/src/kimchi/model/tasks.py b/src/kimchi/model/tasks.py index f25bcbf..61bc2f3 100644 --- a/src/kimchi/model/tasks.py +++ b/src/kimchi/model/tasks.py @@ -18,6 +18,11 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +import time + +from kimchi.exception import TimeoutExpired + + class TasksModel(object): def __init__(self, **kargs): self.objstore = kargs['objstore'] @@ -34,3 +39,26 @@ class TaskModel(object): def lookup(self, id): with self.objstore as session: return session.get('task', str(id)) + + def wait(self, id, timeout=10): + """Wait for a task until it stops running (successfully or due to + an error). 
If the Task finishes its execution before <timeout>, this + function returns normally; otherwise an exception is raised. + + Parameters: + id -- The Task ID. + timeout -- The maximum time, in seconds, that this function should wait + for the Task. If the Task runs for more than <timeout>, + "TimeoutExpired" is raised. + """ + for i in range(0, timeout): + with self.objstore as session: + task = session.get('task', str(id)) + + if task['status'] != 'running': + return + + time.sleep(1) + + raise TimeoutExpired('KCHASYNC0003E', {'seconds': timeout, + 'task': task['target_uri']}) diff --git a/tests/test_model.py b/tests/test_model.py index de2e49a..7f33540 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -45,7 +45,6 @@ from kimchi.iscsi import TargetClient from kimchi.model import model from kimchi.rollbackcontext import RollbackContext from kimchi.utils import add_task -from utils import wait_task invalid_repository_urls = ['www.fedora.org', # missing protocol @@ -130,7 +129,7 @@ class ModelTests(unittest.TestCase): 'format': 'qcow2'} task_id = inst.storagevolumes_create('default', params)['id'] rollback.prependDefer(inst.storagevolume_delete, 'default', vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) self.assertEquals('finished', inst.task_lookup(task_id)['status']) vol_path = inst.storagevolume_lookup('default', vol)['path'] @@ -290,7 +289,7 @@ class ModelTests(unittest.TestCase): 'format': 'qcow2'} task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) vm_name = 'kimchi-cdrom' params = {'name': 'test', 'disks': [], 'cdrom': self.kimchi_iso} @@ -569,7 +568,7 @@ class ModelTests(unittest.TestCase): params['name'] = vol task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) 
self.assertEquals('finished', inst.task_lookup(task_id)['status']) fd, path = tempfile.mkstemp(dir=path) @@ -610,7 +609,7 @@ class ModelTests(unittest.TestCase): taskid = task_response['id'] vol_name = task_response['target_uri'].split('/')[-1] self.assertEquals('COPYING', vol_name) - wait_task(inst.task_lookup, taskid, timeout=60) + inst.task_wait(taskid, timeout=60) self.assertEquals('finished', inst.task_lookup(taskid)['status']) vol_path = os.path.join(args['path'], vol_name) self.assertTrue(os.path.isfile(vol_path)) @@ -1155,7 +1154,7 @@ class ModelTests(unittest.TestCase): inst = model.Model('test:///default', objstore_loc=self.tmp_store) taskid = add_task('', quick_op, inst.objstore, 'Hello') - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals(1, taskid) self.assertEquals('finished', inst.task_lookup(taskid)['status']) self.assertEquals('Hello', inst.task_lookup(taskid)['message']) @@ -1166,12 +1165,12 @@ class ModelTests(unittest.TestCase): self.assertEquals(2, taskid) self.assertEquals('running', inst.task_lookup(taskid)['status']) self.assertEquals('OK', inst.task_lookup(taskid)['message']) - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals('failed', inst.task_lookup(taskid)['status']) self.assertEquals('It was not meant to be', inst.task_lookup(taskid)['message']) taskid = add_task('', abnormal_op, inst.objstore, {}) - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals('Exception raised', inst.task_lookup(taskid)['message']) self.assertEquals('failed', inst.task_lookup(taskid)['status']) @@ -1179,7 +1178,7 @@ class ModelTests(unittest.TestCase): taskid = add_task('', continuous_ops, inst.objstore, {'result': True}) self.assertEquals('running', inst.task_lookup(taskid)['status']) - wait_task(inst.task_lookup, taskid, timeout=10) + inst.task_wait(taskid, timeout=10) self.assertEquals('finished', inst.task_lookup(taskid)['status']) # This wrapper function is needed due to 
the new backend messaging in @@ -1285,7 +1284,7 @@ class ModelTests(unittest.TestCase): task = inst.debugreports_create({'name': reportName}) rollback.prependDefer(inst.debugreport_delete, tmp_name) taskid = task['id'] - wait_task(inst.task_lookup, taskid, timeout) + inst.task_wait(taskid, timeout) self.assertEquals('finished', inst.task_lookup(taskid)['status'], "It is not necessary an error. " -- 1.9.3

On 11/02/2014 11:05 PM, Crístian Viana wrote:
Sometimes we need to wait for a task to finish. Instead of writing the waiting loop every time, we can now use the function "wait".
Add the function "TaskModel.wait" which waits for a task to finish for a maximum amount of time (in seconds). If the task finishes before the timeout expires, the function returns successfully; otherwise, an exception is raised. Also, update calls to an already existing function which does the same thing in the test code, and replace its usages with this new function.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/i18n.py | 1 + src/kimchi/model/tasks.py | 28 ++++++++++++++++++++++++++++ tests/test_model.py | 19 +++++++++---------- 3 files changed, 38 insertions(+), 10 deletions(-)
diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index f789259..10408bf 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -35,6 +35,7 @@ messages = {
"KCHASYNC0001E": _("Datastore is not initiated in the model object."), "KCHASYNC0002E": _("Unable to start task due error: %(err)s"), + "KCHASYNC0003E": _("Timeout of %(seconds)s seconds expired while running task '%(task)s."),
"KCHAUTH0001E": _("Authentication failed for user '%(username)s'. [Error code: %(code)s]"), "KCHAUTH0002E": _("You are not authorized to access Kimchi"), diff --git a/src/kimchi/model/tasks.py b/src/kimchi/model/tasks.py index f25bcbf..61bc2f3 100644 --- a/src/kimchi/model/tasks.py +++ b/src/kimchi/model/tasks.py @@ -18,6 +18,11 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+import time + +from kimchi.exception import TimeoutExpired + + class TasksModel(object): def __init__(self, **kargs): self.objstore = kargs['objstore'] @@ -34,3 +39,26 @@ class TaskModel(object): def lookup(self, id): with self.objstore as session: return session.get('task', str(id)) + + def wait(self, id, timeout=10): + """Wait for a task until it stops running (successfully or due to + an error). If the Task finishes its execution before <timeout>, this + function returns normally; otherwise an exception is raised. + + Parameters: + id -- The Task ID. + timeout -- The maximum time, in seconds, that this function should wait + for the Task. If the Task runs for more than <timeout>, + "TimeoutExpired" is raised. + """ + for i in range(0, timeout): + with self.objstore as session: + task = session.get('task', str(id)) + + if task['status'] != 'running': + return + + time.sleep(1) + + raise TimeoutExpired('KCHASYNC0003E', {'seconds': timeout, + 'task': task['target_uri']}) diff --git a/tests/test_model.py b/tests/test_model.py index de2e49a..7f33540 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -45,7 +45,6 @@ from kimchi.iscsi import TargetClient from kimchi.model import model from kimchi.rollbackcontext import RollbackContext from kimchi.utils import add_task -from utils import wait_task
If you will use the new TaskModel.wait() function in all wait_task occurrences you should also remove the wait_task implementation from utils.py
invalid_repository_urls = ['www.fedora.org', # missing protocol @@ -130,7 +129,7 @@ class ModelTests(unittest.TestCase): 'format': 'qcow2'} task_id = inst.storagevolumes_create('default', params)['id'] rollback.prependDefer(inst.storagevolume_delete, 'default', vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) self.assertEquals('finished', inst.task_lookup(task_id)['status']) vol_path = inst.storagevolume_lookup('default', vol)['path']
@@ -290,7 +289,7 @@ class ModelTests(unittest.TestCase): 'format': 'qcow2'} task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id)
vm_name = 'kimchi-cdrom' params = {'name': 'test', 'disks': [], 'cdrom': self.kimchi_iso} @@ -569,7 +568,7 @@ class ModelTests(unittest.TestCase): params['name'] = vol task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) - wait_task(inst.task_lookup, task_id) + inst.task_wait(task_id) self.assertEquals('finished', inst.task_lookup(task_id)['status'])
fd, path = tempfile.mkstemp(dir=path) @@ -610,7 +609,7 @@ class ModelTests(unittest.TestCase): taskid = task_response['id'] vol_name = task_response['target_uri'].split('/')[-1] self.assertEquals('COPYING', vol_name) - wait_task(inst.task_lookup, taskid, timeout=60) + inst.task_wait(taskid, timeout=60) self.assertEquals('finished', inst.task_lookup(taskid)['status']) vol_path = os.path.join(args['path'], vol_name) self.assertTrue(os.path.isfile(vol_path)) @@ -1155,7 +1154,7 @@ class ModelTests(unittest.TestCase): inst = model.Model('test:///default', objstore_loc=self.tmp_store) taskid = add_task('', quick_op, inst.objstore, 'Hello') - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals(1, taskid) self.assertEquals('finished', inst.task_lookup(taskid)['status']) self.assertEquals('Hello', inst.task_lookup(taskid)['message']) @@ -1166,12 +1165,12 @@ class ModelTests(unittest.TestCase): self.assertEquals(2, taskid) self.assertEquals('running', inst.task_lookup(taskid)['status']) self.assertEquals('OK', inst.task_lookup(taskid)['message']) - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals('failed', inst.task_lookup(taskid)['status']) self.assertEquals('It was not meant to be', inst.task_lookup(taskid)['message']) taskid = add_task('', abnormal_op, inst.objstore, {}) - wait_task(inst.task_lookup, taskid) + inst.task_wait(taskid) self.assertEquals('Exception raised', inst.task_lookup(taskid)['message']) self.assertEquals('failed', inst.task_lookup(taskid)['status']) @@ -1179,7 +1178,7 @@ class ModelTests(unittest.TestCase): taskid = add_task('', continuous_ops, inst.objstore, {'result': True}) self.assertEquals('running', inst.task_lookup(taskid)['status']) - wait_task(inst.task_lookup, taskid, timeout=10) + inst.task_wait(taskid, timeout=10) self.assertEquals('finished', inst.task_lookup(taskid)['status'])
# This wrapper function is needed due to the new backend messaging in @@ -1285,7 +1284,7 @@ class ModelTests(unittest.TestCase): task = inst.debugreports_create({'name': reportName}) rollback.prependDefer(inst.debugreport_delete, tmp_name) taskid = task['id'] - wait_task(inst.task_lookup, taskid, timeout) + inst.task_wait(taskid, timeout) self.assertEquals('finished', inst.task_lookup(taskid)['status'], "It is not necessary an error. "

On 03-11-2014 14:12, Aline Manera wrote:
If you will use the new TaskModel.wait() function in all wait_task occurrences you should also remove the wait_task implementation from utils.py
They have different implementations. The function implemented on TaskModel is supposed to be used by the model (e.g. some_task.wait()), which is needed now to wait for "StorageVolumes.create". The one implemented in tests/utils.py is meant to be used for tests, especially for the REST tests (i.e. running "GET /tasks/1" and parsing its JSON result). The tests cannot use the model implementation because they don't use the model directly and the model cannot use the test implementation because it doesn't handle REST commands. Actually, the function in tests/utils.py receives a function as a parameter so it can handle different lookup approaches (i.e. by model / by REST). But IMO, when using the model directly, "wait" is not a "util" function and it should be defined in the model itself. So I think we have two options here: 1) Implement one "wait" function for the model and another one for the tests; 2) Implement one generic "wait" function and another one with the partial "lookup" implementation to handle the REST commands; More than one function will have to be implemented anyway. And I prefer option (1) because (2) looks like a workaround. What should I do?

On 11/04/2014 11:10 AM, Crístian Viana wrote:
On 03-11-2014 14:12, Aline Manera wrote:
If you will use the new TaskModel.wait() function in all wait_task occurrences you should also remove the wait_task implementation from utils.py
They have different implementations. The function implemented on TaskModel is supposed to be used by the model (e.g. some_task.wait()), which is needed now to wait for "StorageVolumes.create". The one implemented in tests/utils.py is meant to be used for tests, especially for the REST tests (i.e. running "GET /tasks/1" and parsing its JSON result). The tests cannot use the model implementation because they don't use the model directly and the model cannot use the test implementation because it doesn't handle REST commands.
Actually, the function in tests/utils.py receives a function as a parameter so it can handle different lookup approaches (i.e. by model / by REST). But IMO, when using the model directly, "wait" is not a "util" function and it should be defined in the model itself.
So I think we have two options here:
1) Implement one "wait" function for the model and another one for the tests; 2) Implement one generic "wait" function and another one with the partial "lookup" implementation to handle the REST commands;
More than one function will have to be implemented anyway. And I prefer option (1) because (2) looks like a workaround.
What should I do?
I thought about one more option. In both cases, we use the AsyncTask() object. So isn't there a way to implement the wait_task() there and reuse it in all cases? I haven't checked the code yet - it is just an idea.

During tests, some storage pools are created locally and their directories are left behind in the system. The pool itself is removed from libvirt, but the directory remains. Remove the test storage pools directories after the tests are finished. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- tests/test_model.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/test_model.py b/tests/test_model.py index 7f33540..21e1b6b 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -277,6 +277,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) # Activate the pool before adding any volume @@ -491,6 +492,8 @@ class ModelTests(unittest.TestCase): num = len(pools) + 1 inst.storagepools_create(poolDef) + if poolDef['type'] == 'dir': + rollback.prependDefer(shutil.rmtree, poolDef['path']) rollback.prependDefer(inst.storagepool_delete, name) pools = inst.storagepools_get_list() @@ -547,6 +550,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) self.assertRaises(InvalidOperation, inst.storagevolumes_get_list, @@ -656,6 +660,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) inst.template_update('test', params) @@ -1372,6 +1377,8 @@ class ModelTests(unittest.TestCase): 'path': '/tmp/kimchi-images', 'type': 'kimchi-iso'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, '/tmp/kimchi-images') + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_deactivate, args['name']) time.sleep(1) -- 1.9.3

Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com> On 11/02/2014 11:05 PM, Crístian Viana wrote:
During tests, some storage pools are created locally and their directories are left behind in the system. The pool itself is removed from libvirt, but the directory remains.
Remove the test storage pools directories after the tests are finished.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- tests/test_model.py | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/tests/test_model.py b/tests/test_model.py index 7f33540..21e1b6b 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -277,6 +277,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool)
# Activate the pool before adding any volume @@ -491,6 +492,8 @@ class ModelTests(unittest.TestCase): num = len(pools) + 1
inst.storagepools_create(poolDef) + if poolDef['type'] == 'dir': + rollback.prependDefer(shutil.rmtree, poolDef['path']) rollback.prependDefer(inst.storagepool_delete, name)
pools = inst.storagepools_get_list() @@ -547,6 +550,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool)
self.assertRaises(InvalidOperation, inst.storagevolumes_get_list, @@ -656,6 +660,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool)
inst.template_update('test', params) @@ -1372,6 +1377,8 @@ class ModelTests(unittest.TestCase): 'path': '/tmp/kimchi-images', 'type': 'kimchi-iso'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, '/tmp/kimchi-images') + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_deactivate, args['name'])
time.sleep(1)

The tests fail with this patch. On 11/03/2014 02:13 PM, Aline Manera wrote:
Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com>
On 11/02/2014 11:05 PM, Crístian Viana wrote:
During tests, some storage pools are created locally and their directories are left behind in the system. The pool itself is removed from libvirt, but the directory remains.
Remove the test storage pools directories after the tests are finished.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- tests/test_model.py | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/tests/test_model.py b/tests/test_model.py index 7f33540..21e1b6b 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -277,6 +277,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) # Activate the pool before adding any volume @@ -491,6 +492,8 @@ class ModelTests(unittest.TestCase): num = len(pools) + 1 inst.storagepools_create(poolDef) + if poolDef['type'] == 'dir': + rollback.prependDefer(shutil.rmtree, poolDef['path']) rollback.prependDefer(inst.storagepool_delete, name) pools = inst.storagepools_get_list() @@ -547,6 +550,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) self.assertRaises(InvalidOperation, inst.storagevolumes_get_list, @@ -656,6 +660,7 @@ class ModelTests(unittest.TestCase): 'path': path, 'type': 'dir'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_delete, pool) inst.template_update('test', params) @@ -1372,6 +1377,8 @@ class ModelTests(unittest.TestCase): 'path': '/tmp/kimchi-images', 'type': 'kimchi-iso'} inst.storagepools_create(args) + rollback.prependDefer(shutil.rmtree, '/tmp/kimchi-images') + rollback.prependDefer(shutil.rmtree, args['path']) rollback.prependDefer(inst.storagepool_deactivate, args['name']) time.sleep(1)
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

In order to clone VMs, we will need to be able to copy storage volumes. Add a new method to create a storage volume. This method requires the parameter 'volume_path' which specifies the base storage volume. The new volume will be created with the same content as the original one. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 1 + src/kimchi/API.json | 5 +++ src/kimchi/i18n.py | 1 + src/kimchi/mockmodel.py | 44 ++++++++++++++++++++++- src/kimchi/model/storagevolumes.py | 73 +++++++++++++++++++++++++++++++++++--- tests/test_model.py | 19 ++++++++++ tests/test_rest.py | 23 ++++++++++++ 7 files changed, 161 insertions(+), 5 deletions(-) diff --git a/docs/API.md b/docs/API.md index 9c06f85..29ae4e1 100644 --- a/docs/API.md +++ b/docs/API.md @@ -445,6 +445,7 @@ A interface represents available network interface on VM. * format: The format of the defined Storage Volume * file: File to be uploaded, passed through form data * url: URL to be downloaded + * volume_path: The path of an existing storage volume to be copied. ### Resource: Storage Volume diff --git a/src/kimchi/API.json b/src/kimchi/API.json index 0ad36ab..81492f5 100644 --- a/src/kimchi/API.json +++ b/src/kimchi/API.json @@ -216,6 +216,11 @@ "type": "string", "pattern": "^(http|ftp)[s]?://", "error": "KCHVOL0021E" + }, + "volume_path": { + "description": "The path of an existing storage volume to be copied", + "type": "string", + "error": "KCHVOL0023E" } } }, diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 10408bf..712e5a6 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -207,6 +207,7 @@ messages = { "KCHVOL0020E": _("Storage volume capacity must be an integer number."), "KCHVOL0021E": _("Storage volume URL must be http://, https://, ftp:// or ftps://."), "KCHVOL0022E": _("Unable to access file %(url)s. 
Please, check it."), + "KCHVOL0023E": _("Storage volume path must be a string"), "KCHIFACE0001E": _("Interface %(name)s does not exist"), diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index 7163f8d..baee0b6 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -485,7 +485,7 @@ class MockModel(object): raise NotFoundError("KCHPOOL0002E", {'name': name}) def storagevolumes_create(self, pool_name, params): - vol_source = ['file', 'url', 'capacity'] + vol_source = ['file', 'url', 'capacity', 'volume_path'] require_name_params = ['capacity'] name = params.get('name') @@ -506,6 +506,10 @@ class MockModel(object): name = os.path.basename(params['file'].filename) elif create_param == 'url': name = os.path.basename(params['url']) + elif create_param == 'volume_path': + basename = os.path.basename(params['volume_path']) + split = os.path.splitext(basename) + name = u'%s-clone%s' % (split[0], split[1]) else: name = 'upload-%s' % int(time.time()) params['name'] = name @@ -588,6 +592,44 @@ class MockModel(object): cb('OK', True) + def _create_volume_with_volume_path(self, cb, params): + try: + new_vol_name = params['name'].decode('utf-8') + new_pool_name = params['pool'].decode('utf-8') + orig_vol_path = params['volume_path'].decode('utf-8') + + orig_pool = orig_vol = None + for pn, p in self._mock_storagepools.items(): + for vn, v in p._volumes.items(): + if v.info['path'] == orig_vol_path: + orig_pool = self._get_storagepool(pn) + orig_vol = self._get_storagevolume(pn, vn) + break + + if orig_vol is not None: + break + + if orig_vol is None: + raise OperationFailed('KCHVOL0007E', + {'name': new_vol_name, + 'pool': new_pool_name, + 'err': 'could not find volume \'%s\'' % + orig_vol_path}) + + new_vol = copy.deepcopy(orig_vol) + new_vol.info['name'] = new_vol_name + new_vol.info['path'] = os.path.join(orig_pool.info['path'], + new_vol_name) + + new_pool = self._get_storagepool(new_pool_name) + new_pool._volumes[new_vol_name] = new_vol + except 
(KeyError, NotFoundError), e: + raise OperationFailed('KCHVOL0007E', + {'name': new_vol_name, 'pool': new_pool_name, + 'err': e.message}) + + cb('OK', True) + def storagevolume_lookup(self, pool, name): if self._get_storagepool(pool).info['state'] != 'active': raise InvalidOperation("KCHVOL0005E", {'pool': pool, diff --git a/src/kimchi/model/storagevolumes.py b/src/kimchi/model/storagevolumes.py index 9ff43e6..d7325e4 100644 --- a/src/kimchi/model/storagevolumes.py +++ b/src/kimchi/model/storagevolumes.py @@ -18,9 +18,11 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import contextlib +import lxml.etree as ET import os import time import urllib2 +from lxml.builder import E import libvirt @@ -53,9 +55,10 @@ class StorageVolumesModel(object): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.task = TaskModel(**kargs) + self.storagevolume = StorageVolumeModel(**kargs) def create(self, pool_name, params): - vol_source = ['file', 'url', 'capacity'] + vol_source = ['file', 'url', 'capacity', 'volume_path'] name = params.get('name') @@ -81,13 +84,17 @@ class StorageVolumesModel(object): if create_param in REQUIRE_NAME_PARAMS: raise InvalidParameter('KCHVOL0016E') - # if 'name' is omitted - except for the methods listed in - # 'REQUIRE_NAME_PARAMS' - the default volume name will be the - # file/URL basename. + # if 'name' is omitted,the default volume name for 'file' and 'url' + # will be the file/URL basename, and for 'volume' it will be + # '<volume-name>-clone.<volume-extension>'. 
if create_param == 'file': name = os.path.basename(params['file'].filename) elif create_param == 'url': name = os.path.basename(params['url']) + elif create_param == 'volume_path': + basename = os.path.basename(params['volume_path']) + split = os.path.splitext(basename) + name = u'%s-clone%s' % (split[0], split[1]) else: name = 'upload-%s' % int(time.time()) params['name'] = name @@ -221,6 +228,64 @@ class StorageVolumesModel(object): StoragePoolModel.get_storagepool(pool_name, self.conn).refresh(0) cb('OK', True) + def _create_volume_with_volume_path(self, cb, params): + """Create a storage volume based on an existing volume. + + The existing volume is referenced by its path. This function copies all + the data inside the original volume into the new one. + + Arguments: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "pool": the name of the new pool. + "name": the name of the new volume. + "volume_path": the file path of the original storage volume. 
+ """ + try: + new_vol_name = params['name'].decode('utf-8') + new_pool_name = params['pool'].decode('utf-8') + orig_vol_path = params['volume_path'].decode('utf-8') + except KeyError, e: + raise MissingParameter('KCHVOL0004E', + {'item': str(e), 'volume': new_vol_name}) + + try: + cb('setting up volume creation') + vir_conn = self.conn.get() + orig_vir_vol = vir_conn.storageVolLookupByPath(orig_vol_path) + orig_vol_name = orig_vir_vol.name().decode('utf-8') + orig_vir_pool = orig_vir_vol.storagePoolLookupByVolume() + orig_pool_name = orig_vir_pool.name().decode('utf-8') + orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) + + new_vir_pool = StoragePoolModel.get_storagepool(new_pool_name, + self.conn) + + cb('building volume XML') + root_elem = E.volume() + root_elem.append(E.name(new_vol_name)) + root_elem.append(E.capacity(unicode(orig_vol['capacity']), + unit='bytes')) + target_elem = E.target() + target_elem.append(E.format(type=orig_vol['format'])) + root_elem.append(target_elem) + new_vol_xml = ET.tostring(root_elem, encoding='utf-8', + pretty_print=True) + + cb('creating volume') + new_vir_pool.createXMLFrom(new_vol_xml, orig_vir_vol, 0) + except (InvalidOperation, NotFoundError, libvirt.libvirtError), e: + raise OperationFailed("KCHVOL0007E", + {'name': new_vol_name, 'pool': new_pool_name, + 'err': e.get_error_message()}) + + cb('adding volume to the object store') + new_vol_id = '%s:%s' % (new_pool_name, new_vol_name) + with self.objstore as session: + session.store('storagevolume', new_vol_id, {'ref_cnt': 0}) + + cb('OK', True) + def get_list(self, pool_name): pool = StoragePoolModel.get_storagepool(pool_name, self.conn) if not pool.isActive(): diff --git a/tests/test_model.py b/tests/test_model.py index 21e1b6b..b165731 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -623,6 +623,25 @@ class ModelTests(unittest.TestCase): cp_content = cp_file.read() self.assertEquals(vol_content, cp_content) + # clone the volume created 
above + params = {'volume_path': vol_path} + task = inst.storagevolumes_create(pool, params) + taskid = task['id'] + cloned_vol_name = task['target_uri'].split('/')[-1] + inst.task_wait(taskid) + self.assertEquals('finished', inst.task_lookup(taskid)['status']) + rollback.prependDefer(inst.storagevolume_delete, pool, + cloned_vol_name) + + orig_vol = inst.storagevolume_lookup(pool, vol_name) + cloned_vol = inst.storagevolume_lookup(pool, cloned_vol_name) + + self.assertNotEquals(orig_vol['path'], cloned_vol['path']) + del orig_vol['path'] + del cloned_vol['path'] + + self.assertEquals(orig_vol, cloned_vol) + @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_template_storage_customise(self): inst = model.Model(objstore_loc=self.tmp_store) diff --git a/tests/test_rest.py b/tests/test_rest.py index 9bc930f..66072bc 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -1047,6 +1047,29 @@ class RestTests(unittest.TestCase): resp = self.request('/storagepools/pool-1/storagevolumes/%s' % vol_name, '{}', 'GET') self.assertEquals(200, resp.status) + vol = json.loads(resp.read()) + + # clone the volume created above + req = json.dumps({'volume_path': vol['path']}) + resp = self.request('/storagepools/pool-1/storagevolumes', req, 'POST') + self.assertEquals(202, resp.status) + task = json.loads(resp.read()) + cloned_vol_name = task['target_uri'].split('/')[-1] + wait_task(self._task_lookup, task['id']) + task = json.loads(self.request('/tasks/%s' % task['id']).read()) + self.assertEquals('finished', task['status']) + resp = self.request('/storagepools/pool-1/storagevolumes/%s' % + cloned_vol_name, '{}', 'GET') + self.assertEquals(200, resp.status) + cloned_vol = json.loads(resp.read()) + + self.assertNotEquals(vol['name'], cloned_vol['name']) + del vol['name'] + del cloned_vol['name'] + self.assertNotEquals(vol['path'], cloned_vol['path']) + del vol['path'] + del cloned_vol['path'] + self.assertEquals(vol, cloned_vol) # Now remove the 
StoragePool from mock model self._delete_pool('pool-1') -- 1.9.3

On 11/02/2014 11:05 PM, Crístian Viana wrote:
In order to clone VMs, we will need to be able to copy storage volumes.
Add a new method to create a storage volume. This method requires the parameter 'volume_path' which specifies the base storage volume. The new volume will be created with the same content as the original one.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 1 + src/kimchi/API.json | 5 +++ src/kimchi/i18n.py | 1 + src/kimchi/mockmodel.py | 44 ++++++++++++++++++++++- src/kimchi/model/storagevolumes.py | 73 +++++++++++++++++++++++++++++++++++--- tests/test_model.py | 19 ++++++++++ tests/test_rest.py | 23 ++++++++++++ 7 files changed, 161 insertions(+), 5 deletions(-)
diff --git a/docs/API.md b/docs/API.md index 9c06f85..29ae4e1 100644 --- a/docs/API.md +++ b/docs/API.md @@ -445,6 +445,7 @@ A interface represents available network interface on VM. * format: The format of the defined Storage Volume * file: File to be uploaded, passed through form data * url: URL to be downloaded + * volume_path: The path of an existing storage volume to be copied.
It is like a clone, right? So the API should be POST /storagepools/<pool>/storagevolumes/<volumes>/clone And what about the destination path? Isn't it needed to do the clone? Even if it is optional. Because, in the guest clone case the volume can be cloned in a different pool from the existing one.
### Resource: Storage Volume
diff --git a/src/kimchi/API.json b/src/kimchi/API.json index 0ad36ab..81492f5 100644 --- a/src/kimchi/API.json +++ b/src/kimchi/API.json @@ -216,6 +216,11 @@ "type": "string", "pattern": "^(http|ftp)[s]?://", "error": "KCHVOL0021E" + }, + "volume_path": { + "description": "The path of an existing storage volume to be copied", + "type": "string", + "error": "KCHVOL0023E" } } }, diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 10408bf..712e5a6 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -207,6 +207,7 @@ messages = { "KCHVOL0020E": _("Storage volume capacity must be an integer number."), "KCHVOL0021E": _("Storage volume URL must be http://, https://, ftp:// or ftps://."), "KCHVOL0022E": _("Unable to access file %(url)s. Please, check it."), + "KCHVOL0023E": _("Storage volume path must be a string"),
"KCHIFACE0001E": _("Interface %(name)s does not exist"),
diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index 7163f8d..baee0b6 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -485,7 +485,7 @@ class MockModel(object): raise NotFoundError("KCHPOOL0002E", {'name': name})
def storagevolumes_create(self, pool_name, params): - vol_source = ['file', 'url', 'capacity'] + vol_source = ['file', 'url', 'capacity', 'volume_path'] require_name_params = ['capacity']
name = params.get('name') @@ -506,6 +506,10 @@ class MockModel(object): name = os.path.basename(params['file'].filename) elif create_param == 'url': name = os.path.basename(params['url']) + elif create_param == 'volume_path': + basename = os.path.basename(params['volume_path']) + split = os.path.splitext(basename) + name = u'%s-clone%s' % (split[0], split[1]) else: name = 'upload-%s' % int(time.time()) params['name'] = name @@ -588,6 +592,44 @@ class MockModel(object):
cb('OK', True)
+ def _create_volume_with_volume_path(self, cb, params): + try: + new_vol_name = params['name'].decode('utf-8') + new_pool_name = params['pool'].decode('utf-8') + orig_vol_path = params['volume_path'].decode('utf-8') + + orig_pool = orig_vol = None + for pn, p in self._mock_storagepools.items(): + for vn, v in p._volumes.items(): + if v.info['path'] == orig_vol_path: + orig_pool = self._get_storagepool(pn) + orig_vol = self._get_storagevolume(pn, vn) + break + + if orig_vol is not None: + break + + if orig_vol is None: + raise OperationFailed('KCHVOL0007E', + {'name': new_vol_name, + 'pool': new_pool_name, + 'err': 'could not find volume \'%s\'' % + orig_vol_path}) + + new_vol = copy.deepcopy(orig_vol) + new_vol.info['name'] = new_vol_name + new_vol.info['path'] = os.path.join(orig_pool.info['path'], + new_vol_name) + + new_pool = self._get_storagepool(new_pool_name) + new_pool._volumes[new_vol_name] = new_vol + except (KeyError, NotFoundError), e: + raise OperationFailed('KCHVOL0007E', + {'name': new_vol_name, 'pool': new_pool_name, + 'err': e.message}) + + cb('OK', True) + def storagevolume_lookup(self, pool, name): if self._get_storagepool(pool).info['state'] != 'active': raise InvalidOperation("KCHVOL0005E", {'pool': pool, diff --git a/src/kimchi/model/storagevolumes.py b/src/kimchi/model/storagevolumes.py index 9ff43e6..d7325e4 100644 --- a/src/kimchi/model/storagevolumes.py +++ b/src/kimchi/model/storagevolumes.py @@ -18,9 +18,11 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import contextlib +import lxml.etree as ET import os import time import urllib2 +from lxml.builder import E
import libvirt
@@ -53,9 +55,10 @@ class StorageVolumesModel(object): self.conn = kargs['conn'] self.objstore = kargs['objstore'] self.task = TaskModel(**kargs) + self.storagevolume = StorageVolumeModel(**kargs)
def create(self, pool_name, params): - vol_source = ['file', 'url', 'capacity'] + vol_source = ['file', 'url', 'capacity', 'volume_path']
name = params.get('name')
@@ -81,13 +84,17 @@ class StorageVolumesModel(object): if create_param in REQUIRE_NAME_PARAMS: raise InvalidParameter('KCHVOL0016E')
- # if 'name' is omitted - except for the methods listed in - # 'REQUIRE_NAME_PARAMS' - the default volume name will be the - # file/URL basename. + # if 'name' is omitted,the default volume name for 'file' and 'url' + # will be the file/URL basename, and for 'volume' it will be + # '<volume-name>-clone.<volume-extension>'. if create_param == 'file': name = os.path.basename(params['file'].filename) elif create_param == 'url': name = os.path.basename(params['url']) + elif create_param == 'volume_path': + basename = os.path.basename(params['volume_path']) + split = os.path.splitext(basename) + name = u'%s-clone%s' % (split[0], split[1]) else: name = 'upload-%s' % int(time.time()) params['name'] = name @@ -221,6 +228,64 @@ class StorageVolumesModel(object): StoragePoolModel.get_storagepool(pool_name, self.conn).refresh(0) cb('OK', True)
+ def _create_volume_with_volume_path(self, cb, params): + """Create a storage volume based on an existing volume. + + The existing volume is referenced by its path. This function copies all + the data inside the original volume into the new one. + + Arguments: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "pool": the name of the new pool. + "name": the name of the new volume. + "volume_path": the file path of the original storage volume. + """ + try: + new_vol_name = params['name'].decode('utf-8') + new_pool_name = params['pool'].decode('utf-8') + orig_vol_path = params['volume_path'].decode('utf-8') + except KeyError, e: + raise MissingParameter('KCHVOL0004E', + {'item': str(e), 'volume': new_vol_name}) + + try: + cb('setting up volume creation') + vir_conn = self.conn.get() + orig_vir_vol = vir_conn.storageVolLookupByPath(orig_vol_path) + orig_vol_name = orig_vir_vol.name().decode('utf-8') + orig_vir_pool = orig_vir_vol.storagePoolLookupByVolume() + orig_pool_name = orig_vir_pool.name().decode('utf-8') + orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) + + new_vir_pool = StoragePoolModel.get_storagepool(new_pool_name, + self.conn) + + cb('building volume XML') + root_elem = E.volume() + root_elem.append(E.name(new_vol_name)) + root_elem.append(E.capacity(unicode(orig_vol['capacity']), + unit='bytes')) + target_elem = E.target() + target_elem.append(E.format(type=orig_vol['format'])) + root_elem.append(target_elem) + new_vol_xml = ET.tostring(root_elem, encoding='utf-8', + pretty_print=True) + + cb('creating volume') + new_vir_pool.createXMLFrom(new_vol_xml, orig_vir_vol, 0) + except (InvalidOperation, NotFoundError, libvirt.libvirtError), e: + raise OperationFailed("KCHVOL0007E", + {'name': new_vol_name, 'pool': new_pool_name, + 'err': e.get_error_message()}) + + cb('adding volume to the object store') + new_vol_id = '%s:%s' % (new_pool_name, new_vol_name) + with self.objstore as 
session: + session.store('storagevolume', new_vol_id, {'ref_cnt': 0}) + + cb('OK', True) + def get_list(self, pool_name): pool = StoragePoolModel.get_storagepool(pool_name, self.conn) if not pool.isActive(): diff --git a/tests/test_model.py b/tests/test_model.py index 21e1b6b..b165731 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -623,6 +623,25 @@ class ModelTests(unittest.TestCase): cp_content = cp_file.read() self.assertEquals(vol_content, cp_content)
+ # clone the volume created above + params = {'volume_path': vol_path} + task = inst.storagevolumes_create(pool, params) + taskid = task['id'] + cloned_vol_name = task['target_uri'].split('/')[-1] + inst.task_wait(taskid) + self.assertEquals('finished', inst.task_lookup(taskid)['status']) + rollback.prependDefer(inst.storagevolume_delete, pool, + cloned_vol_name) + + orig_vol = inst.storagevolume_lookup(pool, vol_name) + cloned_vol = inst.storagevolume_lookup(pool, cloned_vol_name) + + self.assertNotEquals(orig_vol['path'], cloned_vol['path']) + del orig_vol['path'] + del cloned_vol['path'] + + self.assertEquals(orig_vol, cloned_vol) + @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_template_storage_customise(self): inst = model.Model(objstore_loc=self.tmp_store) diff --git a/tests/test_rest.py b/tests/test_rest.py index 9bc930f..66072bc 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -1047,6 +1047,29 @@ class RestTests(unittest.TestCase): resp = self.request('/storagepools/pool-1/storagevolumes/%s' % vol_name, '{}', 'GET') self.assertEquals(200, resp.status) + vol = json.loads(resp.read()) + + # clone the volume created above + req = json.dumps({'volume_path': vol['path']}) + resp = self.request('/storagepools/pool-1/storagevolumes', req, 'POST') + self.assertEquals(202, resp.status) + task = json.loads(resp.read()) + cloned_vol_name = task['target_uri'].split('/')[-1] + wait_task(self._task_lookup, task['id']) + task = json.loads(self.request('/tasks/%s' % task['id']).read()) + self.assertEquals('finished', task['status']) + resp = self.request('/storagepools/pool-1/storagevolumes/%s' % + cloned_vol_name, '{}', 'GET') + self.assertEquals(200, resp.status) + cloned_vol = json.loads(resp.read()) + + self.assertNotEquals(vol['name'], cloned_vol['name']) + del vol['name'] + del cloned_vol['name'] + self.assertNotEquals(vol['path'], cloned_vol['path']) + del vol['path'] + del cloned_vol['path'] + self.assertEquals(vol, 
cloned_vol)
# Now remove the StoragePool from mock model self._delete_pool('pool-1')

On 03-11-2014 14:32, Aline Manera wrote:
It is like a clone, right? So the API should be POST /storagepools/<pool>/storagevolumes/<volumes>/clone
Well, you can see it that way as well but I was influenced by the libvirt naming used to implement this: virStorageVolCreateXMLFrom(pool, xml, flags). We can think of it as a clone and change its API to the one you suggested.
And what about the destination path? Isn't it needed to do the clone? Even if it is optional. Because, in the guest clone case the volume can be cloned in a different pool from the existing one.
Using the API I implemented, the destination path was provided by the pool used to create the new volume: POST /storagepool/*mypool*/storagevolumes {'volume_path': '/home/user/image.img'} The command above would create a new volume on the pool "mypool", that's why the destination path wasn't needed. But if we implement it as a clone, we will need to provide a new parameter (e.g. "pool") to act as the destination path.

On 11/04/2014 11:10 AM, Crístian Viana wrote:
On 03-11-2014 14:32, Aline Manera wrote:
It is like a clone, right? So the API should be POST /storagepools/<pool>/storagevolumes/<volumes>/clone
Well, you can see it that way as well but I was influenced by the libvirt naming used to implement this: virStorageVolCreateXMLFrom(pool, xml, flags). We can think of it as a clone and change its API to the one you suggested.
ACK.
And what about the destination path? Isn't it needed to do the clone? Even if it is optional. Because, in the guest clone case the volume can be cloned in a different pool from the existing one.
Using the API I implemented, the destination path was provided by the pool used to create the new volume:
POST /storagepool/*mypool*/storagevolumes {'volume_path': '/home/user/image.img'}
The command above would create a new volume on the pool "mypool", that's why the destination path wasn't needed.
But if we implement it as a clone, we will need to provide a new parameter (e.g. "pool") to act as the destination path.
Thanks for the explanation.

The new command "POST /vms/<vm-name>/clone" will create a new virtual machine based on <vm-name>. The new VM will have the exact same settings, except the name, UUID, MAC address and disk paths; those values will be generated automatically. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 7 ++ src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 2 + src/kimchi/model/vms.py | 297 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 305 insertions(+), 2 deletions(-) diff --git a/docs/API.md b/docs/API.md index 29ae4e1..d5f8df6 100644 --- a/docs/API.md +++ b/docs/API.md @@ -133,6 +133,13 @@ the following general conventions: risk of data loss caused by reset without the guest OS shutdown. * connect: Prepare the connection for spice or vnc +* clone: Create a new VM identical to this VM. The new VM's name, UUID and + network MAC addresses will be generated automatically. Each existing + disks will be copied to a new volume in the same storage pool. If + there is no available space on that storage pool to hold the new + volume, it will be created on the pool 'default'. This action returns + a Task. + ### Sub-resource: Virtual Machine Screenshot **URI:** /vms/*:name*/screenshot diff --git a/src/kimchi/control/vms.py b/src/kimchi/control/vms.py index 88d8a81..a1589ef 100644 --- a/src/kimchi/control/vms.py +++ b/src/kimchi/control/vms.py @@ -46,6 +46,7 @@ class VM(Resource): self.shutdown = self.generate_action_handler('shutdown') self.reset = self.generate_action_handler('reset') self.connect = self.generate_action_handler('connect') + self.clone = self.generate_action_handler_task('clone') @property def data(self): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 712e5a6..304c870 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -102,6 +102,8 @@ messages = { "KCHVM0030E": _("Unable to get access metadata of virtual machine %(name)s. 
Details: %(err)s"), "KCHVM0031E": _("The guest console password must be a string."), "KCHVM0032E": _("The life time for the guest console password must be a number."), + "KCHVM0033E": _("Virtual machine '%(name)s' must be stopped before cloning it."), + "KCHVM0034E": _("Insufficient disk space to clone virtual machine '%(name)s'"), "KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index f2e4ae3..17fe2f4 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -22,6 +22,7 @@ import lxml.etree as ET from lxml import etree, objectify import os import random +import re import string import time import uuid @@ -30,17 +31,20 @@ from xml.etree import ElementTree import libvirt from cherrypy.process.plugins import BackgroundTask -from kimchi import vnc +from kimchi import model, vnc from kimchi.config import READONLY_POOL_TYPE from kimchi.exception import InvalidOperation, InvalidParameter from kimchi.exception import NotFoundError, OperationFailed from kimchi.model.config import CapabilitiesModel +from kimchi.model.tasks import TaskModel from kimchi.model.templates import TemplateModel from kimchi.model.utils import get_vm_name from kimchi.model.utils import get_metadata_node from kimchi.model.utils import set_metadata_node +from kimchi.rollbackcontext import RollbackContext from kimchi.screenshot import VMScreenshot -from kimchi.utils import import_class, kimchi_log, run_setfacl_set_attr +from kimchi.utils import add_task, import_class, kimchi_log +from kimchi.utils import run_setfacl_set_attr from kimchi.utils import template_name_from_uri from kimchi.xmlutils.utils import xpath_get_text, xml_item_update @@ -63,6 +67,15 @@ VM_LIVE_UPDATE_PARAMS = {} stats = {} +XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" 
+XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" +XPATH_DOMAIN_NAME = '/domain/name' +XPATH_DOMAIN_MAC = "/domain/devices/interface[@type='network']/mac/@address" +XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\ + "mac[@address='%s']" +XPATH_DOMAIN_UUID = '/domain/uuid' + + class VMsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] @@ -251,6 +264,11 @@ class VMModel(object): self.vmscreenshot = VMScreenshotModel(**kargs) self.users = import_class('kimchi.model.host.UsersModel')(**kargs) self.groups = import_class('kimchi.model.host.GroupsModel')(**kargs) + self.vms = VMsModel(**kargs) + self.task = TaskModel(**kargs) + self.storagepool = model.storagepools.StoragePoolModel(**kargs) + self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs) + self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs) def update(self, name, params): dom = self.get_vm(name, self.conn) @@ -258,6 +276,281 @@ class VMModel(object): self._live_vm_update(dom, params) return dom.name().decode('utf-8') + def clone(self, name): + """Clone a virtual machine based on an existing one. + + The new virtual machine will have the exact same configuration as the + original VM, except for the name, UUID, MAC addresses and disks. The + name will have the form "<name>-clone-<number>", with <number> starting + at 1; the UUID will be generated randomly; the MAC addresses will be + generated randomly with no conflicts within the original and the new + VM; and the disks will be new volumes [mostly] on the same storage + pool, with the same content as the original disks. The storage pool + 'default' will always be used when cloning SCSI and iSCSI disks and + when the original storage pool cannot hold the new volume. 
+ + An exception will be raised if the virtual machine <name> is not + shutoff, if there is no available space to copy a new volume to the + storage pool 'default' (when there was also no space to copy it to the + original storage pool) and if one of the virtual machine's disks belong + to a storage pool not supported by Kimchi. + + Parameters: + name -- The name of the existing virtual machine to be cloned. + + Return: + A Task running the clone operation. + """ + name = name.decode('utf-8') + + # VM must be shutoff in order to clone it + info = self.lookup(name) + if info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + # this name will be used as the Task's 'target_uri' so it needs to be + # defined now. + new_name = self._clone_get_next_name(name) + + # create a task with the actual clone function + taskid = add_task(u'/vms/%s' % new_name, self._clone_task, + self.objstore, + {'name': name, 'new_name': new_name}) + + return self.task.lookup(taskid) + + def _clone_task(self, cb, params): + """Asynchronous function which performs the clone operation. + + Parameters: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "name": the name of the original VM. + "new_name": the name of the new VM. 
+ """ + name = params['name'] + new_name = params['new_name'] + vir_conn = self.conn.get() + + # fetch base XML + cb('reading source VM XML') + vir_dom = vir_conn.lookupByName(name) + xml = vir_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + + # update name + cb('updating VM name') + xml = xml_item_update(xml, './name', new_name) + + # update UUID + cb('updating VM UUID') + new_uuid = unicode(uuid.uuid4()) + xml = xml_item_update(xml, './uuid', new_uuid) + + # update MAC addresses + cb('updating VM MAC addresses') + xml = self._clone_update_mac_addresses(xml) + + with RollbackContext() as rollback: + # copy disks + cb('copying VM disks') + xml = self._clone_update_disks(xml, rollback) + + # update objstore entry + cb('updating object store') + self._clone_update_objstore(vir_dom, new_uuid, rollback) + + # create new guest + cb('defining new VM') + vir_conn.defineXML(xml) + + rollback.commitAll() + + cb('OK', True) + + def _clone_get_next_name(self, basename): + """Find the next available name for a clone VM. + + If any VM named "<basename>-clone-<number>" is found, use + the maximum "number" + 1; else, use 1. + + Argument: + basename -- the name of the original VM. + + Return: + A UTF-8 string in the format "<basename>-clone-<number>". + """ + re_group_num = 'num' + + max_num = 0 + re_cloned_vm = re.compile(u'%s-clone-(?P<%s>\d+)' % + (basename, re_group_num)) + + vm_names = self.vms.get_list() + + for v in vm_names: + match = re_cloned_vm.match(v) + if match is not None: + max_num = max(max_num, int(match.group(re_group_num))) + + # increments the maximum "clone number" found + return u'%s-clone-%d' % (basename, max_num + 1) + + @staticmethod + def _clone_update_mac_addresses(xml): + """Update the MAC addresses with new values in the XML descriptor of a + cloning domain. + + The new MAC addresses will be generated randomly, and their values are + guaranteed to be distinct from the ones in the original VM. 
+ + Arguments: + xml -- The XML descriptor of the original domain. + + Return: + The XML descriptor <xml> with the new MAC addresses instead of the + old ones. + """ + old_macs = xpath_get_text(xml, XPATH_DOMAIN_MAC) + new_macs = [] + + for mac in old_macs: + while True: + new_mac = model.vmifaces.VMIfacesModel.random_mac() + # make sure the new MAC doesn't conflict with the original VM + # and with the new values on the new VM. + if new_mac not in (old_macs + new_macs): + new_macs.append(new_mac) + break + + xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac, + new_mac, 'address') + + return xml + + def _clone_update_disks(self, xml, rollback): + """Clone disks from a virtual machine. The disks are copied as new + volumes and the new VM's XML is updated accordingly. + + Arguments: + xml -- The XML descriptor of the original VM + new values for + "/domain/name" and "/domain/uuid". + rollback -- A rollback context so the new volumes can be removed if an + error occurs during the cloning operation. + + Return: + The XML descriptor <xml> with the new disk paths instead of the + old ones. 
+ """ + # the UUID will be used to create the disk paths + uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK) + + vir_conn = self.conn.get() + + for i, path in enumerate(all_paths): + vir_orig_vol = vir_conn.storageVolLookupByPath(path) + vir_pool = vir_orig_vol.storagePoolLookupByVolume() + + orig_pool_name = vir_pool.name().decode('utf-8') + orig_vol_name = vir_orig_vol.name().decode('utf-8') + + orig_pool = self.storagepool.lookup(orig_pool_name) + orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) + + new_pool_name = orig_pool_name + new_pool = orig_pool + + if orig_pool['type'] in ['dir', 'netfs', 'logical']: + # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold + # a new volume with the same size, the pool 'default' should + # be used + if orig_vol['capacity'] > orig_pool['available']: + kimchi_log.warning('storage pool \'%s\' doesn\'t have ' + 'enough free space to store image ' + '\'%s\'; falling back to \'default\'', + orig_pool_name, path) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # ...and if even the pool 'default' cannot hold a new + # volume, raise an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', + {'name': domain_name}) + + elif orig_pool['type'] in ['scsi', 'iscsi']: + # SCSI and iSCSI always fall back to the storage pool 'default' + kimchi_log.warning('cannot create new volume for clone in ' + 'storage pool \'%s\'; falling back to ' + '\'default\'', orig_pool_name) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # if the pool 'default' cannot hold a new volume, raise + # an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', {'name': domain_name}) + + else: + # unexpected storage pool 
type + raise InvalidOperation('KCHPOOL0014E', + {'type': orig_pool['type']}) + + # new volume name: <UUID>-<loop-index>.<original extension> + # e.g. 1234-5678-9012-3456-0.img + ext = os.path.splitext(path)[1] + new_vol_name = u'%s-%d%s' % (uuid, i, ext) + new_vol_params = {'name': new_vol_name, 'volume_path': path} + task = self.storagevolumes.create(new_pool_name, new_vol_params) + self.task.wait(task['id'], 3600) # 1 h + + # get the new volume path and update the XML descriptor + new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name) + xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path, + new_vol['path'], 'file') + + # remove the new volume should an error occur later + rollback.prependDefer(self.storagevolume.delete, new_pool_name, + new_vol_name) + + return xml + + def _clone_update_objstore(self, dom, new_uuid, rollback): + """Update Kimchi's object store with the cloning VM. + + Arguments: + dom -- The native domain object of the original VM. + new_uuid -- The UUID of the new, clonning VM. + rollback -- A rollback context so the object store entry can be removed + if an error occurs during the cloning operation. + """ + xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + + with self.objstore as session: + try: + vm = session.get('vm', old_uuid) + icon = vm['icon'] + session.store('vm', new_uuid, {'icon': icon}) + except NotFoundError: + # if we cannot find an object store entry for the original VM, + # don't store one with an empty value. + pass + else: + # we need to define a custom function to prepend to the + # rollback context because the object store session needs to be + # opened and closed correctly (i.e. "prependDefer" only + # accepts one command at a time but we need more than one to + # handle an object store). 
+ def _rollback_objstore(): + with self.objstore as session_rb: + session_rb.delete('vm', new_uuid, ignore_missing=True) + + # remove the new object store entry should an error occur later + rollback.prependDefer(_rollback_objstore) + def _build_access_elem(self, users, groups): access = E.access() for user in users: -- 1.9.3

On 11/02/2014 11:05 PM, Crístian Viana wrote:
The new command "POST /vms/<vm-name>/clone" will create a new virtual machine based on <vm-name>. The new VM will have the exact same settings, except the name, UUID, MAC addresses and disk paths; those values will be generated automatically.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 7 ++ src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 2 + src/kimchi/model/vms.py | 297 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 305 insertions(+), 2 deletions(-)
diff --git a/docs/API.md b/docs/API.md index 29ae4e1..d5f8df6 100644 --- a/docs/API.md +++ b/docs/API.md @@ -133,6 +133,13 @@ the following general conventions: risk of data loss caused by reset without the guest OS shutdown. * connect: Prepare the connection for spice or vnc
+* clone: Create a new VM identical to this VM. The new VM's name, UUID and + network MAC addresses will be generated automatically. Each existing + disk will be copied to a new volume in the same storage pool. If + there is no available space on that storage pool to hold the new + volume, it will be created on the pool 'default'. This action returns + a Task. + ### Sub-resource: Virtual Machine Screenshot
**URI:** /vms/*:name*/screenshot diff --git a/src/kimchi/control/vms.py b/src/kimchi/control/vms.py index 88d8a81..a1589ef 100644 --- a/src/kimchi/control/vms.py +++ b/src/kimchi/control/vms.py @@ -46,6 +46,7 @@ class VM(Resource): self.shutdown = self.generate_action_handler('shutdown') self.reset = self.generate_action_handler('reset') self.connect = self.generate_action_handler('connect') + self.clone = self.generate_action_handler_task('clone')
@property def data(self): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 712e5a6..304c870 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -102,6 +102,8 @@ messages = { "KCHVM0030E": _("Unable to get access metadata of virtual machine %(name)s. Details: %(err)s"), "KCHVM0031E": _("The guest console password must be a string."), "KCHVM0032E": _("The life time for the guest console password must be a number."), + "KCHVM0033E": _("Virtual machine '%(name)s' must be stopped before cloning it."), + "KCHVM0034E": _("Insufficient disk space to clone virtual machine '%(name)s'"),
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index f2e4ae3..17fe2f4 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -22,6 +22,7 @@ import lxml.etree as ET from lxml import etree, objectify import os import random +import re import string import time import uuid @@ -30,17 +31,20 @@ from xml.etree import ElementTree import libvirt from cherrypy.process.plugins import BackgroundTask
-from kimchi import vnc +from kimchi import model, vnc from kimchi.config import READONLY_POOL_TYPE from kimchi.exception import InvalidOperation, InvalidParameter from kimchi.exception import NotFoundError, OperationFailed from kimchi.model.config import CapabilitiesModel +from kimchi.model.tasks import TaskModel from kimchi.model.templates import TemplateModel from kimchi.model.utils import get_vm_name from kimchi.model.utils import get_metadata_node from kimchi.model.utils import set_metadata_node +from kimchi.rollbackcontext import RollbackContext from kimchi.screenshot import VMScreenshot -from kimchi.utils import import_class, kimchi_log, run_setfacl_set_attr +from kimchi.utils import add_task, import_class, kimchi_log +from kimchi.utils import run_setfacl_set_attr from kimchi.utils import template_name_from_uri from kimchi.xmlutils.utils import xpath_get_text, xml_item_update
@@ -63,6 +67,15 @@ VM_LIVE_UPDATE_PARAMS = {} stats = {}
+XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" +XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" +XPATH_DOMAIN_NAME = '/domain/name' +XPATH_DOMAIN_MAC = "/domain/devices/interface[@type='network']/mac/@address" +XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\ + "mac[@address='%s']" +XPATH_DOMAIN_UUID = '/domain/uuid' + + class VMsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] @@ -251,6 +264,11 @@ class VMModel(object): self.vmscreenshot = VMScreenshotModel(**kargs) self.users = import_class('kimchi.model.host.UsersModel')(**kargs) self.groups = import_class('kimchi.model.host.GroupsModel')(**kargs) + self.vms = VMsModel(**kargs) + self.task = TaskModel(**kargs) + self.storagepool = model.storagepools.StoragePoolModel(**kargs) + self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs) + self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs)
def update(self, name, params): dom = self.get_vm(name, self.conn) @@ -258,6 +276,281 @@ class VMModel(object): self._live_vm_update(dom, params) return dom.name().decode('utf-8')
+ def clone(self, name): + """Clone a virtual machine based on an existing one. + + The new virtual machine will have the exact same configuration as the + original VM, except for the name, UUID, MAC addresses and disks. The + name will have the form "<name>-clone-<number>", with <number> starting + at 1; the UUID will be generated randomly; the MAC addresses will be + generated randomly with no conflicts within the original and the new + VM; and the disks will be new volumes [mostly] on the same storage + pool, with the same content as the original disks. The storage pool + 'default' will always be used when cloning SCSI and iSCSI disks and + when the original storage pool cannot hold the new volume. + + An exception will be raised if the virtual machine <name> is not + shutoff, if there is no available space to copy a new volume to the + storage pool 'default' (when there was also no space to copy it to the + original storage pool) and if one of the virtual machine's disks belong + to a storage pool not supported by Kimchi. + + Parameters: + name -- The name of the existing virtual machine to be cloned. + + Return: + A Task running the clone operation. + """ + name = name.decode('utf-8') + + # VM must be shutoff in order to clone it + info = self.lookup(name) + if info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + # this name will be used as the Task's 'target_uri' so it needs to be + # defined now. + new_name = self._clone_get_next_name(name) + + # create a task with the actual clone function + taskid = add_task(u'/vms/%s' % new_name, self._clone_task, + self.objstore, + {'name': name, 'new_name': new_name}) + + return self.task.lookup(taskid) + + def _clone_task(self, cb, params): + """Asynchronous function which performs the clone operation. + + Parameters: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "name": the name of the original VM. 
+ "new_name": the name of the new VM. + """ + name = params['name'] + new_name = params['new_name'] + vir_conn = self.conn.get() + + # fetch base XML + cb('reading source VM XML') + vir_dom = vir_conn.lookupByName(name) + xml = vir_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + + # update name + cb('updating VM name') + xml = xml_item_update(xml, './name', new_name) + + # update UUID + cb('updating VM UUID') + new_uuid = unicode(uuid.uuid4()) + xml = xml_item_update(xml, './uuid', new_uuid) + + # update MAC addresses + cb('updating VM MAC addresses') + xml = self._clone_update_mac_addresses(xml) + + with RollbackContext() as rollback: + # copy disks + cb('copying VM disks') + xml = self._clone_update_disks(xml, rollback) + + # update objstore entry + cb('updating object store') + self._clone_update_objstore(vir_dom, new_uuid, rollback) + + # create new guest + cb('defining new VM') + vir_conn.defineXML(xml) + + rollback.commitAll() + + cb('OK', True) + + def _clone_get_next_name(self, basename): + """Find the next available name for a clone VM. + + If any VM named "<basename>-clone-<number>" is found, use + the maximum "number" + 1; else, use 1. + + Argument: + basename -- the name of the original VM. + + Return: + A UTF-8 string in the format "<basename>-clone-<number>". + """ + re_group_num = 'num' + + max_num = 0 + re_cloned_vm = re.compile(u'%s-clone-(?P<%s>\d+)' % + (basename, re_group_num)) + + vm_names = self.vms.get_list() + + for v in vm_names: + match = re_cloned_vm.match(v) + if match is not None: + max_num = max(max_num, int(match.group(re_group_num))) + + # increments the maximum "clone number" found + return u'%s-clone-%d' % (basename, max_num + 1) + + @staticmethod + def _clone_update_mac_addresses(xml): + """Update the MAC addresses with new values in the XML descriptor of a + cloning domain. + + The new MAC addresses will be generated randomly, and their values are + guaranteed to be distinct from the ones in the original VM. 
+ + Arguments: + xml -- The XML descriptor of the original domain. + + Return: + The XML descriptor <xml> with the new MAC addresses instead of the + old ones. + """ + old_macs = xpath_get_text(xml, XPATH_DOMAIN_MAC) + new_macs = [] + + for mac in old_macs: + while True: + new_mac = model.vmifaces.VMIfacesModel.random_mac() + # make sure the new MAC doesn't conflict with the original VM + # and with the new values on the new VM. + if new_mac not in (old_macs + new_macs): + new_macs.append(new_mac) + break + + xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac, + new_mac, 'address') + + return xml + + def _clone_update_disks(self, xml, rollback): + """Clone disks from a virtual machine. The disks are copied as new + volumes and the new VM's XML is updated accordingly. + + Arguments: + xml -- The XML descriptor of the original VM + new values for + "/domain/name" and "/domain/uuid". + rollback -- A rollback context so the new volumes can be removed if an + error occurs during the cloning operation. + + Return: + The XML descriptor <xml> with the new disk paths instead of the + old ones. 
+ """ + # the UUID will be used to create the disk paths + uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK) + + vir_conn = self.conn.get() + + for i, path in enumerate(all_paths): + vir_orig_vol = vir_conn.storageVolLookupByPath(path) + vir_pool = vir_orig_vol.storagePoolLookupByVolume() + + orig_pool_name = vir_pool.name().decode('utf-8') + orig_vol_name = vir_orig_vol.name().decode('utf-8') + + orig_pool = self.storagepool.lookup(orig_pool_name) + orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) + + new_pool_name = orig_pool_name + new_pool = orig_pool + + if orig_pool['type'] in ['dir', 'netfs', 'logical']: + # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold + # a new volume with the same size, the pool 'default' should + # be used + if orig_vol['capacity'] > orig_pool['available']: + kimchi_log.warning('storage pool \'%s\' doesn\'t have ' + 'enough free space to store image ' + '\'%s\'; falling back to \'default\'', + orig_pool_name, path)
+ new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # ...and if even the pool 'default' cannot hold a new + # volume, raise an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', + {'name': domain_name})
You do this same verification some lines below. Couldn't you reorganize the code to only have one occurrence of it? Or in last case, create a function.
+ + elif orig_pool['type'] in ['scsi', 'iscsi']: + # SCSI and iSCSI always fall back to the storage pool 'default' + kimchi_log.warning('cannot create new volume for clone in ' + 'storage pool \'%s\'; falling back to ' + '\'default\'', orig_pool_name) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # if the pool 'default' cannot hold a new volume, raise + # an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', {'name': domain_name}) + + else: + # unexpected storage pool type + raise InvalidOperation('KCHPOOL0014E', + {'type': orig_pool['type']}) + + # new volume name: <UUID>-<loop-index>.<original extension> + # e.g. 1234-5678-9012-3456-0.img + ext = os.path.splitext(path)[1] + new_vol_name = u'%s-%d%s' % (uuid, i, ext) + new_vol_params = {'name': new_vol_name, 'volume_path': path} + task = self.storagevolumes.create(new_pool_name, new_vol_params) + self.task.wait(task['id'], 3600) # 1 h + + # get the new volume path and update the XML descriptor + new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name) + xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path, + new_vol['path'], 'file') + + # remove the new volume should an error occur later + rollback.prependDefer(self.storagevolume.delete, new_pool_name, + new_vol_name) + + return xml + + def _clone_update_objstore(self, dom, new_uuid, rollback): + """Update Kimchi's object store with the cloning VM. + + Arguments: + dom -- The native domain object of the original VM. + new_uuid -- The UUID of the new, clonning VM. + rollback -- A rollback context so the object store entry can be removed + if an error occurs during the cloning operation. + """ + xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] +
+ with self.objstore as session: + try: + vm = session.get('vm', old_uuid) + icon = vm['icon'] + session.store('vm', new_uuid, {'icon': icon}) + except NotFoundError: + # if we cannot find an object store entry for the original VM, + # don't store one with an empty value. + pass + else: + # we need to define a custom function to prepend to the + # rollback context because the object store session needs to be + # opened and closed correctly (i.e. "prependDefer" only + # accepts one command at a time but we need more than one to + # handle an object store). + def _rollback_objstore(): + with self.objstore as session_rb: + session_rb.delete('vm', new_uuid, ignore_missing=True) + + # remove the new object store entry should an error occur later + rollback.prependDefer(_rollback_objstore) + def _build_access_elem(self, users, groups): access = E.access() for user in users:

On 03-11-2014 14:47, Aline Manera wrote:+ if orig_pool['type'] in ['dir', 'netfs', 'logical']:
+ # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold + # a new volume with the same size, the pool 'default' should + # be used + if orig_vol['capacity'] > orig_pool['available']: + kimchi_log.warning('storage pool \'%s\' doesn\'t have ' + 'enough free space to store image ' + '\'%s\'; falling back to \'default\'', + orig_pool_name, path)
+ new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # ...and if even the pool 'default' cannot hold a new + # volume, raise an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', + {'name': domain_name})
You do this same verification some lines below. Couldn't you reorganize the code to only have one occurrence of it? Or in last case, create a function.
I don't think it's worth it. Even though those "if" blocks look the same, the only thing they use different variables in the boolean condition and they have different bodies. There's not much to reuse on that code. I can go from this: if orig_vol['capacity'] > orig_pool['available']: kimchi_log.warning('storage pool \'%s\' doesn\'t have enough free space to store image \'%s\'; falling back to \'default\'', orig_pool_name, path) new_pool_name = u'default' new_pool = self.storagepool.lookup(u'default') if orig_vol['capacity'] > new_pool['available']: domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] raise InvalidOperation('KCHVM0034E', {'name': domain_name}) to this: @staticmethod def vol_fits_in_pool(vol, pool): return vol['capacity'] > pool['available'] if vol_fits_in_pool(orig_vol, orig_pool): kimchi_log.warning('storage pool \'%s\' doesn\'t have enough free space to store image \'%s\'; falling back to \'default\'', orig_pool_name, path) new_pool_name = u'default' new_pool = self.storagepool.lookup(u'default') if vol_fits_in_pool(orig_vol, new_pool): domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] raise InvalidOperation('KCHVM0034E', {'name': domain_name}) Is it really worth it?

On 11/04/2014 11:10 AM, Crístian Viana wrote:
On 03-11-2014 14:47, Aline Manera wrote:+ if orig_pool['type'] in ['dir', 'netfs', 'logical']:
+ # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold + # a new volume with the same size, the pool 'default' should + # be used + if orig_vol['capacity'] > orig_pool['available']: + kimchi_log.warning('storage pool \'%s\' doesn\'t have ' + 'enough free space to store image ' + '\'%s\'; falling back to \'default\'', + orig_pool_name, path)
+ new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # ...and if even the pool 'default' cannot hold a new + # volume, raise an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', + {'name': domain_name})
You do this same verification some lines below. Couldn't you reorganize the code to only have one occurrence of it? Or in last case, create a function.
I don't think it's worth it. Even though those "if" blocks look the same, the only difference is that they use different variables in the boolean condition and that they have different bodies. There's not much to reuse in that code.
I can go from this:
if orig_vol['capacity'] > orig_pool['available']: kimchi_log.warning('storage pool \'%s\' doesn\'t have enough free space to store image \'%s\'; falling back to \'default\'', orig_pool_name, path) new_pool_name = u'default' new_pool = self.storagepool.lookup(u'default')
if orig_vol['capacity'] > new_pool['available']: domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] raise InvalidOperation('KCHVM0034E', {'name': domain_name})
to this:
@staticmethod def vol_exceeds_pool(vol, pool): return vol['capacity'] > pool['available']
if vol_exceeds_pool(orig_vol, orig_pool): kimchi_log.warning('storage pool \'%s\' doesn\'t have enough free space to store image \'%s\'; falling back to \'default\'', orig_pool_name, path) new_pool_name = u'default' new_pool = self.storagepool.lookup(u'default')
if vol_exceeds_pool(orig_vol, new_pool): domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] raise InvalidOperation('KCHVM0034E', {'name': domain_name})
Is it really worth it?
Nope! Keep it is as it is! =)

On 11/02/2014 11:05 PM, Crístian Viana wrote:
The new command "POST /vms/<vm-name>/clone" will create a new virtual machine based on <vm-name>. The new VM will have the exact same settings, except the name, UUID, MAC addresses and disk paths; those values will be generated automatically.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 7 ++ src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 2 + src/kimchi/model/vms.py | 297 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 305 insertions(+), 2 deletions(-)
diff --git a/docs/API.md b/docs/API.md index 29ae4e1..d5f8df6 100644 --- a/docs/API.md +++ b/docs/API.md @@ -133,6 +133,13 @@ the following general conventions: risk of data loss caused by reset without the guest OS shutdown. * connect: Prepare the connection for spice or vnc
+* clone: Create a new VM identical to this VM. The new VM's name, UUID and + network MAC addresses will be generated automatically. Each existing + disk will be copied to a new volume in the same storage pool. If + there is no available space on that storage pool to hold the new + volume, it will be created on the pool 'default'. This action returns + a Task. + ### Sub-resource: Virtual Machine Screenshot
**URI:** /vms/*:name*/screenshot diff --git a/src/kimchi/control/vms.py b/src/kimchi/control/vms.py index 88d8a81..a1589ef 100644 --- a/src/kimchi/control/vms.py +++ b/src/kimchi/control/vms.py @@ -46,6 +46,7 @@ class VM(Resource): self.shutdown = self.generate_action_handler('shutdown') self.reset = self.generate_action_handler('reset') self.connect = self.generate_action_handler('connect') + self.clone = self.generate_action_handler_task('clone')
@property def data(self): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 712e5a6..304c870 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -102,6 +102,8 @@ messages = { "KCHVM0030E": _("Unable to get access metadata of virtual machine %(name)s. Details: %(err)s"), "KCHVM0031E": _("The guest console password must be a string."), "KCHVM0032E": _("The life time for the guest console password must be a number."), + "KCHVM0033E": _("Virtual machine '%(name)s' must be stopped before cloning it."), + "KCHVM0034E": _("Insufficient disk space to clone virtual machine '%(name)s'"),
"KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index f2e4ae3..17fe2f4 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -22,6 +22,7 @@ import lxml.etree as ET from lxml import etree, objectify import os import random +import re import string import time import uuid @@ -30,17 +31,20 @@ from xml.etree import ElementTree import libvirt from cherrypy.process.plugins import BackgroundTask
-from kimchi import vnc +from kimchi import model, vnc from kimchi.config import READONLY_POOL_TYPE from kimchi.exception import InvalidOperation, InvalidParameter from kimchi.exception import NotFoundError, OperationFailed from kimchi.model.config import CapabilitiesModel +from kimchi.model.tasks import TaskModel from kimchi.model.templates import TemplateModel from kimchi.model.utils import get_vm_name from kimchi.model.utils import get_metadata_node from kimchi.model.utils import set_metadata_node +from kimchi.rollbackcontext import RollbackContext from kimchi.screenshot import VMScreenshot -from kimchi.utils import import_class, kimchi_log, run_setfacl_set_attr +from kimchi.utils import add_task, import_class, kimchi_log +from kimchi.utils import run_setfacl_set_attr from kimchi.utils import template_name_from_uri from kimchi.xmlutils.utils import xpath_get_text, xml_item_update
@@ -63,6 +67,15 @@ VM_LIVE_UPDATE_PARAMS = {} stats = {}
+XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" +XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" +XPATH_DOMAIN_NAME = '/domain/name' +XPATH_DOMAIN_MAC = "/domain/devices/interface[@type='network']/mac/@address" +XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\ + "mac[@address='%s']" +XPATH_DOMAIN_UUID = '/domain/uuid' + + class VMsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] @@ -251,6 +264,11 @@ class VMModel(object): self.vmscreenshot = VMScreenshotModel(**kargs) self.users = import_class('kimchi.model.host.UsersModel')(**kargs) self.groups = import_class('kimchi.model.host.GroupsModel')(**kargs) + self.vms = VMsModel(**kargs) + self.task = TaskModel(**kargs) + self.storagepool = model.storagepools.StoragePoolModel(**kargs) + self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs) + self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs)
def update(self, name, params): dom = self.get_vm(name, self.conn) @@ -258,6 +276,281 @@ class VMModel(object): self._live_vm_update(dom, params) return dom.name().decode('utf-8')
+ def clone(self, name): + """Clone a virtual machine based on an existing one. + + The new virtual machine will have the exact same configuration as the + original VM, except for the name, UUID, MAC addresses and disks. The + name will have the form "<name>-clone-<number>", with <number> starting + at 1; the UUID will be generated randomly; the MAC addresses will be + generated randomly with no conflicts within the original and the new + VM; and the disks will be new volumes [mostly] on the same storage + pool, with the same content as the original disks. The storage pool + 'default' will always be used when cloning SCSI and iSCSI disks and + when the original storage pool cannot hold the new volume. + + An exception will be raised if the virtual machine <name> is not + shutoff, if there is no available space to copy a new volume to the + storage pool 'default' (when there was also no space to copy it to the + original storage pool) and if one of the virtual machine's disks belong + to a storage pool not supported by Kimchi. + + Parameters: + name -- The name of the existing virtual machine to be cloned. + + Return: + A Task running the clone operation. + """ + name = name.decode('utf-8') + + # VM must be shutoff in order to clone it + info = self.lookup(name) + if info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + # this name will be used as the Task's 'target_uri' so it needs to be + # defined now. + new_name = self._clone_get_next_name(name) + + # create a task with the actual clone function + taskid = add_task(u'/vms/%s' % new_name, self._clone_task, + self.objstore, + {'name': name, 'new_name': new_name}) + + return self.task.lookup(taskid) + + def _clone_task(self, cb, params): + """Asynchronous function which performs the clone operation. + + Parameters: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "name": the name of the original VM. 
+ "new_name": the name of the new VM. + """ + name = params['name'] + new_name = params['new_name'] + vir_conn = self.conn.get() + + # fetch base XML + cb('reading source VM XML') + vir_dom = vir_conn.lookupByName(name) + xml = vir_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + + # update name + cb('updating VM name') + xml = xml_item_update(xml, './name', new_name) + + # update UUID + cb('updating VM UUID') + new_uuid = unicode(uuid.uuid4()) + xml = xml_item_update(xml, './uuid', new_uuid) + + # update MAC addresses + cb('updating VM MAC addresses') + xml = self._clone_update_mac_addresses(xml) + + with RollbackContext() as rollback: + # copy disks + cb('copying VM disks') + xml = self._clone_update_disks(xml, rollback) + + # update objstore entry + cb('updating object store') + self._clone_update_objstore(vir_dom, new_uuid, rollback) + + # create new guest + cb('defining new VM') + vir_conn.defineXML(xml) + + rollback.commitAll() + + cb('OK', True) + + def _clone_get_next_name(self, basename): + """Find the next available name for a clone VM. + + If any VM named "<basename>-clone-<number>" is found, use + the maximum "number" + 1; else, use 1. + + Argument: + basename -- the name of the original VM. + + Return: + A UTF-8 string in the format "<basename>-clone-<number>". + """ + re_group_num = 'num' + + max_num = 0 + re_cloned_vm = re.compile(u'%s-clone-(?P<%s>\d+)' % + (basename, re_group_num)) + + vm_names = self.vms.get_list() + + for v in vm_names: + match = re_cloned_vm.match(v) + if match is not None: + max_num = max(max_num, int(match.group(re_group_num))) + + # increments the maximum "clone number" found + return u'%s-clone-%d' % (basename, max_num + 1) + + @staticmethod + def _clone_update_mac_addresses(xml): + """Update the MAC addresses with new values in the XML descriptor of a + cloning domain. + + The new MAC addresses will be generated randomly, and their values are + guaranteed to be distinct from the ones in the original VM. 
+ + Arguments: + xml -- The XML descriptor of the original domain. + + Return: + The XML descriptor <xml> with the new MAC addresses instead of the + old ones. + """ + old_macs = xpath_get_text(xml, XPATH_DOMAIN_MAC) + new_macs = [] + + for mac in old_macs: + while True: + new_mac = model.vmifaces.VMIfacesModel.random_mac() + # make sure the new MAC doesn't conflict with the original VM + # and with the new values on the new VM. + if new_mac not in (old_macs + new_macs): + new_macs.append(new_mac) + break + + xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac, + new_mac, 'address') + + return xml + + def _clone_update_disks(self, xml, rollback): + """Clone disks from a virtual machine. The disks are copied as new + volumes and the new VM's XML is updated accordingly. + + Arguments: + xml -- The XML descriptor of the original VM + new values for + "/domain/name" and "/domain/uuid". + rollback -- A rollback context so the new volumes can be removed if an + error occurs during the cloning operation. + + Return: + The XML descriptor <xml> with the new disk paths instead of the + old ones. + """ + # the UUID will be used to create the disk paths + uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK) + + vir_conn = self.conn.get() +
+ for i, path in enumerate(all_paths): + vir_orig_vol = vir_conn.storageVolLookupByPath(path) + vir_pool = vir_orig_vol.storagePoolLookupByVolume() +
I've just thought about the case where the disk is not in a pool. For example, we allow the user to create a Template (and then a VM) using an Image file. That image file is not necessarily on a pool, i.e., it is a volume from libvirt's perspective. So in that case the call vir_conn.storageVolLookupByPath(path) will raise an exception and stop the clone operation. We need to keep this scenario in mind and handle it properly.
+ orig_pool_name = vir_pool.name().decode('utf-8') + orig_vol_name = vir_orig_vol.name().decode('utf-8') + + orig_pool = self.storagepool.lookup(orig_pool_name) + orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) + + new_pool_name = orig_pool_name + new_pool = orig_pool + + if orig_pool['type'] in ['dir', 'netfs', 'logical']: + # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold + # a new volume with the same size, the pool 'default' should + # be used + if orig_vol['capacity'] > orig_pool['available']: + kimchi_log.warning('storage pool \'%s\' doesn\'t have ' + 'enough free space to store image ' + '\'%s\'; falling back to \'default\'', + orig_pool_name, path) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # ...and if even the pool 'default' cannot hold a new + # volume, raise an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', + {'name': domain_name}) + + elif orig_pool['type'] in ['scsi', 'iscsi']: + # SCSI and iSCSI always fall back to the storage pool 'default' + kimchi_log.warning('cannot create new volume for clone in ' + 'storage pool \'%s\'; falling back to ' + '\'default\'', orig_pool_name) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # if the pool 'default' cannot hold a new volume, raise + # an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', {'name': domain_name}) + + else: + # unexpected storage pool type + raise InvalidOperation('KCHPOOL0014E', + {'type': orig_pool['type']}) + + # new volume name: <UUID>-<loop-index>.<original extension> + # e.g. 
1234-5678-9012-3456-0.img + ext = os.path.splitext(path)[1] + new_vol_name = u'%s-%d%s' % (uuid, i, ext) + new_vol_params = {'name': new_vol_name, 'volume_path': path} + task = self.storagevolumes.create(new_pool_name, new_vol_params) + self.task.wait(task['id'], 3600) # 1 h + + # get the new volume path and update the XML descriptor + new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name) + xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path, + new_vol['path'], 'file') + + # remove the new volume should an error occur later + rollback.prependDefer(self.storagevolume.delete, new_pool_name, + new_vol_name) + + return xml + + def _clone_update_objstore(self, dom, new_uuid, rollback): + """Update Kimchi's object store with the cloning VM. + + Arguments: + dom -- The native domain object of the original VM. + new_uuid -- The UUID of the new, clonning VM. + rollback -- A rollback context so the object store entry can be removed + if an error occurs during the cloning operation. + """ + xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + + with self.objstore as session: + try: + vm = session.get('vm', old_uuid) + icon = vm['icon'] + session.store('vm', new_uuid, {'icon': icon}) + except NotFoundError: + # if we cannot find an object store entry for the original VM, + # don't store one with an empty value. + pass + else: + # we need to define a custom function to prepend to the + # rollback context because the object store session needs to be + # opened and closed correctly (i.e. "prependDefer" only + # accepts one command at a time but we need more than one to + # handle an object store). 
+ def _rollback_objstore(): + with self.objstore as session_rb: + session_rb.delete('vm', new_uuid, ignore_missing=True) + + # remove the new object store entry should an error occur later + rollback.prependDefer(_rollback_objstore) + def _build_access_elem(self, users, groups): access = E.access() for user in users:

On 11/04/2014 09:07 AM, Aline Manera wrote:
On 11/02/2014 11:05 PM, Crístian Viana wrote:
The new command "POST /vms/<vm-name>/clone" will create a new virtual machine based on <vm-name>. The new VM will have the exact same settings, except for the name, UUID, MAC addresses and disk paths; those values will be generated automatically.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- docs/API.md | 7 ++ src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 2 + src/kimchi/model/vms.py | 297 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 305 insertions(+), 2 deletions(-)
diff --git a/docs/API.md b/docs/API.md index 29ae4e1..d5f8df6 100644 --- a/docs/API.md +++ b/docs/API.md @@ -133,6 +133,13 @@ the following general conventions: risk of data loss caused by reset without the guest OS shutdown. * connect: Prepare the connection for spice or vnc +* clone: Create a new VM identical to this VM. The new VM's name, UUID and + network MAC addresses will be generated automatically. Each existing + disks will be copied to a new volume in the same storage pool. If + there is no available space on that storage pool to hold the new + volume, it will be created on the pool 'default'. This action returns + a Task. + ### Sub-resource: Virtual Machine Screenshot **URI:** /vms/*:name*/screenshot diff --git a/src/kimchi/control/vms.py b/src/kimchi/control/vms.py index 88d8a81..a1589ef 100644 --- a/src/kimchi/control/vms.py +++ b/src/kimchi/control/vms.py @@ -46,6 +46,7 @@ class VM(Resource): self.shutdown = self.generate_action_handler('shutdown') self.reset = self.generate_action_handler('reset') self.connect = self.generate_action_handler('connect') + self.clone = self.generate_action_handler_task('clone') @property def data(self): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index 712e5a6..304c870 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -102,6 +102,8 @@ messages = { "KCHVM0030E": _("Unable to get access metadata of virtual machine %(name)s. 
Details: %(err)s"), "KCHVM0031E": _("The guest console password must be a string."), "KCHVM0032E": _("The life time for the guest console password must be a number."), + "KCHVM0033E": _("Virtual machine '%(name)s' must be stopped before cloning it."), + "KCHVM0034E": _("Insufficient disk space to clone virtual machine '%(name)s'"), "KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index f2e4ae3..17fe2f4 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -22,6 +22,7 @@ import lxml.etree as ET from lxml import etree, objectify import os import random +import re import string import time import uuid @@ -30,17 +31,20 @@ from xml.etree import ElementTree import libvirt from cherrypy.process.plugins import BackgroundTask -from kimchi import vnc +from kimchi import model, vnc from kimchi.config import READONLY_POOL_TYPE from kimchi.exception import InvalidOperation, InvalidParameter from kimchi.exception import NotFoundError, OperationFailed from kimchi.model.config import CapabilitiesModel +from kimchi.model.tasks import TaskModel from kimchi.model.templates import TemplateModel from kimchi.model.utils import get_vm_name from kimchi.model.utils import get_metadata_node from kimchi.model.utils import set_metadata_node +from kimchi.rollbackcontext import RollbackContext from kimchi.screenshot import VMScreenshot -from kimchi.utils import import_class, kimchi_log, run_setfacl_set_attr +from kimchi.utils import add_task, import_class, kimchi_log +from kimchi.utils import run_setfacl_set_attr from kimchi.utils import template_name_from_uri from kimchi.xmlutils.utils import xpath_get_text, xml_item_update @@ -63,6 +67,15 @@ VM_LIVE_UPDATE_PARAMS = {} stats = {} +XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" 
+XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" +XPATH_DOMAIN_NAME = '/domain/name' +XPATH_DOMAIN_MAC = "/domain/devices/interface[@type='network']/mac/@address" +XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\ + "mac[@address='%s']" +XPATH_DOMAIN_UUID = '/domain/uuid' + + class VMsModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] @@ -251,6 +264,11 @@ class VMModel(object): self.vmscreenshot = VMScreenshotModel(**kargs) self.users = import_class('kimchi.model.host.UsersModel')(**kargs) self.groups = import_class('kimchi.model.host.GroupsModel')(**kargs) + self.vms = VMsModel(**kargs) + self.task = TaskModel(**kargs) + self.storagepool = model.storagepools.StoragePoolModel(**kargs) + self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs) + self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs) def update(self, name, params): dom = self.get_vm(name, self.conn) @@ -258,6 +276,281 @@ class VMModel(object): self._live_vm_update(dom, params) return dom.name().decode('utf-8') + def clone(self, name): + """Clone a virtual machine based on an existing one. + + The new virtual machine will have the exact same configuration as the + original VM, except for the name, UUID, MAC addresses and disks. The + name will have the form "<name>-clone-<number>", with <number> starting + at 1; the UUID will be generated randomly; the MAC addresses will be + generated randomly with no conflicts within the original and the new + VM; and the disks will be new volumes [mostly] on the same storage + pool, with the same content as the original disks. The storage pool + 'default' will always be used when cloning SCSI and iSCSI disks and + when the original storage pool cannot hold the new volume. 
+ + An exception will be raised if the virtual machine <name> is not + shutoff, if there is no available space to copy a new volume to the + storage pool 'default' (when there was also no space to copy it to the + original storage pool) and if one of the virtual machine's disks belong + to a storage pool not supported by Kimchi. + + Parameters: + name -- The name of the existing virtual machine to be cloned. + + Return: + A Task running the clone operation. + """ + name = name.decode('utf-8') + + # VM must be shutoff in order to clone it + info = self.lookup(name) + if info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + # this name will be used as the Task's 'target_uri' so it needs to be + # defined now. + new_name = self._clone_get_next_name(name) + + # create a task with the actual clone function + taskid = add_task(u'/vms/%s' % new_name, self._clone_task, + self.objstore, + {'name': name, 'new_name': new_name}) + + return self.task.lookup(taskid) + + def _clone_task(self, cb, params): + """Asynchronous function which performs the clone operation. + + Parameters: + cb -- A callback function to signal the Task's progress. + params -- A dict with the following values: + "name": the name of the original VM. + "new_name": the name of the new VM. 
+ """ + name = params['name'] + new_name = params['new_name'] + vir_conn = self.conn.get() + + # fetch base XML + cb('reading source VM XML') + vir_dom = vir_conn.lookupByName(name) + xml = vir_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + + # update name + cb('updating VM name') + xml = xml_item_update(xml, './name', new_name) + + # update UUID + cb('updating VM UUID') + new_uuid = unicode(uuid.uuid4()) + xml = xml_item_update(xml, './uuid', new_uuid) + + # update MAC addresses + cb('updating VM MAC addresses') + xml = self._clone_update_mac_addresses(xml) + + with RollbackContext() as rollback: + # copy disks + cb('copying VM disks') + xml = self._clone_update_disks(xml, rollback) + + # update objstore entry + cb('updating object store') + self._clone_update_objstore(vir_dom, new_uuid, rollback) + + # create new guest + cb('defining new VM') + vir_conn.defineXML(xml) + + rollback.commitAll() + + cb('OK', True) + + def _clone_get_next_name(self, basename): + """Find the next available name for a clone VM. + + If any VM named "<basename>-clone-<number>" is found, use + the maximum "number" + 1; else, use 1. + + Argument: + basename -- the name of the original VM. + + Return: + A UTF-8 string in the format "<basename>-clone-<number>". + """ + re_group_num = 'num' + + max_num = 0 + re_cloned_vm = re.compile(u'%s-clone-(?P<%s>\d+)' % + (basename, re_group_num)) + + vm_names = self.vms.get_list() + + for v in vm_names: + match = re_cloned_vm.match(v) + if match is not None: + max_num = max(max_num, int(match.group(re_group_num))) + + # increments the maximum "clone number" found + return u'%s-clone-%d' % (basename, max_num + 1) + + @staticmethod + def _clone_update_mac_addresses(xml): + """Update the MAC addresses with new values in the XML descriptor of a + cloning domain. + + The new MAC addresses will be generated randomly, and their values are + guaranteed to be distinct from the ones in the original VM. 
+ + Arguments: + xml -- The XML descriptor of the original domain. + + Return: + The XML descriptor <xml> with the new MAC addresses instead of the + old ones. + """ + old_macs = xpath_get_text(xml, XPATH_DOMAIN_MAC) + new_macs = [] + + for mac in old_macs: + while True: + new_mac = model.vmifaces.VMIfacesModel.random_mac() + # make sure the new MAC doesn't conflict with the original VM + # and with the new values on the new VM. + if new_mac not in (old_macs + new_macs): + new_macs.append(new_mac) + break + + xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac, + new_mac, 'address') + + return xml + + def _clone_update_disks(self, xml, rollback): + """Clone disks from a virtual machine. The disks are copied as new + volumes and the new VM's XML is updated accordingly. + + Arguments: + xml -- The XML descriptor of the original VM + new values for + "/domain/name" and "/domain/uuid". + rollback -- A rollback context so the new volumes can be removed if an + error occurs during the cloning operation. + + Return: + The XML descriptor <xml> with the new disk paths instead of the + old ones. + """ + # the UUID will be used to create the disk paths + uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK) + + vir_conn = self.conn.get() +
+ for i, path in enumerate(all_paths): + vir_orig_vol = vir_conn.storageVolLookupByPath(path) + vir_pool = vir_orig_vol.storagePoolLookupByVolume() +
I've just thought about the case where the disk is not in a pool. For example, we allow the user to create a Template (and then a VM) using an Image file. That image file is not necessarily on a pool, i.e., it is a volume from libvirt's perspective.
it is *NOT* a volume from libvirt's perspective
So in that case the call vir_conn.storageVolLookupByPath(path) will raise an exception and stop the clone operation. We need to keep this scenario in mind and handle it properly.
+ orig_pool_name = vir_pool.name().decode('utf-8') + orig_vol_name = vir_orig_vol.name().decode('utf-8') + + orig_pool = self.storagepool.lookup(orig_pool_name) + orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) + + new_pool_name = orig_pool_name + new_pool = orig_pool + + if orig_pool['type'] in ['dir', 'netfs', 'logical']: + # if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold + # a new volume with the same size, the pool 'default' should + # be used + if orig_vol['capacity'] > orig_pool['available']: + kimchi_log.warning('storage pool \'%s\' doesn\'t have ' + 'enough free space to store image ' + '\'%s\'; falling back to \'default\'', + orig_pool_name, path) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # ...and if even the pool 'default' cannot hold a new + # volume, raise an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', + {'name': domain_name}) + + elif orig_pool['type'] in ['scsi', 'iscsi']: + # SCSI and iSCSI always fall back to the storage pool 'default' + kimchi_log.warning('cannot create new volume for clone in ' + 'storage pool \'%s\'; falling back to ' + '\'default\'', orig_pool_name) + new_pool_name = u'default' + new_pool = self.storagepool.lookup(u'default') + + # if the pool 'default' cannot hold a new volume, raise + # an exception + if orig_vol['capacity'] > new_pool['available']: + domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] + raise InvalidOperation('KCHVM0034E', {'name': domain_name}) + + else: + # unexpected storage pool type + raise InvalidOperation('KCHPOOL0014E', + {'type': orig_pool['type']}) + + # new volume name: <UUID>-<loop-index>.<original extension> + # e.g. 
1234-5678-9012-3456-0.img + ext = os.path.splitext(path)[1] + new_vol_name = u'%s-%d%s' % (uuid, i, ext) + new_vol_params = {'name': new_vol_name, 'volume_path': path} + task = self.storagevolumes.create(new_pool_name, new_vol_params) + self.task.wait(task['id'], 3600) # 1 h + + # get the new volume path and update the XML descriptor + new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name) + xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path, + new_vol['path'], 'file') + + # remove the new volume should an error occur later + rollback.prependDefer(self.storagevolume.delete, new_pool_name, + new_vol_name) + + return xml + + def _clone_update_objstore(self, dom, new_uuid, rollback): + """Update Kimchi's object store with the cloning VM. + + Arguments: + dom -- The native domain object of the original VM. + new_uuid -- The UUID of the new, clonning VM. + rollback -- A rollback context so the object store entry can be removed + if an error occurs during the cloning operation. + """ + xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE).decode('utf-8') + old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] + + with self.objstore as session: + try: + vm = session.get('vm', old_uuid) + icon = vm['icon'] + session.store('vm', new_uuid, {'icon': icon}) + except NotFoundError: + # if we cannot find an object store entry for the original VM, + # don't store one with an empty value. + pass + else: + # we need to define a custom function to prepend to the + # rollback context because the object store session needs to be + # opened and closed correctly (i.e. "prependDefer" only + # accepts one command at a time but we need more than one to + # handle an object store). 
+ def _rollback_objstore(): + with self.objstore as session_rb: + session_rb.delete('vm', new_uuid, ignore_missing=True) + + # remove the new object store entry should an error occur later + rollback.prependDefer(_rollback_objstore) + def _build_access_elem(self, users, groups): access = E.access() for user in users:
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

On 04-11-2014 09:07, Aline Manera wrote:
I've just thought about the case where the disk is not in a pool. For example, we allow the user to create a Template (and then a VM) using an Image file. That image file is not necessarily on a pool, i.e., it is a volume from libvirt's perspective. So in that case the call vir_conn.storageVolLookupByPath(path) will raise an exception and stop the clone operation. We need to keep this scenario in mind and handle it properly.
I did a quick test now: I created a template based on a local image on my home dir (which is not a pool) and created a VM with that template. After checking the VM storage settings, I see it has its disk on the default storage pool (/var/lib/libvirt/images), so I guess it must have been copied there to make sure the disk was in an existing pool. Can you try to reproduce this and make sure this is how it works? If that's the case, then there's nothing to be done about this issue.

On 11/04/2014 11:32 AM, Crístian Viana wrote:
On 04-11-2014 09:07, Aline Manera wrote:
I've just thought about the case where the disk is not in a pool. For example, we allow the user to create a Template (and then a VM) using an Image file. That image file is not necessarily on a pool, i.e., it is a volume from libvirt's perspective. So in that case the call vir_conn.storageVolLookupByPath(path) will raise an exception and stop the clone operation. We need to keep this scenario in mind and handle it properly.
I did a quick test now: I created a template based on a local image on my home dir (which is not a pool) and created a VM with that template. After checking the VM storage settings, I see it has its disk on the default storage pool (/var/lib/libvirt/images), so I guess it must have been copied there to make sure the disk was in an existing pool.
Can you try to reproduce this and make sure this is how it works? If that's the case, then there's nothing to be done about this issue.
Thanks for checking! Anyway, we support guests created outside Kimchi and they may have this situation too. We don't need to think about a fix right now, but make sure your code will not raise an exception in this case. Maybe a try/except block around: + vir_orig_vol = vir_conn.storageVolLookupByPath(path) + vir_pool = vir_orig_vol.storagePoolLookupByVolume() would make the code better.

The function "vm_clone" is now implemented on the mockmodel with a similar implementation as the real model; the biggest difference is that it doesn't deal with the different storage pool details. Also, new tests were added. Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/mockmodel.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++++ tests/test_model.py | 38 ++++++++++++++++++++++++++++++++ tests/test_rest.py | 31 ++++++++++++++++++++++++++ 3 files changed, 127 insertions(+) diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index baee0b6..2265298 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -28,6 +28,7 @@ import os import shutil import psutil import random +import re import string import time import uuid @@ -181,6 +182,63 @@ class MockModel(object): def vm_connect(self, name): pass + def _clone_get_next_name(self, basename): + re_group_num = 'num' + + max_num = 0 + re_cloned_vm = re.compile(u'%s-clone-(?P<%s>\d+)' % + (basename, re_group_num)) + + vm_names = self.vms_get_list() + + for v in vm_names: + match = re_cloned_vm.match(v) + if match is not None: + max_num = max(max_num, int(match.group(re_group_num))) + + return u'%s-clone-%d' % (basename, max_num + 1) + + def vm_clone(self, name): + vm = self._mock_vms[name] + if vm.info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + new_name = self._clone_get_next_name(name) + + taskid = self.add_task(u'/vms/%s' % new_name, self._do_clone, + {'name': name, 'new_name': new_name}) + return self.task_lookup(taskid) + + def _do_clone(self, cb, params): + name = params['name'] + new_name = params['new_name'] + + vm = self._mock_vms[name] + new_vm = copy.deepcopy(vm) + + new_uuid = unicode(uuid.uuid4()) + + new_vm.name = new_name + new_vm.info['name'] = new_name + new_vm.uuid = new_uuid + new_vm.info['uuid'] = new_uuid + + for mac, iface in new_vm.ifaces.items(): + new_mac = MockVMIface.get_mac() + iface.info['mac'] = 
new_mac + new_vm.ifaces[new_mac] = iface + + storage_names = new_vm.storagedevices.keys() + for i, storage_name in enumerate(storage_names): + storage = new_vm.storagedevices[storage_name] + basename, ext = os.path.splitext(storage.info['path']) + new_path = u'%s-%d%s' % (basename, i, ext) + new_vm.storagedevices[storage_name].path = new_path + + self._mock_vms[new_name] = new_vm + + cb('OK', True) + def vms_create(self, params): t_name = template_name_from_uri(params['template']) name = get_vm_name(params.get('name'), t_name, self._mock_vms.keys()) diff --git a/tests/test_model.py b/tests/test_model.py index b165731..b05d71d 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -1256,6 +1256,44 @@ class ModelTests(unittest.TestCase): self.assertEquals(vms, sorted(vms, key=unicode.lower)) + def test_vm_clone(self): + inst = model.Model('test:///default', objstore_loc=self.tmp_store) + + all_vm_names = inst.vms_get_list() + name = all_vm_names[0] + + original_vm = inst.vm_lookup(name) + + # the VM 'test' should be running by now, so we can't clone it yet + self.assertRaises(InvalidParameter, inst.vm_clone, name) + + with RollbackContext() as rollback: + inst.vm_poweroff(name) + rollback.prependDefer(inst.vm_start, name) + + task = inst.vm_clone(name) + clone_name = task['target_uri'].split('/')[-1] + rollback.prependDefer(inst.vm_delete, clone_name) + inst.task_wait(task['id']) + + # update the original VM info because its state has changed + original_vm = inst.vm_lookup(name) + clone_vm = inst.vm_lookup(clone_name) + + self.assertNotEqual(original_vm['name'], clone_vm['name']) + self.assertTrue(re.match(u'%s-clone-\d+' % original_vm['name'], + clone_vm['name'])) + del original_vm['name'] + del clone_vm['name'] + + self.assertNotEqual(original_vm['uuid'], clone_vm['uuid']) + del original_vm['uuid'] + del clone_vm['uuid'] + + # compare all VM settings except the ones already compared + # (and removed) above (i.e. 
'name' and 'uuid') + self.assertEquals(original_vm, clone_vm) + def test_use_test_host(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) diff --git a/tests/test_rest.py b/tests/test_rest.py index 66072bc..09cf52b 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -22,6 +22,7 @@ import base64 import json import os import random +import re import requests import shutil import time @@ -341,6 +342,10 @@ class RestTests(unittest.TestCase): self.assertEquals(200, resp.status) self.assertTrue(resp.getheader('Content-type').startswith('image')) + # Clone a running VM + resp = self.request('/vms/test-vm/clone', '{}', 'POST') + self.assertEquals(400, resp.status) + # Force poweroff the VM resp = self.request('/vms/test-vm/poweroff', '{}', 'POST') vm = json.loads(self.request('/vms/test-vm').read()) @@ -351,6 +356,32 @@ class RestTests(unittest.TestCase): resp = self.request('/vms', req, 'POST') self.assertEquals(400, resp.status) + # Clone a VM + resp = self.request('/vms/test-vm/clone', '{}', 'POST') + self.assertEquals(202, resp.status) + task = json.loads(resp.read()) + wait_task(self._task_lookup, task['id']) + task = json.loads(self.request('/tasks/%s' % task['id'], '{}').read()) + self.assertEquals('finished', task['status']) + clone_vm_name = task['target_uri'].split('/')[-1] + self.assertTrue(re.match(u'test-vm-clone-\d+', clone_vm_name)) + + resp = self.request('/vms/test-vm', '{}') + original_vm_info = json.loads(resp.read()) + resp = self.request('/vms/%s' % clone_vm_name, '{}') + self.assertEquals(200, resp.status) + clone_vm_info = json.loads(resp.read()) + + self.assertNotEqual(original_vm_info['name'], clone_vm_info['name']) + del original_vm_info['name'] + del clone_vm_info['name'] + + self.assertNotEqual(original_vm_info['uuid'], clone_vm_info['uuid']) + del original_vm_info['uuid'] + del clone_vm_info['uuid'] + + self.assertEquals(original_vm_info, clone_vm_info) + # Delete the VM resp = self.request('/vms/test-vm', '{}', 
'DELETE') self.assertEquals(204, resp.status) -- 1.9.3

Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com> On 11/02/2014 11:05 PM, Crístian Viana wrote:
The function "vm_clone" is now implemented on the mockmodel with an implementation similar to that of the real model; the biggest difference is that it doesn't deal with the different storage pool details. Also, new tests were added.
Signed-off-by: Crístian Viana <vianac@linux.vnet.ibm.com> --- src/kimchi/mockmodel.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++++ tests/test_model.py | 38 ++++++++++++++++++++++++++++++++ tests/test_rest.py | 31 ++++++++++++++++++++++++++ 3 files changed, 127 insertions(+)
diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index baee0b6..2265298 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -28,6 +28,7 @@ import os import shutil import psutil import random +import re import string import time import uuid @@ -181,6 +182,63 @@ class MockModel(object): def vm_connect(self, name): pass
+ def _clone_get_next_name(self, basename): + re_group_num = 'num' + + max_num = 0 + re_cloned_vm = re.compile(u'%s-clone-(?P<%s>\d+)' % + (basename, re_group_num)) + + vm_names = self.vms_get_list() + + for v in vm_names: + match = re_cloned_vm.match(v) + if match is not None: + max_num = max(max_num, int(match.group(re_group_num))) + + return u'%s-clone-%d' % (basename, max_num + 1) + + def vm_clone(self, name): + vm = self._mock_vms[name] + if vm.info['state'] != u'shutoff': + raise InvalidParameter('KCHVM0033E', {'name': name}) + + new_name = self._clone_get_next_name(name) + + taskid = self.add_task(u'/vms/%s' % new_name, self._do_clone, + {'name': name, 'new_name': new_name}) + return self.task_lookup(taskid) + + def _do_clone(self, cb, params): + name = params['name'] + new_name = params['new_name'] + + vm = self._mock_vms[name] + new_vm = copy.deepcopy(vm) + + new_uuid = unicode(uuid.uuid4()) + + new_vm.name = new_name + new_vm.info['name'] = new_name + new_vm.uuid = new_uuid + new_vm.info['uuid'] = new_uuid + + for mac, iface in new_vm.ifaces.items(): + new_mac = MockVMIface.get_mac() + iface.info['mac'] = new_mac + new_vm.ifaces[new_mac] = iface + + storage_names = new_vm.storagedevices.keys() + for i, storage_name in enumerate(storage_names): + storage = new_vm.storagedevices[storage_name] + basename, ext = os.path.splitext(storage.info['path']) + new_path = u'%s-%d%s' % (basename, i, ext) + new_vm.storagedevices[storage_name].path = new_path + + self._mock_vms[new_name] = new_vm + + cb('OK', True) + def vms_create(self, params): t_name = template_name_from_uri(params['template']) name = get_vm_name(params.get('name'), t_name, self._mock_vms.keys()) diff --git a/tests/test_model.py b/tests/test_model.py index b165731..b05d71d 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -1256,6 +1256,44 @@ class ModelTests(unittest.TestCase):
self.assertEquals(vms, sorted(vms, key=unicode.lower))
+ def test_vm_clone(self): + inst = model.Model('test:///default', objstore_loc=self.tmp_store) + + all_vm_names = inst.vms_get_list() + name = all_vm_names[0] + + original_vm = inst.vm_lookup(name) + + # the VM 'test' should be running by now, so we can't clone it yet + self.assertRaises(InvalidParameter, inst.vm_clone, name) + + with RollbackContext() as rollback: + inst.vm_poweroff(name) + rollback.prependDefer(inst.vm_start, name) + + task = inst.vm_clone(name) + clone_name = task['target_uri'].split('/')[-1] + rollback.prependDefer(inst.vm_delete, clone_name) + inst.task_wait(task['id']) + + # update the original VM info because its state has changed + original_vm = inst.vm_lookup(name) + clone_vm = inst.vm_lookup(clone_name) + + self.assertNotEqual(original_vm['name'], clone_vm['name']) + self.assertTrue(re.match(u'%s-clone-\d+' % original_vm['name'], + clone_vm['name'])) + del original_vm['name'] + del clone_vm['name'] + + self.assertNotEqual(original_vm['uuid'], clone_vm['uuid']) + del original_vm['uuid'] + del clone_vm['uuid'] + + # compare all VM settings except the ones already compared + # (and removed) above (i.e. 'name' and 'uuid') + self.assertEquals(original_vm, clone_vm) + def test_use_test_host(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) diff --git a/tests/test_rest.py b/tests/test_rest.py index 66072bc..09cf52b 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -22,6 +22,7 @@ import base64 import json import os import random +import re import requests import shutil import time @@ -341,6 +342,10 @@ class RestTests(unittest.TestCase): self.assertEquals(200, resp.status) self.assertTrue(resp.getheader('Content-type').startswith('image'))
+ # Clone a running VM + resp = self.request('/vms/test-vm/clone', '{}', 'POST') + self.assertEquals(400, resp.status) + # Force poweroff the VM resp = self.request('/vms/test-vm/poweroff', '{}', 'POST') vm = json.loads(self.request('/vms/test-vm').read()) @@ -351,6 +356,32 @@ class RestTests(unittest.TestCase): resp = self.request('/vms', req, 'POST') self.assertEquals(400, resp.status)
+ # Clone a VM + resp = self.request('/vms/test-vm/clone', '{}', 'POST') + self.assertEquals(202, resp.status) + task = json.loads(resp.read()) + wait_task(self._task_lookup, task['id']) + task = json.loads(self.request('/tasks/%s' % task['id'], '{}').read()) + self.assertEquals('finished', task['status']) + clone_vm_name = task['target_uri'].split('/')[-1] + self.assertTrue(re.match(u'test-vm-clone-\d+', clone_vm_name)) + + resp = self.request('/vms/test-vm', '{}') + original_vm_info = json.loads(resp.read()) + resp = self.request('/vms/%s' % clone_vm_name, '{}') + self.assertEquals(200, resp.status) + clone_vm_info = json.loads(resp.read()) + + self.assertNotEqual(original_vm_info['name'], clone_vm_info['name']) + del original_vm_info['name'] + del clone_vm_info['name'] + + self.assertNotEqual(original_vm_info['uuid'], clone_vm_info['uuid']) + del original_vm_info['uuid'] + del clone_vm_info['uuid'] + + self.assertEquals(original_vm_info, clone_vm_info) + # Delete the VM resp = self.request('/vms/test-vm', '{}', 'DELETE') self.assertEquals(204, resp.status)

I will apply patches 1-3 and 5 so you don't need to send them again in your V2. On 11/02/2014 11:05 PM, Crístian Viana wrote:
Crístian Viana (8): Make function "randomMAC" public Allow updating XML attribute in "xml_item_update" Render different types of data in generate_action_handler Add model function to wait for task Clean up test pool directories Create storage volume based on an existing volume Clone virtual machines Add tests and mockmodel for the cloning feature
docs/API.md | 8 + src/kimchi/API.json | 5 + src/kimchi/control/base.py | 37 +++-- src/kimchi/control/host.py | 18 +-- src/kimchi/control/vms.py | 1 + src/kimchi/i18n.py | 4 + src/kimchi/mockmodel.py | 102 ++++++++++++- src/kimchi/model/storagevolumes.py | 73 ++++++++- src/kimchi/model/tasks.py | 28 ++++ src/kimchi/model/vmifaces.py | 18 +-- src/kimchi/model/vms.py | 297 ++++++++++++++++++++++++++++++++++++- src/kimchi/xmlutils/utils.py | 7 +- tests/test_model.py | 83 +++++++++-- tests/test_rest.py | 54 +++++++ 14 files changed, 682 insertions(+), 53 deletions(-)

On 11/03/2014 03:54 PM, Aline Manera wrote:
Applied. Thanks.
Regards,
Aline Manera
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel
Only patches 1-3 were applied!
participants (2)
-
Aline Manera
-
Crístian Viana