[Kimchi-devel] [PATCH v3 4/4] CDROM Management: Guest vm storage devices mockmodel and rest api test cases

Daniel Barboza danielhb at linux.vnet.ibm.com
Wed Feb 12 02:38:39 UTC 2014


From: Rodrigo Trujillo <rodrigo.trujillo at linux.vnet.ibm.com>

This patch implements the mockmodel class to simulate lookup, add,
remove, update of devices in a guest vm. Also, it adds test cases to
test the rest API.

Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo at linux.vnet.ibm.com>

Minor changes/improvements based on ML feedback

Signed-off-by: Daniel Henrique Barboza <danielhb at linux.vnet.ibm.com>
---
 src/kimchi/mockmodel.py | 66 +++++++++++++++++++++++++++++++++++++++++++
 tests/test_rest.py      | 74 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 140 insertions(+)

diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py
index 441c0e4..9b7233b 100644
--- a/src/kimchi/mockmodel.py
+++ b/src/kimchi/mockmodel.py
@@ -542,6 +542,51 @@ class MockModel(object):
     def networks_get_list(self):
         return sorted(self._mock_networks.keys())
 
+    def storages_create(self, vm_name, params):
+        path = params.get('path')
+        if path.startswith('/') and not os.path.exists(path):
+            msg = "Path specified for device is not valid"
+            raise InvalidParameter(msg)
+
+        dom = self._get_vm(vm_name)
+        dev = params.get('dev', None)
+        if dev and dev in self.storages_get_list(vm_name):
+            raise OperationFailed('Device name already in use.')
+        if not dev:
+            raise OperationFailed('Must specify a device name.')
+        vmdev = MockVMStorageDevice(params)
+        dom.storagedevices[params['dev']] = vmdev
+        return params['dev']
+
+    def storages_get_list(self, vm_name):
+        dom = self._get_vm(vm_name)
+        return dom.storagedevices.keys()
+
+    def storage_lookup(self, vm_name, dev_name):
+        dom = self._get_vm(vm_name)
+        if dev_name not in self.storages_get_list(vm_name):
+            msg = 'The storage device "%s" does not exist in the guest "%s"' \
+                  % (dev_name, vm_name)
+            raise NotFoundError(msg)
+        return dom.storagedevices.get(dev_name).info
+
+    def storage_delete(self, vm_name, dev_name):
+        dom = self._get_vm(vm_name)
+        if dev_name not in self.storages_get_list(vm_name):
+            msg = 'The storage device "%s" does not exist in the guest "%s"' \
+                  % (dev_name, vm_name)
+            raise NotFoundError(msg)
+        dom.storagedevices.pop(dev_name)
+
+    def storage_update(self, vm_name, dev_name, params):
+        try:
+            dom = self._get_vm(vm_name)
+            dom.storagedevices[dev_name].info.update(params)
+        except Exception as e:
+            msg = 'Was not possible to update storage device: %s' % e.message
+            raise OperationFailed(msg)
+        return dev_name
+
     def vmifaces_create(self, vm, params):
         if (params["type"] == "network" and
             params["network"] not in self.networks_get_list()):
@@ -720,6 +765,24 @@ class MockVMTemplate(VMTemplate):
         return disk_paths
 
 
+class MockVMStorageDevice(object):
+    def __init__(self, params):
+        # Defaults
+        if params['dev'] == 'hda':
+            self.info = {'dev': params.get('dev'),
+                         'type': 'disk',
+                         'path': '/tmp/myimage.img'}
+        elif params['dev'] == 'hdc':
+            self.info = {'dev': params.get('dev'),
+                         'type': 'cdrom',
+                         'path': ''}
+        # New devices
+        else:
+            self.info = {'dev': params.get('dev'),
+                         'type': params.get('type'),
+                         'path': params.get('path')}
+
+
 class MockVMIface(object):
     counter = 0
 
@@ -745,6 +808,9 @@ class MockVM(object):
         self.disk_paths = []
         self.networks = template_info['networks']
         ifaces = [MockVMIface(net) for net in self.networks]
+        default_devices = [{'dev': 'hda'}, {'dev': 'hdc'}]
+        self.storagedevices = dict([(dev['dev'], MockVMStorageDevice(dev)) \
+                                    for dev in default_devices])
         self.ifaces = dict([(iface.info['mac'], iface) for iface in ifaces])
         self.info = {'state': 'shutoff',
                      'stats': "{'cpu_utilization': 20, 'net_throughput' : 35, \
diff --git a/tests/test_rest.py b/tests/test_rest.py
index 69b8316..1dae45e 100644
--- a/tests/test_rest.py
+++ b/tests/test_rest.py
@@ -341,6 +341,80 @@ class RestTests(unittest.TestCase):
         resp = self.request('/templates/test', '{}', 'DELETE')
         self.assertEquals(204, resp.status)
 
+    def test_vm_storage_devices(self):
+
+        with RollbackContext() as rollback:
+            # Create a template as a base for our VMs
+            req = json.dumps({'name': 'test', 'cdrom': '/nonexistent.iso'})
+            resp = self.request('/templates', req, 'POST')
+            self.assertEquals(201, resp.status)
+            # Delete the template
+            rollback.prependDefer(self.request,
+                                  '/templates/test', '{}', 'DELETE')
+
+            # Create a VM with default args
+            req = json.dumps({'name': 'test-vm',
+                              'template': '/templates/test'})
+            resp = self.request('/vms', req, 'POST')
+            self.assertEquals(201, resp.status)
+            # Delete the VM
+            rollback.prependDefer(self.request,
+                                  '/vms/test-vm', '{}', 'DELETE')
+
+            # Attach a storage disk
+            req = json.dumps({'dev': 'hdx',
+                              'type': 'disk',
+                              'path': '/tmp'})
+            resp = self.request('/vms/test-vm/storages', req, 'POST')
+            self.assertEquals(201, resp.status)
+            # Delete the disk
+            rollback.prependDefer(self.request,
+                                  '/vms/test-vm/storages/hdx', '{}', 'DELETE')
+
+            # Detach storage disk
+            resp = self.request('/vms/test-vm/storages/hdx', '{}', 'DELETE')
+            self.assertEquals(204, resp.status)
+
+            # Detach storage disk that does not exist
+            resp = self.request('/vms/test-vm/storages/hdx', '{}', 'DELETE')
+            self.assertEquals(404, resp.status)
+
+            # Attach cdrom with nonexistent iso
+            req = json.dumps({'dev': 'hdx',
+                              'type': 'cdrom',
+                              'path': '/tmp/nonexistent.iso'})
+            resp = self.request('/vms/test-vm/storages', req, 'POST')
+            self.assertEquals(400, resp.status)
+
+            # Attach a cdrom with existent dev name
+            open('/tmp/existent.iso', 'w').close()
+            req = json.dumps({'dev': 'hdx',
+                              'type': 'cdrom',
+                              'path': '/tmp/existent.iso'})
+            resp = self.request('/vms/test-vm/storages', req, 'POST')
+            self.assertEquals(201, resp.status)
+            # Delete the file and cdrom
+            rollback.prependDefer(self.request,
+                                  '/vms/test-vm/storages/hdx', '{}', 'DELETE')
+            os.remove('/tmp/existent.iso')
+
+            # Change path of storage cdrom
+            req = json.dumps({'path': 'http://myserver.com/myiso.iso'})
+            resp = self.request('/vms/test-vm/storages/hdx', req, 'PUT')
+            self.assertEquals(200, resp.status)
+
+            # Test GET
+            devs = json.loads(self.request('/vms/test-vm/storages').read())
+            self.assertEquals(3, len(devs))
+
+            # Detach storage cdrom
+            resp = self.request('/vms/test-vm/storages/hdx', '{}', 'DELETE')
+            self.assertEquals(204, resp.status)
+
+            # Test GET
+            devs = json.loads(self.request('/vms/test-vm/storages').read())
+            self.assertEquals(2, len(devs))
+
     def test_vm_iface(self):
 
         with RollbackContext() as rollback:
-- 
1.8.3.1




More information about the Kimchi-devel mailing list