[Kimchi-devel] [PATCH 12/13] refactor model: Create a separated model for vm resource

Aline Manera alinefm at linux.vnet.ibm.com
Fri Jan 17 02:24:48 UTC 2014


From: Aline Manera <alinefm at br.ibm.com>

To avoid duplicating code in model and mockmodel, the code related to
vm resource was added to model_/vms.py and the specific code for each
backend (libvirt or mock) was added to model_/libvirtbackend.py and
model_/mockbackend.py

Signed-off-by: Aline Manera <alinefm at br.ibm.com>
---
 src/kimchi/model_/libvirtbackend.py |  309 ++++++++++++++++++++++++++++++++++-
 src/kimchi/model_/mockbackend.py    |  117 ++++++++++++-
 src/kimchi/model_/vms.py            |  164 +++++++++++++++++++
 src/kimchi/vmtemplate.py            |    6 +-
 4 files changed, 587 insertions(+), 9 deletions(-)
 create mode 100644 src/kimchi/model_/vms.py

diff --git a/src/kimchi/model_/libvirtbackend.py b/src/kimchi/model_/libvirtbackend.py
index 41f1b06..bf29c78 100644
--- a/src/kimchi/model_/libvirtbackend.py
+++ b/src/kimchi/model_/libvirtbackend.py
@@ -42,20 +42,30 @@ from kimchi import netinfo
 from kimchi import networkxml
 from kimchi import xmlutils
 from kimchi.asynctask import AsyncTask
+from kimchi.exception import IsoFormatError, NotFoundError, OperationFailed
 from kimchi.featuretests import FeatureTests
 from kimchi.isoinfo import IsoImage
-from kimchi.exception import IsoFormatError, OperationFailed
 from kimchi.model_.libvirtconnection import LibvirtConnection
 from kimchi.model_.libvirtstoragepool import StoragePoolDef
 from kimchi.objectstore import ObjectStore
 from kimchi.scan import Scanner
+from kimchi.screenshot import VMScreenshot
 from kimchi.utils import kimchi_log
 
+GUESTS_STATS_INTERVAL = 5
 HOST_STATS_INTERVAL = 1
 STORAGE_SOURCES = {'netfs': {'addr': '/pool/source/host/@name',
                              'path': '/pool/source/dir/@path'}}
 
 class LibvirtBackend(object):
+    dom_state_map = {0: 'nostate',
+                     1: 'running',
+                     2: 'blocked',
+                     3: 'paused',
+                     4: 'shutdown',
+                     5: 'shutoff',
+                     6: 'crashed'}
+
     pool_state_map = {0: 'inactive',
                       1: 'initializing',
                       2: 'active',
@@ -75,9 +85,13 @@ class LibvirtBackend(object):
         self.objstore = ObjectStore(objstore_loc)
         self.next_taskid = 1
         self.scanner = Scanner(self._clean_scan)
+        self.stats = {}
         self.host_stats = defaultdict(int)
+        self.guests_stats_thread = BackgroundTask(GUESTS_STATS_INTERVAL,
+                                                  self._update_guests_stats)
         self.host_stats_thread = BackgroundTask(HOST_STATS_INTERVAL,
                                                 self._update_host_stats)
+        self.guests_stats_thread.start()
         self.host_stats_thread.start()
 
         # Subscribe function to set host capabilities to be run when cherrypy
@@ -112,6 +126,96 @@ class LibvirtBackend(object):
                 'screenshot': VMScreenshot.get_stream_test_result(),
                 'system_report_tool': bool(report_tool)}
 
+    def _update_guests_stats(self):
+        conn = self.conn.get()
+        vm_list = self.get_vms()
+
+        for name in vm_list:
+            info = self.get_vm_by_name(name)
+            vm_uuid = info['uuid']
+            state = info['state']
+            if state != 'running':
+                self.stats[vm_uuid] = {}
+                continue
+
+            if self.stats.get(vm_uuid, None) is None:
+                self.stats[vm_uuid] = {}
+
+            timestamp = time.time()
+            prevStats = self.stats.get(vm_uuid, {})
+            seconds = timestamp - prevStats.get('timestamp', 0)
+            self.stats[vm_uuid].update({'timestamp': timestamp})
+
+            dom = conn.lookupByName(name.encode("utf-8"))
+            self._get_percentage_cpu_usage(vm_uuid, dom.info, seconds)
+            self._get_network_io_rate(vm_uuid, dom, seconds)
+            self._get_disk_io_rate(vm_uuid, dom, seconds)
+
+    def _get_percentage_cpu_usage(self, vm_uuid, info, seconds):
+        prevCpuTime = self.stats[vm_uuid].get('cputime', 0)
+
+        cpus = info[3]
+        cpuTime = info[4] - prevCpuTime
+
+        base = (((cpuTime) * 100.0) / (seconds * 1000.0 * 1000.0 * 1000.0))
+        percentage = max(0.0, min(100.0, base / cpus))
+
+        self.stats[vm_uuid].update({'cputime': info[4], 'cpu': percentage})
+
+    def _get_network_io_rate(self, vm_uuid, dom, seconds):
+        prevNetRxKB = self.stats[vm_uuid].get('netRxKB', 0)
+        prevNetTxKB = self.stats[vm_uuid].get('netTxKB', 0)
+        currentMaxNetRate = self.stats[vm_uuid].get('max_net_io', 100)
+
+        rx_bytes = 0
+        tx_bytes = 0
+
+        tree = ElementTree.fromstring(dom.XMLDesc(0))
+        for target in tree.findall('devices/interface/target'):
+            dev = target.get('dev')
+            io = dom.interfaceStats(dev)
+            rx_bytes += io[0]
+            tx_bytes += io[4]
+
+        netRxKB = float(rx_bytes) / 1000
+        netTxKB = float(tx_bytes) / 1000
+
+        rx_stats = (netRxKB - prevNetRxKB) / seconds
+        tx_stats = (netTxKB - prevNetTxKB) / seconds
+
+        rate = rx_stats + tx_stats
+        max_net_io = round(max(currentMaxNetRate, int(rate)), 1)
+
+        self.stats[vm_uuid].update({'net_io': rate, 'max_net_io': max_net_io,
+                                    'netRxKB': netRxKB, 'netTxKB': netTxKB})
+
+    def _get_disk_io_rate(self, vm_uuid, dom, seconds):
+        prevDiskRdKB = self.stats[vm_uuid].get('diskRdKB', 0)
+        prevDiskWrKB = self.stats[vm_uuid].get('diskWrKB', 0)
+        currentMaxDiskRate = self.stats[vm_uuid].get('max_disk_io', 100)
+
+        rd_bytes = 0
+        wr_bytes = 0
+
+        tree = ElementTree.fromstring(dom.XMLDesc(0))
+        for target in tree.findall("devices/disk/target"):
+            dev = target.get("dev")
+            io = dom.blockStats(dev)
+            rd_bytes += io[1]
+            wr_bytes += io[3]
+
+        diskRdKB = float(rd_bytes) / 1024
+        diskWrKB = float(wr_bytes) / 1024
+
+        rd_stats = (diskRdKB - prevDiskRdKB) / seconds
+        wr_stats = (diskWrKB - prevDiskWrKB) / seconds
+
+        rate = rd_stats + wr_stats
+        max_disk_io = round(max(currentMaxDiskRate, int(rate)), 1)
+
+        self.stats[vm_uuid].update({'disk_io': rate, 'max_disk_io': max_disk_io,
+                                    'diskRdKB': diskRdKB, 'diskWrKB': diskWrKB})
+
     def _update_host_stats(self):
         preTimeStamp = self.host_stats['timestamp']
         timestamp = time.time()
@@ -602,7 +706,15 @@ class LibvirtBackend(object):
                             'pf': forward_pf}}
 
     def _get_vms_attach_to_network(self, network):
-        return []
+        vms = []
+        xpath = "/domain/devices/interface[@type='network']/source/@network"
+        conn = self.conn.get()
+        for dom in conn.listAllDomains(0):
+            xml = dom.XMLDesc(0)
+            networks = xmlutils.xpath_get_text(xml, xpath)
+            if network in networks:
+                vms.append(dom.name())
+        return vms
 
     def activate_network(self, name):
         conn = self.conn.get()
@@ -648,3 +760,196 @@ class LibvirtBackend(object):
     def delete_template(self, name):
         with self.objstore as session:
             session.delete('template', name)
+
+    def create_vm(self, name, uuid, tmpl, vol_list):
+        conn = self.conn.get()
+        # Store the icon for displaying later
+        icon = tmpl.info.get('icon', None)
+        if icon is not None:
+            with self.objstore as session:
+                session.store('vm', uuid, {'icon': icon})
+
+        libvirt_stream = len(self.libvirt_stream_protocols) != 0
+
+        xml = tmpl.to_vm_xml(name, uuid, libvirt_stream,
+                             self.qemu_stream_dns)
+        try:
+            conn.defineXML(xml.encode('utf-8'))
+        except libvirt.libvirtError as e:
+            # rollback: remove the volumes created for this vm
+            for v in vol_list:
+                vol = conn.storageVolLookupByPath(v['path'])
+                vol.delete(0)
+            raise OperationFailed(e.get_error_message())
+
+    def get_vms(self):
+        conn = self.conn.get()
+        ids = conn.listDomainsID()
+        names = map(lambda x: conn.lookupByID(x).name(), ids)
+        names += conn.listDefinedDomains()
+        names = map(lambda x: x.decode('utf-8'), names)
+        return sorted(names, key=unicode.lower)
+
+    def get_screenshot_by_name(self, vm_uuid):
+        with self.objstore as session:
+            try:
+                params = session.get('screenshot', vm_uuid)
+            except NotFoundError:
+                params = {'uuid': vm_uuid}
+                session.store('screenshot', vm_uuid, params)
+
+        screenshot = LibvirtVMScreenshot(params, self.conn)
+        img_path = screenshot.lookup()
+        # screenshot info changed after scratch generation
+        with self.objstore as session:
+            session.store('screenshot', vm_uuid, screenshot.info)
+
+        return img_path
+
+    def delete_screenshot(self, vm_uuid):
+        os.remove(self.get_screenshot_by_name(vm_uuid))
+        with self.objstore as session:
+            session.delete('screenshot', vm_uuid)
+
+    def get_vm_by_name(self, name):
+        conn = self.conn.get()
+        dom = conn.lookupByName(name.encode("utf-8"))
+        info = dom.info()
+        state = self.dom_state_map[info[0]]
+        screenshot = None
+        graphics = self._get_vm_graphics(dom)
+        graphics_type, graphics_listen, graphics_port = graphics
+        graphics_port = graphics_port if state == 'running' else None
+        if state == 'running':
+            screenshot = self.get_screenshot_by_name(name)
+        elif state == 'shutoff':
+            # reset vm stats when it is powered off to avoid sending
+            # incorrect (old) data
+            self.stats[dom.UUIDString()] = {}
+
+        with self.objstore as session:
+            try:
+                extra_info = session.get('vm', dom.UUIDString())
+            except NotFoundError:
+                extra_info = {}
+        icon = extra_info.get('icon')
+
+        vm_stats = self.stats.get(dom.UUIDString(), {})
+        stats = {}
+        stats['cpu_utilization'] = vm_stats.get('cpu', 0)
+        stats['net_throughput'] = vm_stats.get('net_io', 0)
+        stats['net_throughput_peak'] = vm_stats.get('max_net_io', 100)
+        stats['io_throughput'] = vm_stats.get('disk_io', 0)
+        stats['io_throughput_peak'] = vm_stats.get('max_disk_io', 100)
+
+        return {'state': state, 'stats': str(stats), 'uuid': dom.UUIDString(),
+                'memory': info[2] >> 10, 'cpus': info[3], 'icon': icon,
+                'screenshot': screenshot,
+                'graphics': {'type': graphics_type, 'listen': graphics_listen,
+                             'port': graphics_port}
+                }
+
+    def _get_vm_graphics(self, dom):
+        xml = dom.XMLDesc(0)
+        expr = "/domain/devices/graphics/@type"
+        res = xmlutils.xpath_get_text(xml, expr)
+        graphics_type = res[0] if res else None
+        expr = "/domain/devices/graphics/@listen"
+        res = xmlutils.xpath_get_text(xml, expr)
+        graphics_listen = res[0] if res else None
+        graphics_port = None
+        if graphics_type:
+            expr = "/domain/devices/graphics[@type='%s']/@port" % graphics_type
+            res = xmlutils.xpath_get_text(xml, expr)
+            graphics_port = int(res[0]) if res else None
+        return graphics_type, graphics_listen, graphics_port
+
+    def static_vm_update(self, name, params):
+        conn = self.conn.get()
+        dom = conn.lookupByName(name.encode("utf-8"))
+        old_xml = new_xml = dom.XMLDesc(0)
+
+        for key, val in params.items():
+            if key in VM_STATIC_UPDATE_PARAMS:
+                new_xml = xmlutils.xml_item_update(new_xml,
+                                                   VM_STATIC_UPDATE_PARAMS[key],
+                                                   val)
+
+        try:
+            dom.undefine()
+            conn.defineXML(new_xml)
+        except libvirt.libvirtError as e:
+            conn.defineXML(old_xml)
+            raise OperationFailed(e.get_error_message())
+
+    def live_vm_update(self, name, params):
+        pass
+
+    def delete_vm(self, name):
+        info = self.get_vm_by_name(name)
+        if info['state'] == 'running':
+            self.stop_vm(name)
+
+        conn = self.conn.get()
+        dom = conn.lookupByName(name.encode("utf-8"))
+        dom.undefine()
+
+        xml = dom.XMLDesc(0)
+        xpath = "/domain/devices/disk[@device='disk']/source/@file"
+        paths = xmlutils.xpath_get_text(xml, xpath)
+        for path in paths:
+            vol = conn.storageVolLookupByPath(path)
+            vol.delete(0)
+
+        with self.objstore as session:
+            session.delete('vm', dom.UUIDString(), ignore_missing=True)
+
+        self.delete_screenshot(dom.UUIDString())
+        vnc.remove_proxy_token(name)
+
+    def start_vm(self, name):
+        conn = self.conn.get()
+        dom = conn.lookupByName(name.encode("utf-8"))
+        dom.create()
+
+    def stop_vm(self, name):
+        conn = self.conn.get()
+        dom = conn.lookupByName(name.encode("utf-8"))
+        dom.destroy()
+
+    def connect_vm(self, name):
+        dom = self.conn.get().lookupByName(name.encode("utf-8"))
+        graphics_type, graphics_listen, graphics_port = \
+            self._get_vm_graphics(dom)
+        if graphics_port is None:
+            raise OperationFailed("Only able to connect to running vm's vnc "
+                                  "graphics.")
+        vnc.add_proxy_token(name, graphics_port)
+
+class LibvirtVMScreenshot(VMScreenshot):
+    def __init__(self, params, conn):
+        VMScreenshot.__init__(self, params)
+        self.conn = conn
+
+    def _generate_scratch(self, thumbnail):
+        def handler(stream, buf, opaque):
+            fd = opaque
+            os.write(fd, buf)
+
+        fd = os.open(thumbnail, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, 0644)
+        try:
+            conn = self.conn.get()
+            dom = conn.lookupByUUIDString(self.vm_uuid)
+            vm_name = dom.name()
+            stream = conn.newStream(0)
+            mimetype = dom.screenshot(stream, 0, 0)
+            stream.recvAll(handler, fd)
+        except libvirt.libvirtError:
+            try:
+                stream.abort()
+            except:
+                pass
+            raise NotFoundError("Screenshot not supported for %s" % self.vm_uuid)
+        else:
+            stream.finish()
+        finally:
+            os.close(fd)
diff --git a/src/kimchi/model_/mockbackend.py b/src/kimchi/model_/mockbackend.py
index a598582..d4314ca 100644
--- a/src/kimchi/model_/mockbackend.py
+++ b/src/kimchi/model_/mockbackend.py
@@ -28,6 +28,7 @@ import random
 from kimchi import config
 from kimchi.asynctask import AsyncTask
 from kimchi.objectstore import ObjectStore
+from kimchi.screenshot import VMScreenshot
 
 class MockBackend(object):
     _network_info = {'state': 'inactive', 'autostart': True, 'connection': '',
@@ -43,6 +44,8 @@ class MockBackend(object):
         self._storagepools = {}
         self._networks = {}
         self._templates = {}
+        self._vms = {}
+        self._screenshots = {}
 
     def _get_host_stats(self):
         memory_stats = {'total': 3934908416L,
@@ -169,11 +172,15 @@ class MockBackend(object):
 
     def get_network_by_name(self, name):
         info = self._networks[name]
-        info['vms'] = self._get_vms_attach_to_a_network(name)
+        info['vms'] = self._get_vms_attach_to_network(name)
         return info
 
-    def _get_vms_attach_to_a_network(self, network):
-        return []
+    def _get_vms_attach_to_network(self, network):
+        vms = []
+        for name, dom in self._vms.iteritems():
+            if network in dom.networks:
+                vms.append(name)
+        return vms
 
     def activate_network(self, name):
         self._networks[name]['state'] = 'active'
@@ -196,6 +203,69 @@ class MockBackend(object):
     def delete_template(self, name):
         del self._templates[name]
 
+    def create_vm(self, name, uuid, tmpl, vol_list):
+        vm = MockVM(uuid, name, tmpl.info)
+        icon = tmpl.info.get('icon', None)
+        if icon is not None:
+            vm.info['icon'] = icon
+
+        disk_paths = []
+        for vol in vol_list:
+            disk_paths.append({'pool': vol['pool'], 'volume': vol['name']})
+
+        vm.disk_paths = disk_paths
+        self._vms[name] = vm
+
+    def get_vms(self):
+        return self._vms.keys()
+
+    def get_screenshot_by_name(self, vm_uuid):
+        mockscreenshot = MockVMScreenshot({'uuid': vm_uuid})
+        screenshot = self._screenshots.setdefault(vm_uuid, mockscreenshot)
+        return screenshot.lookup()
+
+    def get_vm_by_name(self, name):
+        vm = self._vms[name]
+        if vm.info['state'] == 'running':
+            vm.info['screenshot'] = self.get_screenshot_by_name(name)
+        else:
+            vm.info['screenshot'] = None
+        return vm.info
+
+    def static_vm_update(self, name, params):
+        vm = self._vms[name]
+        for key, val in params.items():
+            if key in VM_STATIC_UPDATE_PARAMS:
+                vm.info[key] = val
+
+        if 'name' in params:
+            del self._vms[name]
+            self._vms[params['name']] = vm
+
+    def live_vm_update(self, name, params):
+        pass
+
+    def delete_vm(self, name):
+        vm = self._vms[name]
+        screenshot = self._screenshots.get(vm.uuid, None)
+        if screenshot is not None:
+            screenshot.delete()
+            del self._screenshots[vm.uuid]
+
+        for disk in vm.disk_paths:
+            self.delete_storagevolume(disk['pool'], disk['volume'])
+
+        del self._vms[name]
+
+    def start_vm(self, name):
+        self._vms[name].info['state'] = 'running'
+
+    def stop_vm(self, name):
+        self._vms[name].info['state'] = 'shutoff'
+
+    def connect_vm(self, name):
+        pass
+
 class MockStoragePool(object):
     def __init__(self, name):
         self.name = name
@@ -225,3 +295,44 @@ class MockStorageVolume(object):
             self.info['os_distro'] = 'fedora'
             self.info['bootable'] = True
 
+class MockVM(object):
+    def __init__(self, uuid, name, template_info):
+        self.uuid = uuid
+        self.name = name
+        self.disk_paths = []
+        self.networks = template_info['networks']
+        self.info = {'state': 'shutoff',
+                     'stats': "{'cpu_utilization': 20, 'net_throughput' : 35, \
+                                'net_throughput_peak': 100, 'io_throughput': 45, \
+                                'io_throughput_peak': 100}",
+                     'uuid': self.uuid,
+                     'memory': template_info['memory'],
+                     'cpus': template_info['cpus'],
+                     'icon': None,
+                     'graphics': {'type': 'vnc', 'listen': '0.0.0.0', 'port': None}
+                     }
+        self.info['graphics'].update(template_info['graphics'])
+
+class MockVMScreenshot(VMScreenshot):
+    OUTDATED_SECS = 5
+    BACKGROUND_COLOR = ['blue', 'green', 'purple', 'red', 'yellow']
+    BOX_COORD = (50, 115, 206, 141)
+    BAR_COORD = (50, 115, 50, 141)
+
+    def __init__(self, vm_name):
+        VMScreenshot.__init__(self, vm_name)
+        self.coord = MockVMScreenshot.BAR_COORD
+        self.background = random.choice(MockVMScreenshot.BACKGROUND_COLOR)
+
+    def _generate_scratch(self, thumbnail):
+        self.coord = (self.coord[0],
+                      self.coord[1],
+                      min(MockVMScreenshot.BOX_COORD[2],
+                          self.coord[2]+random.randrange(50)),
+                      self.coord[3])
+
+        image = Image.new("RGB", (256, 256), self.background)
+        d = ImageDraw.Draw(image)
+        d.rectangle(MockVMScreenshot.BOX_COORD, outline='black')
+        d.rectangle(self.coord, outline='black', fill='black')
+        image.save(thumbnail)
diff --git a/src/kimchi/model_/vms.py b/src/kimchi/model_/vms.py
new file mode 100644
index 0000000..3f1b6c3
--- /dev/null
+++ b/src/kimchi/model_/vms.py
@@ -0,0 +1,169 @@
+#
+# Project Kimchi
+#
+# Copyright IBM, Corp. 2013
+#
+# Authors:
+#  Adam Litke <agl at linux.vnet.ibm.com>
+#  Aline Manera <alinefm at linux.vnet.ibm.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
+
+import re
+import uuid
+
+from kimchi.exception import InvalidOperation, InvalidParameter
+from kimchi.exception import NotFoundError, OperationFailed
+from kimchi.vmtemplate import VMTemplate
+
+VM_STATIC_UPDATE_PARAMS = {'name': './name'}
+
+class VMs(object):
+    def __init__(self, backend):
+        self.backend = backend
+
+    def create(self, params):
+        vm_uuid = str(uuid.uuid4())
+        vm_list = self.get_list()
+        t_name = self._uri_to_name('templates', params['template'])
+        name = self._get_vm_name(params.get('name'), t_name, vm_list)
+        # incoming text, from js json, is unicode, do not need decode
+        if name in vm_list:
+            raise InvalidOperation("VM '%s' already exists" % name)
+
+        vm_overrides = dict()
+        override_params = ['storagepool', 'graphics']
+        for param in override_params:
+            value = params.get(param, None)
+            if value is not None:
+                vm_overrides[param] = value
+
+        t_params = self.backend.get_template_by_name(t_name)
+        t_params.update(vm_overrides)
+        tmpl = VMTemplate(t_params, False)
+
+        caps = self.backend.get_capabilities()
+        if not caps.get('qemu_stream') and tmpl.info.get('iso_stream', False):
+            raise InvalidOperation("Remote ISO image is not supported by this"
+                                   " server.")
+
+        pool_name = self._uri_to_name('storagepools', tmpl.info['storagepool'])
+        self._validate_storagepool(pool_name)
+        self._validate_network(tmpl)
+        vol_list = tmpl.to_volume_list(vm_uuid)
+        for vol_info in vol_list:
+            self.backend.create_storagevolume(pool_name, vol_info)
+
+        self.backend.create_vm(name, vm_uuid, tmpl, vol_list)
+        return name
+
+    def _validate_storagepool(self, pool_name):
+        try:
+            pool_info = self.backend.get_storagepool_by_name(pool_name)
+        except Exception:
+            raise InvalidParameter("Storage pool '%s' specified by template "
+                                   "does not exist" % pool_name)
+
+        if pool_info['state'] != 'active':
+            raise InvalidParameter("Storage pool '%s' specified by template "
+                                   "is not active" % pool_name)
+
+    def _validate_network(self, tmpl):
+        names = tmpl.info['networks']
+        for name in names:
+            if name not in self.backend.get_networks():
+                raise InvalidParameter("Network '%s' specified by template "
+                                       "does not exist." % name)
+
+            net_info = self.backend.get_network_by_name(name)
+            if net_info['state'] != 'active':
+                raise InvalidParameter("Network '%s' specified by template is "
+                                       "not active." % name)
+
+    def _uri_to_name(self, collection, uri):
+        expr = '/%s/(.*?)/?$' % collection
+        m = re.match(expr, uri)
+        if not m:
+            raise InvalidParameter(uri)
+        return m.group(1)
+
+    def _get_vm_name(self, vm_name, t_name, name_list):
+        if vm_name:
+            return vm_name
+
+        for i in xrange(1, 1000):
+            vm_name = "%s-vm-%i" % (t_name, i)
+            if vm_name not in name_list:
+                return vm_name
+
+        raise OperationFailed("Unable to choose a VM name")
+
+    def get_list(self):
+        return sorted(self.backend.get_vms())
+
+class VM(VMs):
+    def _vm_exists(self, name):
+        if name not in self.backend.get_vms():
+            raise NotFoundError("VM '%s' not found." % name)
+
+        return True
+
+    def lookup(self, name):
+        if self._vm_exists(name):
+            return self.backend.get_vm_by_name(name)
+
+    def update(self, name, params):
+        if self._vm_exists(name):
+            if 'name' in params:
+                state = self.backend.get_vm_by_name(name)['state']
+                if state == 'running':
+                    raise InvalidParameter("The VM needs to be shut off for "
+                                           "renaming.")
+
+                if params['name'] in self.get_list():
+                    raise InvalidParameter("VM name '%s' already exists" %
+                                            params['name'])
+
+            self.backend.static_vm_update(name, params)
+            self.backend.live_vm_update(name, params)
+
+            return params.get('name', None) or name
+
+    def delete(self, name):
+        if self._vm_exists(name):
+            self.backend.delete_vm(name)
+
+    def start(self, name):
+        if self._vm_exists(name):
+            self.backend.start_vm(name)
+
+    def stop(self, name):
+        if self._vm_exists(name):
+            self.backend.stop_vm(name)
+
+    def connect(self, name):
+        if self._vm_exists(name):
+            self.backend.connect_vm(name)
+
+
+class VMScreenshot(object):
+    def __init__(self, backend):
+        self.backend = backend
+
+    def lookup(self, name):
+        vm_info = self.backend.get_vm_by_name(name)
+        if vm_info['state'] != 'running':
+            raise NotFoundError('No screenshot for stopped vm')
+
+        return self.backend.get_screenshot_by_name(vm_info['uuid'])
diff --git a/src/kimchi/vmtemplate.py b/src/kimchi/vmtemplate.py
index e7f6c81..c0e5982 100644
--- a/src/kimchi/vmtemplate.py
+++ b/src/kimchi/vmtemplate.py
@@ -162,7 +162,7 @@ class VMTemplate(object):
             """ % params
         return ret
 
-    def _get_graphics_xml(self, params):
+    def _get_graphics_xml(self):
         graphics_xml = """
             <graphics type='%(type)s' autoport='yes' listen='%(listen)s'>
             </graphics>
@@ -173,8 +173,6 @@ class VMTemplate(object):
             </channel>
         """
         graphics = dict(self.info['graphics'])
-        if params:
-            graphics.update(params)
         graphics_xml = graphics_xml % graphics
         if graphics['type'] == 'spice':
             graphics_xml = graphics_xml + spicevmc_xml
@@ -219,7 +217,7 @@ class VMTemplate(object):
         params['cdroms'] = ''
         params['qemu-stream-cmdline'] = ''
         graphics = kwargs.get('graphics')
-        params['graphics'] = self._get_graphics_xml(graphics)
+        params['graphics'] = self._get_graphics_xml()
 
         qemu_stream_dns = kwargs.get('qemu_stream_dns', False)
         libvirt_stream = kwargs.get('libvirt_stream', False)
-- 
1.7.10.4




More information about the Kimchi-devel mailing list