Signed-off-by: Lucio Correia <luciojhc(a)linux.vnet.ibm.com>
---
plugins/kimchi/config.py.in | 1 -
plugins/kimchi/mockmodel.py | 19 +-
plugins/kimchi/model/config.py | 2 +-
plugins/kimchi/model/cpuinfo.py | 2 +-
plugins/kimchi/model/debugreports.py | 2 +-
plugins/kimchi/model/diskutils.py | 4 +-
plugins/kimchi/model/hostdev.py | 4 +-
plugins/kimchi/model/libvirtconnection.py | 12 +-
plugins/kimchi/model/networks.py | 3 +-
plugins/kimchi/model/peers.py | 4 +-
plugins/kimchi/model/storagepools.py | 13 +-
plugins/kimchi/model/storagevolumes.py | 9 +-
plugins/kimchi/model/vms.py | 32 ++-
plugins/kimchi/model/vmstorages.py | 6 +-
plugins/kimchi/osinfo.py | 1 -
plugins/kimchi/root.py | 1 -
plugins/kimchi/screenshot.py | 2 +-
plugins/kimchi/tests/test_authorization.py | 36 ++-
plugins/kimchi/tests/test_config.py.in | 6 +-
plugins/kimchi/tests/test_exception.py | 24 ++-
plugins/kimchi/tests/test_host.py | 30 ++-
plugins/kimchi/tests/test_mock_network.py | 4 +-
plugins/kimchi/tests/test_mock_storagepool.py | 12 +-
plugins/kimchi/tests/test_mock_storagevolume.py | 8 +-
plugins/kimchi/tests/test_mockmodel.py | 6 +-
plugins/kimchi/tests/test_model.py | 24 ++-
plugins/kimchi/tests/test_model_network.py | 8 +-
plugins/kimchi/tests/test_model_storagepool.py | 20 +-
plugins/kimchi/tests/test_model_storagevolume.py | 18 +-
plugins/kimchi/tests/test_rest.py | 315 +++++++++++++++-------
plugins/kimchi/tests/test_template.py | 61 +++--
plugins/kimchi/tests/utils.py | 8 +-
plugins/kimchi/utils.py | 1 -
src/wok/cachebust.py | 2 +-
src/wok/server.py | 11 +-
src/wok/utils.py | 6 +-
36 files changed, 465 insertions(+), 252 deletions(-)
diff --git a/plugins/kimchi/config.py.in b/plugins/kimchi/config.py.in
index 80b72bd..65150aa 100644
--- a/plugins/kimchi/config.py.in
+++ b/plugins/kimchi/config.py.in
@@ -136,4 +136,3 @@ class KimchiConfig(dict):
}
self.update(custom_config)
-
diff --git a/plugins/kimchi/mockmodel.py b/plugins/kimchi/mockmodel.py
index e316c21..b269c26 100644
--- a/plugins/kimchi/mockmodel.py
+++ b/plugins/kimchi/mockmodel.py
@@ -48,8 +48,10 @@ from vmtemplate import VMTemplate
fake_user = {'root': 'letmein!'}
-mockmodel_defaults = {'storagepool': '/plugins/kimchi/storagepools/default-pool',
- 'domain': 'test', 'arch': 'i686'}
+mockmodel_defaults = {
+ 'storagepool': '/plugins/kimchi/storagepools/default-pool',
+ 'domain': 'test', 'arch': 'i686'
+}
class MockModel(Model):
@@ -245,8 +247,8 @@ class MockModel(Model):
return MockModel._libvirt_get_vol_path(pool, vol)
def _gen_debugreport_file(self, name):
- return add_task('/plugins/kimchi/debugreports/%s' % name, self._create_log,
- self.objstore, name)
+ return add_task('/plugins/kimchi/debugreports/%s' % name,
+ self._create_log, self.objstore, name)
def _create_log(self, cb, name):
path = config.get_debugreports_path()
@@ -354,8 +356,8 @@ class MockModel(Model):
return self._mock_swupdate.pkgs[pkg_name]
def _mock_host_swupdate(self, args=None):
- task_id = add_task('/plugins/kimchi/host/swupdate', self._mock_swupdate.doUpdate,
- self.objstore)
+ task_id = add_task('/plugins/kimchi/host/swupdate',
+ self._mock_swupdate.doUpdate, self.objstore)
return self.task_lookup(task_id)
def _mock_repositories_get_list(self):
@@ -406,8 +408,9 @@ class MockModel(Model):
def _mock_vmsnapshots_create(self, vm_name, params):
name = params.get('name', unicode(int(time.time())))
params = {'vm_name': vm_name, 'name': name}
- taskid = add_task(u'/plugins/kimchi/vms/%s/snapshots/%s' % (vm_name, name),
- self._vmsnapshots_create_task, self.objstore, params)
+ taskid = add_task(u'/plugins/kimchi/vms/%s/snapshots/%s' %
+ (vm_name, name), self._vmsnapshots_create_task,
+ self.objstore, params)
return self.task_lookup(taskid)
def _vmsnapshots_create_task(self, cb, params):
diff --git a/plugins/kimchi/model/config.py b/plugins/kimchi/model/config.py
index 371e382..464ffae 100644
--- a/plugins/kimchi/model/config.py
+++ b/plugins/kimchi/model/config.py
@@ -108,7 +108,7 @@ class CapabilitiesModel(object):
out, err, rc = run_command(['ldd', qemu_path])
if rc != 0:
wok_log.error('Failed to find qemu binary dependencies: %s',
- err)
+ err)
return False
for line in out.split('\n'):
if line.lstrip().startswith('libspice-server.so'):
diff --git a/plugins/kimchi/model/cpuinfo.py b/plugins/kimchi/model/cpuinfo.py
index 5f4bbca..299e445 100644
--- a/plugins/kimchi/model/cpuinfo.py
+++ b/plugins/kimchi/model/cpuinfo.py
@@ -60,7 +60,7 @@ class CPUInfoModel(object):
libvirt_topology = get_topo_capabilities(connect)
except Exception as e:
wok_log.info("Unable to get CPU topology capabilities: %s"
- % e.message)
+ % e.message)
return
if libvirt_topology is None:
wok_log.info("cpu_info topology not supported.")
diff --git a/plugins/kimchi/model/debugreports.py b/plugins/kimchi/model/debugreports.py
index d20eb12..bee10b6 100644
--- a/plugins/kimchi/model/debugreports.py
+++ b/plugins/kimchi/model/debugreports.py
@@ -97,7 +97,7 @@ class DebugReportsModel(object):
# Some error in sosreport happened
if reportFile is None:
wok_log.error('Debug report file not found. See sosreport '
- 'output for detail:\n%s', output)
+ 'output for detail:\n%s', output)
fname = (patterns[0] % name).split('/')[-1]
raise OperationFailed('KCHDR0004E', {'name': fname})
diff --git a/plugins/kimchi/model/diskutils.py b/plugins/kimchi/model/diskutils.py
index 2f0c903..350e6eb 100644
--- a/plugins/kimchi/model/diskutils.py
+++ b/plugins/kimchi/model/diskutils.py
@@ -55,8 +55,8 @@ def get_disk_used_by(objstore, conn, path):
# occour if a disk is added to two guests
# unknowingly.
wok_log.error('Unable to store storage volume id in'
- ' objectstore due error: %s',
- e.message)
+ ' objectstore due error: %s',
+ e.message)
raise OperationFailed('KCHVOL0017E',
{'err': e.message})
except Exception as e:
diff --git a/plugins/kimchi/model/hostdev.py b/plugins/kimchi/model/hostdev.py
index c411d0b..31211c7 100644
--- a/plugins/kimchi/model/hostdev.py
+++ b/plugins/kimchi/model/hostdev.py
@@ -44,7 +44,7 @@ def _get_dev_info_tree(dev_infos):
parent = devs[dev_info['parent']]
except KeyError:
wok_log.error('Parent %s of device %s does not exist.',
- dev_info['parent'], dev_info['name'])
+ dev_info['parent'], dev_info['name'])
continue
try:
@@ -99,7 +99,7 @@ def _get_same_iommugroup_devices(dev_infos, device_info):
parent_info = dev_dict[parent]
except KeyError:
wok_log.error("Parent %s of device %s does not exist",
- parent, dev_info['name'])
+ parent, dev_info['name'])
break
try:
diff --git a/plugins/kimchi/model/libvirtconnection.py b/plugins/kimchi/model/libvirtconnection.py
index ef4a4ce..73f3dcf 100644
--- a/plugins/kimchi/model/libvirtconnection.py
+++ b/plugins/kimchi/model/libvirtconnection.py
@@ -75,8 +75,8 @@ class LibvirtConnection(object):
libvirt.VIR_ERR_INVALID_CONN)
if edom in EDOMAINS and ecode in ECODES:
wok_log.error('Connection to libvirt broken. '
- 'Recycling. ecode: %d edom: %d' %
- (ecode, edom))
+ 'Recycling. ecode: %d edom: %d' %
+ (ecode, edom))
with LibvirtConnection._connectionLock:
self._connections[conn_id] = None
raise
@@ -97,10 +97,10 @@ class LibvirtConnection(object):
wok_log.error('Unable to connect to libvirt.')
if not retries:
wok_log.error("Unable to establish connection "
- "with libvirt. Please check "
- "your libvirt URI which is often
"
- "defined in "
- "/etc/libvirt/libvirt.conf")
+ "with libvirt. Please check "
+ "your libvirt URI which is often "
+ "defined in "
+ "/etc/libvirt/libvirt.conf")
cherrypy.engine.stop()
exit(1)
time.sleep(2)
diff --git a/plugins/kimchi/model/networks.py b/plugins/kimchi/model/networks.py
index 57af9e7..45d4474 100644
--- a/plugins/kimchi/model/networks.py
+++ b/plugins/kimchi/model/networks.py
@@ -51,7 +51,8 @@ class NetworksModel(object):
conn = self.conn.get()
error_msg = ("Please, check the configuration in %s/template.conf to "
- "ensure it lists only valid networks." %
PluginPaths('kimchi').conf_dir)
+ "ensure it lists only valid networks." %
+ PluginPaths('kimchi').conf_dir)
for net_name in networks:
try:
diff --git a/plugins/kimchi/model/peers.py b/plugins/kimchi/model/peers.py
index 91e4a32..7577364 100644
--- a/plugins/kimchi/model/peers.py
+++ b/plugins/kimchi/model/peers.py
@@ -41,7 +41,7 @@ class PeersModel(object):
out, error, ret = run_command(cmd)
if out and len(out) != 0:
wok_log.error("Unable to register server on openSLP."
- " Details: %s" % out)
+ " Details: %s" % out)
cherrypy.engine.subscribe('exit', self._peer_deregister)
def _peer_deregister(self):
@@ -50,7 +50,7 @@ class PeersModel(object):
out, error, ret = run_command(cmd)
if out and len(out) != 0:
wok_log.error("Unable to deregister server on openSLP."
- " Details: %s" % out)
+ " Details: %s" % out)
def get_list(self):
# check federation feature is enabled on Kimchi server
diff --git a/plugins/kimchi/model/storagepools.py b/plugins/kimchi/model/storagepools.py
index 9e45ccf..0db2ef4 100644
--- a/plugins/kimchi/model/storagepools.py
+++ b/plugins/kimchi/model/storagepools.py
@@ -214,8 +214,8 @@ class StoragePoolsModel(object):
'virt_use_nfs=1'])
if error or returncode:
wok_log.error("Unable to set virt_use_nfs=1. If you use "
- "SELinux, this may prevent NFS pools from "
- "being used.")
+ "SELinux, this may prevent NFS pools from "
+ "being used.")
return name
def _clean_scan(self, pool_name):
@@ -285,9 +285,9 @@ class StoragePoolModel(object):
# throwing an Exception here would prevent all pools from
# displaying information -- so return None for busy
wok_log.error("ERROR: Storage Pool get vol count: %s "
- % e.get_error_message())
+ % e.get_error_message())
wok_log.error("ERROR: Storage Pool get vol count error no: %s "
- % e.get_error_code())
+ % e.get_error_code())
return 0
except Exception as e:
raise OperationFailed("KCHPOOL0008E",
@@ -340,8 +340,7 @@ class StoragePoolModel(object):
# for a nfs if the corresponding NFS server is down.
if pool_type == 'netfs' and not self._nfs_status_online(pool):
wok_log.debug("NFS pool %s is offline, reason: NFS "
- "server %s is unreachable.", name,
- source['addr'])
+ "server %s is unreachable.", name,
source['addr'])
# Mark state as '4' => inaccessible.
info[0] = 4
# skip calculating volumes
@@ -379,7 +378,7 @@ class StoragePoolModel(object):
output, error, returncode = run_command(lsblk_cmd)
if returncode != 0:
wok_log.error('%s is not a valid disk/partition. Could not '
- 'add it to the pool %s.', disk, pool_name)
+ 'add it to the pool %s.', disk, pool_name)
raise OperationFailed('KCHPOOL0027E', {'disk': disk,
'pool': pool_name})
# add disks to the lvm pool using vgextend + virsh refresh
diff --git a/plugins/kimchi/model/storagevolumes.py b/plugins/kimchi/model/storagevolumes.py
index 7fc0837..920333c 100644
--- a/plugins/kimchi/model/storagevolumes.py
+++ b/plugins/kimchi/model/storagevolumes.py
@@ -114,7 +114,8 @@ class StorageVolumesModel(object):
raise InvalidParameter('KCHVOL0001E', {'name': name})
params['pool'] = pool_name
- targeturi = '/plugins/kimchi/storagepools/%s/storagevolumes/%s' % (pool_name, name)
+ targeturi = '/plugins/kimchi/storagepools/%s/storagevolumes/%s' \
+ % (pool_name, name)
taskid = add_task(targeturi, create_func, self.objstore, params)
return self.task.lookup(taskid)
@@ -403,8 +404,8 @@ class StorageVolumeModel(object):
'name': name,
'new_pool': new_pool,
'new_name': new_name}
- taskid = add_task(u'/plugins/kimchi/storagepools/%s/storagevolumes/%s' %
- (pool, new_name), self._clone_task, self.objstore,
+ taskid = add_task(u'/plugins/kimchi/storagepools/%s/storagevolumes/%s'
+ % (pool, new_name), self._clone_task, self.objstore,
params)
return self.task.lookup(taskid)
@@ -529,7 +530,7 @@ class IsoVolumesModel(object):
except Exception, e:
# Skip inactive pools
wok_log.debug("Shallow scan: skipping pool %s because of "
- "error: %s", (pool_name, e.message))
+ "error: %s", (pool_name, e.message))
continue
for volume in volumes:
diff --git a/plugins/kimchi/model/vms.py b/plugins/kimchi/model/vms.py
index 561e7d8..36e3cd2 100644
--- a/plugins/kimchi/model/vms.py
+++ b/plugins/kimchi/model/vms.py
@@ -135,7 +135,7 @@ class VMsModel(object):
# It is possible to continue Kimchi executions without store
# vm icon info
wok_log.error('Error trying to update database with guest '
- 'icon information due error: %s', e.message)
+ 'icon information due error: %s', e.message)
# If storagepool is SCSI, volumes will be LUNs and must be passed by
# the user from UI or manually.
@@ -184,8 +184,12 @@ class VMModel(object):
self.objstore = kargs['objstore']
self.caps = CapabilitiesModel(**kargs)
self.vmscreenshot = VMScreenshotModel(**kargs)
- self.users = import_class('plugins.kimchi.model.users.UsersModel')(**kargs)
- self.groups = import_class('plugins.kimchi.model.groups.GroupsModel')(**kargs)
+ self.users = import_class(
+ 'plugins.kimchi.model.users.UsersModel'
+ )(**kargs)
+ self.groups = import_class(
+ 'plugins.kimchi.model.groups.GroupsModel'
+ )(**kargs)
self.vms = VMsModel(**kargs)
self.task = TaskModel(**kargs)
self.storagepool = model.storagepools.StoragePoolModel(**kargs)
@@ -398,9 +402,9 @@ class VMModel(object):
# be used
if orig_vol['capacity'] > orig_pool['available']:
wok_log.warning('storage pool \'%s\' doesn\'t have '
- 'enough free space to store image '
- '\'%s\'; falling back to \'default\'',
- orig_pool_name, path)
+ 'enough free space to store image '
+ '\'%s\'; falling back to \'default\'',
+ orig_pool_name, path)
new_pool_name = u'default'
new_pool = self.storagepool.lookup(u'default')
@@ -414,8 +418,8 @@ class VMModel(object):
elif orig_pool['type'] in ['scsi', 'iscsi']:
# SCSI and iSCSI always fall back to the storage pool 'default'
wok_log.warning('cannot create new volume for clone in '
- 'storage pool \'%s\'; falling back to '
- '\'default\'', orig_pool_name)
+ 'storage pool \'%s\'; falling back to '
+ '\'default\'', orig_pool_name)
new_pool_name = u'default'
new_pool = self.storagepool.lookup(u'default')
@@ -1044,7 +1048,7 @@ class VMModel(object):
snapshot_names = self.vmsnapshots.get_list(name)
except OperationFailed, e:
wok_log.error('cannot list snapshots: %s; '
- 'skipping snapshot deleting...' % e.message)
+ 'skipping snapshot deleting...' % e.message)
else:
for s in snapshot_names:
self.vmsnapshot.delete(name, s)
@@ -1069,7 +1073,7 @@ class VMModel(object):
ignore_missing=True)
except libvirt.libvirtError as e:
wok_log.error('Unable to get storage volume by path: %s' %
- e.message)
+ e.message)
except Exception as e:
raise OperationFailed('KCHVOL0017E', {'err': e.message})
@@ -1089,7 +1093,7 @@ class VMModel(object):
except Exception as e:
# It is possible to delete vm without delete its database info
wok_log.error('Error deleting vm information from database: '
- '%s', e.message)
+ '%s', e.message)
vnc.remove_proxy_token(name)
@@ -1185,7 +1189,7 @@ class VMModel(object):
# It is possible to continue Kimchi executions without delete
# screenshots
wok_log.error('Error trying to delete vm screenshot from '
- 'database due error: %s', e.message)
+ 'database due error: %s', e.message)
def suspend(self, name):
"""Suspend the virtual machine's execution and puts it in the
@@ -1251,7 +1255,7 @@ class VMScreenshotModel(object):
# It is possible to continue Kimchi executions without store
# screenshots
wok_log.error('Error trying to update database with guest '
- 'screenshot information due error: %s', e.message)
+ 'screenshot information due error: %s', e.message)
return img_path
@staticmethod
@@ -1269,7 +1273,7 @@ class VMScreenshotModel(object):
# It is possible to continue Kimchi vm executions without
# screenshots
wok_log.error('Error trying to update database with guest '
- 'screenshot information due error: %s', e.message)
+ 'screenshot information due error: %s', e.message)
return LibvirtVMScreenshot(params, conn)
diff --git a/plugins/kimchi/model/vmstorages.py b/plugins/kimchi/model/vmstorages.py
index d6987e3..bec16c6 100644
--- a/plugins/kimchi/model/vmstorages.py
+++ b/plugins/kimchi/model/vmstorages.py
@@ -193,7 +193,7 @@ class VMStorageModel(object):
used_by = get_disk_used_by(self.objstore, self.conn, path)
else:
wok_log.error("Unable to decrement volume used_by on"
- " delete because no path could be found.")
+ " delete because no path could be found.")
dom.detachDeviceFlags(etree.tostring(disk),
get_vm_config_flag(dom, 'all'))
except Exception as e:
@@ -204,7 +204,7 @@ class VMStorageModel(object):
set_disk_used_by(self.objstore, path, used_by)
else:
wok_log.error("Unable to update %s:%s used_by on delete."
- % (vm_name, dev_name))
+ % (vm_name, dev_name))
def update(self, vm_name, dev_name, params):
old_disk_used_by = None
@@ -248,5 +248,5 @@ class VMStorageModel(object):
new_disk_used_by)
except Exception as e:
wok_log.error("Unable to update dev used_by on update due to"
- " %s:" % e.message)
+ " %s:" % e.message)
return dev
diff --git a/plugins/kimchi/osinfo.py b/plugins/kimchi/osinfo.py
index 1164946..5b1277c 100644
--- a/plugins/kimchi/osinfo.py
+++ b/plugins/kimchi/osinfo.py
@@ -210,5 +210,4 @@ def lookup(distro, version):
else:
params['icon'] = 'plugins/kimchi/images/icon-vm.png'
-
return params
diff --git a/plugins/kimchi/root.py b/plugins/kimchi/root.py
index 20c41ca..fb2f966 100644
--- a/plugins/kimchi/root.py
+++ b/plugins/kimchi/root.py
@@ -67,4 +67,3 @@ class KimchiRoot(WokRoot):
def get_custom_conf(self):
return config.KimchiConfig()
-
diff --git a/plugins/kimchi/screenshot.py b/plugins/kimchi/screenshot.py
index 7c2347c..ffe5a1a 100644
--- a/plugins/kimchi/screenshot.py
+++ b/plugins/kimchi/screenshot.py
@@ -165,7 +165,7 @@ class VMScreenshot(object):
self._generate_scratch(thumbnail)
except:
wok_log.error("screenshot_creation: Unable to create "
- "screenshot image %s." % thumbnail)
+ "screenshot image %s." % thumbnail)
else:
self._create_black_image(thumbnail)
diff --git a/plugins/kimchi/tests/test_authorization.py b/plugins/kimchi/tests/test_authorization.py
index 53aa847..87d68ab 100644
--- a/plugins/kimchi/tests/test_authorization.py
+++ b/plugins/kimchi/tests/test_authorization.py
@@ -88,7 +88,8 @@ class AuthorizationTests(unittest.TestCase):
self.assertEquals(200, resp.status)
resp = self.request('/plugins/kimchi/networks', '{}', 'POST')
self.assertEquals(403, resp.status)
- resp = self.request('/plugins/kimchi/networks/default/activate', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/networks/default/activate', '{}',
+ 'POST')
self.assertEquals(403, resp.status)
resp = self.request('/plugins/kimchi/networks/default', '{}',
'DELETE')
self.assertEquals(403, resp.status)
@@ -98,9 +99,11 @@ class AuthorizationTests(unittest.TestCase):
self.assertEquals(200, resp.status)
resp = self.request('/plugins/kimchi/storagepools', '{}', 'POST')
self.assertEquals(403, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/default/activate', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/storagepools/default/activate',
+ '{}', 'POST')
self.assertEquals(403, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/default', '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/storagepools/default', '{}',
+ 'DELETE')
self.assertEquals(403, resp.status)
# Non-root users can not update or delete a template
@@ -118,23 +121,29 @@ class AuthorizationTests(unittest.TestCase):
# Non-root users can only get vms authorized to them
model.templates_create({'name': u'test', 'cdrom': fake_iso})
- task_info = model.vms_create({'name': u'test-me',
- 'template': '/plugins/kimchi/templates/test'})
+ task_info = model.vms_create({
+ 'name': u'test-me',
+ 'template': '/plugins/kimchi/templates/test'
+ })
wait_task(model.task_lookup, task_info['id'])
model.vm_update(u'test-me',
{'users': [mockmodel.fake_user.keys()[0]],
'groups': []})
- task_info = model.vms_create({'name': u'test-usera',
- 'template': '/plugins/kimchi/templates/test'})
+ task_info = model.vms_create({
+ 'name': u'test-usera',
+ 'template': '/plugins/kimchi/templates/test'
+ })
wait_task(model.task_lookup, task_info['id'])
non_root = list(set(model.users_get_list()) - set(['root']))[0]
model.vm_update(u'test-usera', {'users': [non_root], 'groups': []})
- task_info = model.vms_create({'name': u'test-groupa',
- 'template': '/plugins/kimchi/templates/test'})
+ task_info = model.vms_create({
+ 'name': u'test-groupa',
+ 'template': '/plugins/kimchi/templates/test'
+ })
wait_task(model.task_lookup, task_info['id'])
a_group = model.groups_get_list()[0]
model.vm_update(u'test-groupa', {'groups': [a_group]})
@@ -148,8 +157,10 @@ class AuthorizationTests(unittest.TestCase):
self.assertEquals(403, resp.status)
# Create a vm using mockmodel directly to test Resource access
- task_info = model.vms_create({'name': 'kimchi-test',
- 'template': '/plugins/kimchi/templates/test'})
+ task_info = model.vms_create({
+ 'name': 'kimchi-test',
+ 'template': '/plugins/kimchi/templates/test'
+ })
wait_task(model.task_lookup, task_info['id'])
resp = self.request('/plugins/kimchi/vms/kimchi-test', '{}', 'PUT')
self.assertEquals(403, resp.status)
@@ -159,7 +170,8 @@ class AuthorizationTests(unittest.TestCase):
# Non-root users can only update VMs authorized by them
resp = self.request('/plugins/kimchi/vms/test-me/start', '{}', 'POST')
self.assertEquals(200, resp.status)
- resp = self.request('/plugins/kimchi/vms/test-usera/start', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-usera/start', '{}',
+ 'POST')
self.assertEquals(403, resp.status)
model.template_delete('test')
diff --git a/plugins/kimchi/tests/test_config.py.in b/plugins/kimchi/tests/test_config.py.in
index 19ae0ba..7b5f190 100644
--- a/plugins/kimchi/tests/test_config.py.in
+++ b/plugins/kimchi/tests/test_config.py.in
@@ -179,11 +179,13 @@ class ConfigTests(unittest.TestCase):
},
'/robots.txt': {
'tools.staticfile.on': True,
- 'tools.staticfile.filename': '%s/robots.txt' % KimchiPaths().ui_dir
+ 'tools.staticfile.filename': '%s/robots.txt' %
+ KimchiPaths().ui_dir
},
'/help': {
'tools.staticdir.on': True,
- 'tools.staticdir.dir': '%s/ui/pages/help' % KimchiPaths().prefix,
+ 'tools.staticdir.dir': '%s/ui/pages/help' %
+ KimchiPaths().prefix,
'tools.staticdir.index': 'en_US/index.html',
'tools.nocache.on': True
}
diff --git a/plugins/kimchi/tests/test_exception.py b/plugins/kimchi/tests/test_exception.py
index d8c0296..2b89adb 100644
--- a/plugins/kimchi/tests/test_exception.py
+++ b/plugins/kimchi/tests/test_exception.py
@@ -56,7 +56,9 @@ class ExceptionTests(unittest.TestCase):
"""
setup_server('production')
# test 404
- resp = json.loads(request(host, ssl_port, '/plugins/kimchi/vms/blah').read())
+ resp = json.loads(
+ request(host, ssl_port, '/plugins/kimchi/vms/blah').read()
+ )
self.assertEquals('404 Not Found', resp.get('code'))
# test 405 wrong method
@@ -66,7 +68,9 @@ class ExceptionTests(unittest.TestCase):
self.assertEquals(msg, resp.get('reason'))
# test 400 parse error
- resp = json.loads(request(host, ssl_port, '/plugins/kimchi/vms', '{', 'POST').read())
+ resp = json.loads(
+ request(host, ssl_port, '/plugins/kimchi/vms', '{', 'POST').read()
+ )
msg = u'KCHAPI0006E: Unable to parse JSON request'
self.assertEquals('400 Bad Request', resp.get('code'))
self.assertEquals(msg, resp.get('reason'))
@@ -74,7 +78,9 @@ class ExceptionTests(unittest.TestCase):
# test 400 missing required parameter
req = json.dumps({})
- resp = json.loads(request(host, ssl_port, '/plugins/kimchi/vms', req, 'POST').read())
+ resp = json.loads(
+ request(host, ssl_port, '/plugins/kimchi/vms', req, 'POST').read()
+ )
self.assertEquals('400 Bad Request', resp.get('code'))
m = u"KCHVM0016E: Specify a template to create a virtual machine from"
self.assertEquals(m, resp.get('reason'))
@@ -86,7 +92,9 @@ class ExceptionTests(unittest.TestCase):
"""
setup_server()
# test 404
- resp = json.loads(request(host, ssl_port, '/plugins/kimchi/vms/blah').read())
+ resp = json.loads(
+ request(host, ssl_port, '/plugins/kimchi/vms/blah').read()
+ )
self.assertEquals('404 Not Found', resp.get('code'))
# test 405 wrong method
@@ -96,7 +104,9 @@ class ExceptionTests(unittest.TestCase):
self.assertEquals(msg, resp.get('reason'))
# test 400 parse error
- resp = json.loads(request(host, ssl_port, '/plugins/kimchi/vms', '{', 'POST').read())
+ resp = json.loads(
+ request(host, ssl_port, '/plugins/kimchi/vms', '{', 'POST').read()
+ )
msg = u'KCHAPI0006E: Unable to parse JSON request'
self.assertEquals('400 Bad Request', resp.get('code'))
self.assertEquals(msg, resp.get('reason'))
@@ -104,7 +114,9 @@ class ExceptionTests(unittest.TestCase):
# test 400 missing required parameter
req = json.dumps({})
- resp = json.loads(request(host, ssl_port, '/plugins/kimchi/vms', req, 'POST').read())
+ resp = json.loads(
+ request(host, ssl_port, '/plugins/kimchi/vms', req, 'POST').read()
+ )
m = u"KCHVM0016E: Specify a template to create a virtual machine from"
self.assertEquals('400 Bad Request', resp.get('code'))
self.assertEquals(m, resp.get('reason'))
diff --git a/plugins/kimchi/tests/test_host.py b/plugins/kimchi/tests/test_host.py
index e2aa196..9bc9302 100644
--- a/plugins/kimchi/tests/test_host.py
+++ b/plugins/kimchi/tests/test_host.py
@@ -101,8 +101,9 @@ class HostTests(unittest.TestCase):
def test_host_actions(self):
def _task_lookup(taskid):
- return json.loads(self.request('/plugins/kimchi/tasks/%s' %
- taskid).read())
+ return json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % taskid).read()
+ )
resp = self.request('/plugins/kimchi/host/shutdown', '{}', 'POST')
self.assertEquals(200, resp.status)
@@ -127,11 +128,13 @@ class HostTests(unittest.TestCase):
task_params = [u'id', u'message', u'status', u'target_uri']
self.assertEquals(sorted(task_params), sorted(task.keys()))
- resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None, 'GET')
+ resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None,
+ 'GET')
task_info = json.loads(resp.read())
self.assertEquals(task_info['status'], 'running')
wait_task(_task_lookup, task_info['id'])
- resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None, 'GET')
+ resp = self.request('/plugins/kimchi/tasks/' + task[u'id'], None,
+ 'GET')
task_info = json.loads(resp.read())
self.assertEquals(task_info['status'], 'finished')
self.assertIn(u'All packages updated', task_info['message'])
@@ -146,7 +149,7 @@ class HostTests(unittest.TestCase):
keys = ['name', 'path', 'type', 'fstype', 'size', 'mountpoint',
'available']
for item in partitions:
- resp = self.request('/plugins/kimchi/host/partitions/%s' %
+ resp = self.request('/plugins/kimchi/host/partitions/%s' %
item['name'])
info = json.loads(resp.read())
self.assertEquals(sorted(info.keys()), sorted(keys))
@@ -161,8 +164,9 @@ class HostTests(unittest.TestCase):
# Mockmodel brings 3 preconfigured scsi fc_host
self.assertEquals(3, len(nodedevs))
- nodedev = json.loads(self.request(
- '/plugins/kimchi/host/devices/scsi_host2').read())
+ nodedev = json.loads(
+ self.request('/plugins/kimchi/host/devices/scsi_host2').read()
+ )
# Mockmodel generates random wwpn and wwnn
self.assertEquals('scsi_host2', nodedev['name'])
self.assertEquals('fc_host', nodedev['adapter']['type'])
@@ -172,7 +176,7 @@ class HostTests(unittest.TestCase):
devs = json.loads(self.request('/plugins/kimchi/host/devices').read())
dev_names = [dev['name'] for dev in devs]
for dev_type in ('pci', 'usb_device', 'scsi'):
- resp = self.request('/plugins/kimchi/host/devices?_cap=%s' %
+ resp = self.request('/plugins/kimchi/host/devices?_cap=%s' %
dev_type)
devsByType = json.loads(resp.read())
names = [dev['name'] for dev in devsByType]
@@ -185,8 +189,9 @@ class HostTests(unittest.TestCase):
for dev_type in ('pci', 'usb_device', 'scsi'):
resp = self.request(
- '/plugins/kimchi/host/devices?_cap=%s&_passthrough=true'
%
- dev_type)
+ '/plugins/kimchi/host/devices?_cap=%s&_passthrough=true' %
+ dev_type
+ )
filteredDevs = json.loads(resp.read())
filteredNames = [dev['name'] for dev in filteredDevs]
self.assertTrue(set(filteredNames) <= set(dev_names))
@@ -194,7 +199,8 @@ class HostTests(unittest.TestCase):
for dev in passthru_devs:
resp = self.request(
- '/plugins/kimchi/host/devices?_passthrough_affected_by=%s' %
- dev)
+ '/plugins/kimchi/host/devices?_passthrough_affected_by=%s' %
+ dev
+ )
affected_devs = [dev['name'] for dev in json.loads(resp.read())]
self.assertTrue(set(affected_devs) <= set(dev_names))
diff --git a/plugins/kimchi/tests/test_mock_network.py b/plugins/kimchi/tests/test_mock_network.py
index 9f38869..4e2a939 100644
--- a/plugins/kimchi/tests/test_mock_network.py
+++ b/plugins/kimchi/tests/test_mock_network.py
@@ -63,7 +63,9 @@ class MockNetworkTests(unittest.TestCase):
def test_vlan_tag_bridge(self):
# Verify the current system has at least one interface to create a
# bridged network
- interfaces = json.loads(self.request('/plugins/kimchi/interfaces?type=nic').read())
+ interfaces = json.loads(
+ self.request('/plugins/kimchi/interfaces?type=nic').read()
+ )
if len(interfaces) > 0:
iface = interfaces[0]['name']
_do_network_test(self, model, {'name': u'bridge-network',
diff --git a/plugins/kimchi/tests/test_mock_storagepool.py b/plugins/kimchi/tests/test_mock_storagepool.py
index 6fe0981..ea9843b 100644
--- a/plugins/kimchi/tests/test_mock_storagepool.py
+++ b/plugins/kimchi/tests/test_mock_storagepool.py
@@ -60,15 +60,21 @@ class MockStoragepoolTests(unittest.TestCase):
model.reset()
def _task_lookup(self, taskid):
- return json.loads(self.request('/plugins/kimchi/tasks/%s' % taskid).read())
+ return json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % taskid).read()
+ )
def test_storagepool(self):
# MockModel always returns 2 partitions (vdx, vdz)
- partitions = json.loads(self.request('/plugins/kimchi/host/partitions').read())
+ partitions = json.loads(
+ self.request('/plugins/kimchi/host/partitions').read()
+ )
devs = [dev['path'] for dev in partitions]
# MockModel always returns 3 FC devices
- fc_devs = json.loads(self.request('/plugins/kimchi/host/devices?_cap=fc_host').read())
+ fc_devs = json.loads(
+ self.request('/plugins/kimchi/host/devices?_cap=fc_host').read()
+ )
fc_devs = [dev['name'] for dev in fc_devs]
poolDefs = [
diff --git a/plugins/kimchi/tests/test_mock_storagevolume.py b/plugins/kimchi/tests/test_mock_storagevolume.py
index 22fcce1..9d0a5ad 100644
--- a/plugins/kimchi/tests/test_mock_storagevolume.py
+++ b/plugins/kimchi/tests/test_mock_storagevolume.py
@@ -61,11 +61,15 @@ class MockStorageVolumeTests(unittest.TestCase):
def test_storagevolume(self):
# MockModel always returns 2 partitions (vdx, vdz)
- partitions = json.loads(self.request('/plugins/kimchi/host/partitions').read())
+ partitions = json.loads(
+ self.request('/plugins/kimchi/host/partitions').read()
+ )
devs = [dev['path'] for dev in partitions]
# MockModel always returns 3 FC devices
- fc_devs = json.loads(self.request('/plugins/kimchi/host/devices?_cap=fc_host').read())
+ fc_devs = json.loads(
+ self.request('/plugins/kimchi/host/devices?_cap=fc_host').read()
+ )
fc_devs = [dev['name'] for dev in fc_devs]
poolDefs = [
diff --git a/plugins/kimchi/tests/test_mockmodel.py b/plugins/kimchi/tests/test_mockmodel.py
index 2fae0e2..ffbf8d5 100644
--- a/plugins/kimchi/tests/test_mockmodel.py
+++ b/plugins/kimchi/tests/test_mockmodel.py
@@ -71,8 +71,10 @@ class MockModelTests(unittest.TestCase):
wait_task(model.task_lookup, task['id'])
# Test screenshot refresh for running vm
- request(host, ssl_port, '/plugins/kimchi/vms/test-vm/start', '{}', 'POST')
- resp = request(host, ssl_port, '/plugins/kimchi/vms/test-vm/screenshot')
+ request(host, ssl_port, '/plugins/kimchi/vms/test-vm/start', '{}',
+ 'POST')
+ resp = request(host, ssl_port,
+ '/plugins/kimchi/vms/test-vm/screenshot')
self.assertEquals(200, resp.status)
self.assertEquals('image/png', resp.getheader('content-type'))
resp1 = request(host, ssl_port, '/plugins/kimchi/vms/test-vm')
diff --git a/plugins/kimchi/tests/test_model.py b/plugins/kimchi/tests/test_model.py
index c70e4fb..353a81d 100644
--- a/plugins/kimchi/tests/test_model.py
+++ b/plugins/kimchi/tests/test_model.py
@@ -129,7 +129,8 @@ class ModelTests(unittest.TestCase):
inst.templates_create(params)
rollback.prependDefer(inst.template_delete, 'test')
- params = {'name': 'kimchi-vm', 'template': '/plugins/kimchi/templates/test'}
+ params = {'name': 'kimchi-vm',
+ 'template': '/plugins/kimchi/templates/test'}
task = inst.vms_create(params)
rollback.prependDefer(inst.vm_delete, 'kimchi-vm')
inst.task_wait(task['id'], 10)
@@ -302,7 +303,8 @@ class ModelTests(unittest.TestCase):
self.assertEquals('127.0.0.1', info['graphics']['listen'])
graphics = {'type': 'spice', 'listen': '127.0.0.1'}
- params = {'name': 'kimchi-spice', 'template': '/plugins/kimchi/templates/test',
+ params = {'name': 'kimchi-spice',
+ 'template': '/plugins/kimchi/templates/test',
'graphics': graphics}
task2 = inst.vms_create(params)
inst.task_wait(task2['id'])
@@ -481,8 +483,10 @@ class ModelTests(unittest.TestCase):
'cdrom': old_distro_iso}
inst.templates_create(params)
rollback.prependDefer(inst.template_delete, 'old_distro_template')
- params = {'name': vm_name,
- 'template': '/plugins/kimchi/templates/old_distro_template'}
+ params = {
+ 'name': vm_name,
+ 'template': '/plugins/kimchi/templates/old_distro_template'
+ }
task2 = inst.vms_create(params)
inst.task_wait(task2['id'])
rollback.prependDefer(inst.vm_delete, vm_name)
@@ -925,10 +929,14 @@ class ModelTests(unittest.TestCase):
objstore_loc=self.tmp_store)
with RollbackContext() as rollback:
- params = {'name': 'test', 'disks': [], 'cdrom': UBUNTU_ISO,
- 'storagepool': '/plugins/kimchi/storagepools/default-pool',
- 'domain': 'test',
- 'arch': 'i686'}
+ params = {
+ 'name': 'test',
+ 'disks': [],
+ 'cdrom': UBUNTU_ISO,
+ 'storagepool': '/plugins/kimchi/storagepools/default-pool',
+ 'domain': 'test',
+ 'arch': 'i686'
+ }
inst.templates_create(params)
rollback.prependDefer(inst.template_delete, 'test')
diff --git a/plugins/kimchi/tests/test_model_network.py b/plugins/kimchi/tests/test_model_network.py
index 8874ed9..e4cf5ef 100644
--- a/plugins/kimchi/tests/test_model_network.py
+++ b/plugins/kimchi/tests/test_model_network.py
@@ -121,7 +121,9 @@ class NetworkTests(unittest.TestCase):
nets = json.loads(self.request('/plugins/kimchi/networks').read())
self.assertEquals(len(networks) + 5, len(nets))
- network = json.loads(self.request('/plugins/kimchi/networks/network-1').read())
+ network = json.loads(
+ self.request('/plugins/kimchi/networks/network-1').read()
+ )
keys = [u'name', u'connection', u'interface', u'subnet', u'dhcp',
u'vms', u'in_use', u'autostart', u'state', u'persistent']
self.assertEquals(sorted(keys), sorted(network.keys()))
@@ -135,7 +137,9 @@ class NetworkTests(unittest.TestCase):
# Verify the current system has at least one interface to create a
# bridged network
- interfaces = json.loads(self.request('/plugins/kimchi/interfaces?type=nic').read())
+ interfaces = json.loads(
+ self.request('/plugins/kimchi/interfaces?type=nic').read()
+ )
if len(interfaces) > 0:
iface = interfaces[0]['name']
networks.append({'name': u'bridge-network', 'connection': 'bridge',
diff --git a/plugins/kimchi/tests/test_model_storagepool.py b/plugins/kimchi/tests/test_model_storagepool.py
index 0bdf91a..5f9b966 100644
--- a/plugins/kimchi/tests/test_model_storagepool.py
+++ b/plugins/kimchi/tests/test_model_storagepool.py
@@ -63,7 +63,9 @@ class StoragepoolTests(unittest.TestCase):
self.request = partial(request, host, ssl_port)
def test_get_storagepools(self):
- storagepools = json.loads(self.request('/plugins/kimchi/storagepools').read())
+ storagepools = json.loads(
+ self.request('/plugins/kimchi/storagepools').read()
+ )
self.assertIn('default', [pool['name'] for pool in storagepools])
with RollbackContext() as rollback:
@@ -72,7 +74,8 @@ class StoragepoolTests(unittest.TestCase):
name = u'kīмсhī-storagepool-%i' % i
req = json.dumps({'name': name, 'type': 'dir',
'path': '/var/lib/libvirt/images/%i' % i})
- resp = self.request('/plugins/kimchi/storagepools', req, 'POST')
+ resp = self.request('/plugins/kimchi/storagepools', req,
+ 'POST')
rollback.prependDefer(model.storagepool_delete, name)
self.assertEquals(201, resp.status)
@@ -80,11 +83,13 @@ class StoragepoolTests(unittest.TestCase):
# Pool name must be unique
req = json.dumps({'name': name, 'type': 'dir',
'path': '/var/lib/libvirt/images/%i' % i})
- resp = self.request('/plugins/kimchi/storagepools', req, 'POST')
+ resp = self.request('/plugins/kimchi/storagepools', req,
+ 'POST')
self.assertEquals(400, resp.status)
# Verify pool information
- resp = self.request('/plugins/kimchi/storagepools/%s' % name.encode("utf-8"))
+ resp = self.request('/plugins/kimchi/storagepools/%s' %
+ name.encode("utf-8"))
p = json.loads(resp.read())
keys = [u'name', u'state', u'capacity', u'allocated',
u'available', u'path', u'source', u'type',
@@ -96,7 +101,9 @@ class StoragepoolTests(unittest.TestCase):
self.assertEquals(True, p['autostart'])
self.assertEquals(0, p['nr_volumes'])
- pools = json.loads(self.request('/plugins/kimchi/storagepools').read())
+ pools = json.loads(
+ self.request('/plugins/kimchi/storagepools').read()
+ )
self.assertEquals(len(storagepools) + 3, len(pools))
# Create a pool with an existing path
@@ -111,5 +118,6 @@ class StoragepoolTests(unittest.TestCase):
# Reserved pool return 400
req = json.dumps({'name': 'kimchi_isos', 'type': 'dir',
'path': '/var/lib/libvirt/images/%i' % i})
- resp = request(host, ssl_port, '/plugins/kimchi/storagepools', req, 'POST')
+ resp = request(host, ssl_port, '/plugins/kimchi/storagepools', req,
+ 'POST')
self.assertEquals(400, resp.status)
diff --git a/plugins/kimchi/tests/test_model_storagevolume.py b/plugins/kimchi/tests/test_model_storagevolume.py
index a05e479..8a717d3 100644
--- a/plugins/kimchi/tests/test_model_storagevolume.py
+++ b/plugins/kimchi/tests/test_model_storagevolume.py
@@ -63,13 +63,17 @@ def tearDownModule():
def _do_volume_test(self, model, host, ssl_port, pool_name):
def _task_lookup(taskid):
- return json.loads(self.request('/plugins/kimchi/tasks/%s' % taskid).read())
+ return json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % taskid).read()
+ )
- uri = '/plugins/kimchi/storagepools/%s/storagevolumes' % pool_name.encode('utf-8')
+ uri = '/plugins/kimchi/storagepools/%s/storagevolumes' \
+ % pool_name.encode('utf-8')
resp = self.request(uri)
self.assertEquals(200, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/%s' % pool_name.encode('utf-8'))
+ resp = self.request('/plugins/kimchi/storagepools/%s' %
+ pool_name.encode('utf-8'))
pool_info = json.loads(resp.read())
with RollbackContext() as rollback:
# Create storage volume with 'capacity'
@@ -86,7 +90,9 @@ def _do_volume_test(self, model, host, ssl_port, pool_name):
self.assertEquals(202, resp.status)
task_id = json.loads(resp.read())['id']
wait_task(_task_lookup, task_id)
- status = json.loads(self.request('/plugins/kimchi/tasks/%s' % task_id).read())
+ status = json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % task_id).read()
+ )
self.assertEquals('finished', status['status'])
vol_info = json.loads(self.request(vol_uri).read())
vol_info['name'] = vol
@@ -130,7 +136,9 @@ def _do_volume_test(self, model, host, ssl_port, pool_name):
rollback.prependDefer(model.storagevolume_delete, pool_name,
cloned_vol_name)
wait_task(_task_lookup, task['id'])
- task = json.loads(self.request('/plugins/kimchi/tasks/%s' % task['id']).read())
+ task = json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % task['id']).read()
+ )
self.assertEquals('finished', task['status'])
resp = self.request(uri + '/' + cloned_vol_name.encode('utf-8'))
diff --git a/plugins/kimchi/tests/test_rest.py b/plugins/kimchi/tests/test_rest.py
index a3012c3..a420062 100644
--- a/plugins/kimchi/tests/test_rest.py
+++ b/plugins/kimchi/tests/test_rest.py
@@ -111,8 +111,9 @@ class RestTests(unittest.TestCase):
# Now add a couple of VMs to the mock model
for i in xrange(10):
name = 'vm-%i' % i
- req = json.dumps({'name': name, 'template': '/plugins/kimchi/templates/test',
- 'users': test_users, 'groups': test_groups})
+ req = json.dumps({'name': name,
+ 'template': '/plugins/kimchi/templates/test',
+ 'users': test_users, 'groups': test_groups})
resp = self.request('/plugins/kimchi/vms', req, 'POST')
self.assertEquals(202, resp.status)
task = json.loads(resp.read())
@@ -215,7 +216,9 @@ class RestTests(unittest.TestCase):
req = json.dumps(params)
resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT')
self.assertEquals(303, resp.status)
- vm = json.loads(self.request('/plugins/kimchi/vms/∨м-црdαtеd', req).read())
+ vm = json.loads(
+ self.request('/plugins/kimchi/vms/∨м-црdαtеd', req).read()
+ )
for key in params.keys():
self.assertEquals(params[key], vm[key])
@@ -225,7 +228,9 @@ class RestTests(unittest.TestCase):
req = json.dumps({'users': users})
resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req,
'PUT')
self.assertEquals(200, resp.status)
- info = json.loads(self.request('/plugins/kimchi/vms/∨м-црdαtеd', '{}').read())
+ info = json.loads(
+ self.request('/plugins/kimchi/vms/∨м-црdαtеd', '{}').read()
+ )
self.assertEquals(users, info['users'])
# change only VM groups - users are not changed (default is empty)
@@ -234,7 +239,9 @@ class RestTests(unittest.TestCase):
req = json.dumps({'groups': groups})
resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req,
'PUT')
self.assertEquals(200, resp.status)
- info = json.loads(self.request('/plugins/kimchi/vms/∨м-црdαtеd', '{}').read())
+ info = json.loads(
+ self.request('/plugins/kimchi/vms/∨м-црdαtеd', '{}').read()
+ )
self.assertEquals(groups, info['groups'])
# change VM users (wrong value) and groups
@@ -271,7 +278,8 @@ class RestTests(unittest.TestCase):
self.assertEquals('images/icon-debian.png', vm['icon'])
# Verify the volume was created
- vol_uri = '/plugins/kimchi/storagepools/default-pool/storagevolumes/%s-0.img'
+ vol_uri = '/plugins/kimchi/storagepools/default-pool/storagevolumes/' \
+ + '%s-0.img'
resp = self.request(vol_uri % vm['uuid'])
vol = json.loads(resp.read())
self.assertEquals(1 << 30, vol['capacity'])
@@ -292,12 +300,14 @@ class RestTests(unittest.TestCase):
self.assertEquals(400, resp.status)
# Force poweroff the VM
- resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}',
+ 'POST')
vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read())
self.assertEquals('shutoff', vm['state'])
# Test create VM with same name fails with 400
- req = json.dumps({'name': 'test-vm', 'template': '/plugins/kimchi/templates/test'})
+ req = json.dumps({'name': 'test-vm',
+ 'template': '/plugins/kimchi/templates/test'})
resp = self.request('/plugins/kimchi/vms', req, 'POST')
self.assertEquals(400, resp.status)
@@ -306,7 +316,9 @@ class RestTests(unittest.TestCase):
self.assertEquals(202, resp.status)
task = json.loads(resp.read())
wait_task(self._task_lookup, task['id'])
- task = json.loads(self.request('/plugins/kimchi/tasks/%s' % task['id'], '{}').read())
+ task = json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % task['id'], '{}').read()
+ )
self.assertEquals('finished', task['status'])
clone_vm_name = task['target_uri'].split('/')[-2]
self.assertTrue(re.match(u'test-vm-clone-\d+', clone_vm_name))
@@ -329,12 +341,15 @@ class RestTests(unittest.TestCase):
# Create a snapshot on a stopped VM
params = {'name': 'test-snap'}
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', json.dumps(params),
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots',
+ json.dumps(params),
'POST')
self.assertEquals(202, resp.status)
task = json.loads(resp.read())
wait_task(self._task_lookup, task['id'])
- task = json.loads(self.request('/plugins/kimchi/tasks/%s' % task['id']).read())
+ task = json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % task['id']).read()
+ )
self.assertEquals('finished', task['status'])
# Look up a non-existing snapshot
@@ -343,8 +358,8 @@ class RestTests(unittest.TestCase):
self.assertEquals(404, resp.status)
# Look up a snapshot
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s' % params['name'], '{}',
- 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s' %
+ params['name'], '{}', 'GET')
self.assertEquals(200, resp.status)
snap = json.loads(resp.read())
self.assertTrue(int(time.time()) >= int(snap['created']))
@@ -352,33 +367,39 @@ class RestTests(unittest.TestCase):
self.assertEquals(u'', snap['parent'])
self.assertEquals(u'shutoff', snap['state'])
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}', 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}',
+ 'GET')
self.assertEquals(200, resp.status)
snaps = json.loads(resp.read())
self.assertEquals(1, len(snaps))
# Look up current snapshot (the one created above)
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current', '{}', 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current',
+ '{}', 'GET')
self.assertEquals(200, resp.status)
snap = json.loads(resp.read())
self.assertEquals(params['name'], snap['name'])
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}',
+ 'POST')
self.assertEquals(202, resp.status)
task = json.loads(resp.read())
snap_name = task['target_uri'].split('/')[-1]
wait_task(self._task_lookup, task['id'])
- resp = self.request('/plugins/kimchi/tasks/%s' % task['id'], '{}', 'GET')
+ resp = self.request('/plugins/kimchi/tasks/%s' % task['id'], '{}',
+ 'GET')
task = json.loads(resp.read())
self.assertEquals('finished', task['status'])
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}', 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}',
+ 'GET')
self.assertEquals(200, resp.status)
snaps = json.loads(resp.read())
self.assertEquals(2, len(snaps))
# Look up current snapshot (the one created above)
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current', '{}', 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current',
+ '{}', 'GET')
self.assertEquals(200, resp.status)
snap = json.loads(resp.read())
self.assertEquals(snap_name, snap['name'])
@@ -392,14 +413,15 @@ class RestTests(unittest.TestCase):
self.assertEquals(200, resp.status)
vm = json.loads(resp.read())
self.assertEquals(vm['state'], snap['state'])
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current', '{}', 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current',
+ '{}', 'GET')
self.assertEquals(200, resp.status)
current_snap = json.loads(resp.read())
self.assertEquals(snap, current_snap)
# Delete a snapshot
- resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s' % params['name'],
- '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s' %
+ params['name'], '{}', 'DELETE')
self.assertEquals(204, resp.status)
# Suspend the VM
@@ -407,7 +429,8 @@ class RestTests(unittest.TestCase):
self.assertEquals(200, resp.status)
vm = json.loads(resp.read())
self.assertEquals(vm['state'], 'shutoff')
- resp = self.request('/plugins/kimchi/vms/test-vm/suspend', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-vm/suspend', '{}',
+ 'POST')
self.assertEquals(400, resp.status)
resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST')
self.assertEquals(200, resp.status)
@@ -415,7 +438,8 @@ class RestTests(unittest.TestCase):
self.assertEquals(200, resp.status)
vm = json.loads(resp.read())
self.assertEquals(vm['state'], 'running')
- resp = self.request('/plugins/kimchi/vms/test-vm/suspend', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-vm/suspend', '{}',
+ 'POST')
self.assertEquals(200, resp.status)
resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'GET')
self.assertEquals(200, resp.status)
@@ -464,7 +488,8 @@ class RestTests(unittest.TestCase):
# Create a VM with specified graphics type and listen
graphics = {'type': 'vnc', 'listen': '127.0.0.1'}
- req = json.dumps({'name': 'test-vm', 'template': '/plugins/kimchi/templates/test',
+ req = json.dumps({'name': 'test-vm',
+ 'template': '/plugins/kimchi/templates/test',
'graphics': graphics})
resp = self.request('/plugins/kimchi/vms', req, 'POST')
self.assertEquals(202, resp.status)
@@ -480,7 +505,8 @@ class RestTests(unittest.TestCase):
# Create a VM with listen as ipv6 address
graphics = {'type': 'spice', 'listen': 'fe00::0'}
- req = json.dumps({'name': 'test-vm', 'template': '/plugins/kimchi/templates/test',
+ req = json.dumps({'name': 'test-vm',
+ 'template': '/plugins/kimchi/templates/test',
'graphics': graphics})
resp = self.request('/plugins/kimchi/vms', req, 'POST')
self.assertEquals(202, resp.status)
@@ -496,7 +522,8 @@ class RestTests(unittest.TestCase):
# Create a VM with specified graphics type and default listen
graphics = {'type': 'spice'}
- req = json.dumps({'name': 'test-vm', 'template': '/plugins/kimchi/templates/test',
+ req = json.dumps({'name': 'test-vm',
+ 'template': '/plugins/kimchi/templates/test',
'graphics': graphics})
resp = self.request('/plugins/kimchi/vms', req, 'POST')
self.assertEquals(202, resp.status)
@@ -512,14 +539,16 @@ class RestTests(unittest.TestCase):
# Try to create a VM with invalid graphics type
graphics = {'type': 'invalid'}
- req = json.dumps({'name': 'test-vm', 'template': '/plugins/kimchi/templates/test',
+ req = json.dumps({'name': 'test-vm',
+ 'template': '/plugins/kimchi/templates/test',
'graphics': graphics})
resp = self.request('/plugins/kimchi/vms', req, 'POST')
self.assertEquals(400, resp.status)
# Try to create a VM with invalid graphics listen
graphics = {'type': 'spice', 'listen': 'invalid'}
- req = json.dumps({'name': 'test-vm', 'template': '/plugins/kimchi/templates/test',
+ req = json.dumps({'name': 'test-vm',
+ 'template': '/plugins/kimchi/templates/test',
'graphics': graphics})
resp = self.request('/plugins/kimchi/vms', req, 'POST')
self.assertEquals(400, resp.status)
@@ -537,7 +566,8 @@ class RestTests(unittest.TestCase):
self.assertEquals(201, resp.status)
# Delete the template
rollback.prependDefer(self.request,
- '/plugins/kimchi/templates/test', '{}', 'DELETE')
+ '/plugins/kimchi/templates/test', '{}',
+ 'DELETE')
# Create a VM with default args
req = json.dumps({'name': 'test-vm',
@@ -547,11 +577,12 @@ class RestTests(unittest.TestCase):
task = json.loads(resp.read())
wait_task(self._task_lookup, task['id'])
# Delete the VM
- rollback.prependDefer(self.request,
- '/plugins/kimchi/vms/test-vm', '{}', 'DELETE')
+ rollback.prependDefer(self.request, '/plugins/kimchi/vms/test-vm',
+ '{}', 'DELETE')
# Check storage devices
- resp = self.request('/plugins/kimchi/vms/test-vm/storages', '{}', 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm/storages', '{}',
+ 'GET')
devices = json.loads(resp.read())
self.assertEquals(2, len(devices))
dev_types = []
@@ -567,7 +598,8 @@ class RestTests(unittest.TestCase):
req = json.dumps({'dev': 'hdx',
'type': 'cdrom',
'path': '/tmp/nonexistent.iso'})
- resp = self.request('/plugins/kimchi/vms/test-vm/storages', req, 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-vm/storages', req,
+ 'POST')
self.assertEquals(400, resp.status)
# Create temp storage pool
@@ -578,7 +610,8 @@ class RestTests(unittest.TestCase):
'type': 'dir'})
resp = self.request('/plugins/kimchi/storagepools', req,
'POST')
self.assertEquals(201, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/tmp/activate', req, 'POST')
+ resp = self.request('/plugins/kimchi/storagepools/tmp/activate',
+ req, 'POST')
self.assertEquals(200, resp.status)
# 'name' is required for this type of volume
@@ -586,16 +619,18 @@ class RestTests(unittest.TestCase):
'allocation': 512,
'type': 'disk',
'format': 'raw'})
- resp = self.request('/plugins/kimchi/storagepools/tmp/storagevolumes',
- req, 'POST')
+ resp = self.request(
+ '/plugins/kimchi/storagepools/tmp/storagevolumes', req, 'POST'
+ )
self.assertEquals(400, resp.status)
req = json.dumps({'name': "attach-volume",
'capacity': 1024,
'allocation': 512,
'type': 'disk',
'format': 'raw'})
- resp = self.request('/plugins/kimchi/storagepools/tmp/storagevolumes',
- req, 'POST')
+ resp = self.request(
+ '/plugins/kimchi/storagepools/tmp/storagevolumes', req, 'POST'
+ )
self.assertEquals(202, resp.status)
time.sleep(1)
@@ -606,7 +641,9 @@ class RestTests(unittest.TestCase):
'pool': 'tmp',
'vol': 'attach-volume',
'path': '/tmp/existent.iso'})
- resp = self.request('/plugins/kimchi/vms/test-vm/storages', req, 'POST')
+ resp = self.request(
+ '/plugins/kimchi/vms/test-vm/storages', req, 'POST'
+ )
self.assertEquals(400, resp.status)
# Attach disk with both path and volume specified
@@ -615,21 +652,27 @@ class RestTests(unittest.TestCase):
'pool': 'tmp',
'vol': 'attach-volume',
'path': '/tmp/existent.iso'})
- resp = self.request('/plugins/kimchi/vms/test-vm/storages', req, 'POST')
+ resp = self.request(
+ '/plugins/kimchi/vms/test-vm/storages', req, 'POST'
+ )
self.assertEquals(400, resp.status)
# Attach disk with only pool specified
req = json.dumps({'dev': 'hdx',
'type': 'cdrom',
'pool': 'tmp'})
- resp = self.request('/plugins/kimchi/vms/test-vm/storages', req, 'POST')
+ resp = self.request(
+ '/plugins/kimchi/vms/test-vm/storages', req, 'POST'
+ )
self.assertEquals(400, resp.status)
# Attach disk with pool and vol specified
req = json.dumps({'type': 'disk',
'pool': 'tmp',
'vol': 'attach-volume'})
- resp = self.request('/plugins/kimchi/vms/test-vm/storages', req, 'POST')
+ resp = self.request(
+ '/plugins/kimchi/vms/test-vm/storages', req, 'POST'
+ )
self.assertEquals(201, resp.status)
cd_info = json.loads(resp.read())
self.assertEquals('disk', cd_info['type'])
@@ -637,7 +680,9 @@ class RestTests(unittest.TestCase):
# Attach a cdrom with existent dev name
req = json.dumps({'type': 'cdrom',
'path': '/tmp/existent.iso'})
- resp = self.request('/plugins/kimchi/vms/test-vm/storages', req, 'POST')
+ resp = self.request(
+ '/plugins/kimchi/vms/test-vm/storages', req, 'POST'
+ )
self.assertEquals(201, resp.status)
cd_info = json.loads(resp.read())
cd_dev = cd_info['dev']
@@ -645,34 +690,42 @@ class RestTests(unittest.TestCase):
self.assertEquals('/tmp/existent.iso', cd_info['path'])
# Delete the file and cdrom
rollback.prependDefer(self.request,
- '/plugins/kimchi/vms/test-vm/storages/hdx', '{}', 'DELETE')
+ '/plugins/kimchi/vms/test-vm/storages/hdx',
+ '{}', 'DELETE')
os.remove('/tmp/existent.iso')
# Change path of storage cdrom
cdrom = u'http://fedora.mirrors.tds.net/pub/fedora/releases/20/'\
'Live/x86_64/Fedora-Live-Desktop-x86_64-20-1.iso'
req = json.dumps({'path': cdrom})
- resp = self.request('/plugins/kimchi/vms/test-vm/storages/' + cd_dev, req, 'PUT')
+ resp = self.request('/plugins/kimchi/vms/test-vm/storages/' +
+ cd_dev, req, 'PUT')
self.assertEquals(200, resp.status)
cd_info = json.loads(resp.read())
self.assertEquals(urlparse.urlparse(cdrom).path,
urlparse.urlparse(cd_info['path']).path)
# Test GET
- devs = json.loads(self.request('/plugins/kimchi/vms/test-vm/storages').read())
+ devs = json.loads(
+ self.request('/plugins/kimchi/vms/test-vm/storages').read()
+ )
self.assertEquals(4, len(devs))
# Detach storage cdrom
- resp = self.request('/plugins/kimchi/vms/test-vm/storages/' + cd_dev,
- '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/vms/test-vm/storages/' +
+ cd_dev, '{}', 'DELETE')
self.assertEquals(204, resp.status)
# Test GET
- devs = json.loads(self.request('/plugins/kimchi/vms/test-vm/storages').read())
+ devs = json.loads(
+ self.request('/plugins/kimchi/vms/test-vm/storages').read()
+ )
self.assertEquals(3, len(devs))
- resp = self.request('/plugins/kimchi/storagepools/tmp/deactivate', {}, 'POST')
+ resp = self.request('/plugins/kimchi/storagepools/tmp/deactivate',
+ {}, 'POST')
self.assertEquals(200, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/tmp', {}, 'DELETE')
+ resp = self.request('/plugins/kimchi/storagepools/tmp', {},
+ 'DELETE')
self.assertEquals(204, resp.status)
def test_vm_iface(self):
@@ -684,7 +737,8 @@ class RestTests(unittest.TestCase):
self.assertEquals(201, resp.status)
# Delete the template
rollback.prependDefer(self.request,
- '/plugins/kimchi/templates/test', '{}', 'DELETE')
+ '/plugins/kimchi/templates/test', '{}',
+ 'DELETE')
# Create a VM with default args
req = json.dumps({'name': 'test-vm',
@@ -695,7 +749,8 @@ class RestTests(unittest.TestCase):
wait_task(self._task_lookup, task['id'])
# Delete the VM
rollback.prependDefer(self.request,
- '/plugins/kimchi/vms/test-vm', '{}', 'DELETE')
+ '/plugins/kimchi/vms/test-vm', '{}',
+ 'DELETE')
# Create a network
req = json.dumps({'name': 'test-network',
@@ -705,14 +760,19 @@ class RestTests(unittest.TestCase):
self.assertEquals(201, resp.status)
# Delete the network
rollback.prependDefer(self.request,
- '/plugins/kimchi/networks/test-network', '{}', 'DELETE')
+ '/plugins/kimchi/networks/test-network',
+ '{}', 'DELETE')
- ifaces = json.loads(self.request('/plugins/kimchi/vms/test-vm/ifaces').read())
+ ifaces = json.loads(
+ self.request('/plugins/kimchi/vms/test-vm/ifaces').read()
+ )
self.assertEquals(1, len(ifaces))
for iface in ifaces:
- res = json.loads(self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' %
- iface['mac']).read())
+ res = json.loads(
+ self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' %
+ iface['mac']).read()
+ )
self.assertEquals('default', res['network'])
self.assertEquals(17, len(res['mac']))
self.assertEquals(get_template_default('old', 'nic_model'),
@@ -728,7 +788,8 @@ class RestTests(unittest.TestCase):
req = json.dumps({"type": "network",
"network": "test-network",
"model": "virtio"})
- resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req, 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req,
+ 'POST')
self.assertEquals(201, resp.status)
iface = json.loads(resp.read())
@@ -744,13 +805,15 @@ class RestTests(unittest.TestCase):
resp = self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' %
iface['mac'], req, 'PUT')
self.assertEquals(303, resp.status)
- iface = json.loads(self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' %
- newMacAddr).read())
+ iface = json.loads(
+ self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' %
+ newMacAddr).read()
+ )
self.assertEquals(newMacAddr, iface['mac'])
# detach network interface from vm
- resp = self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % iface['mac'],
- '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' %
+ iface['mac'], '{}', 'DELETE')
self.assertEquals(204, resp.status)
def test_vm_customise_storage(self):
@@ -768,7 +831,8 @@ class RestTests(unittest.TestCase):
'type': 'dir'})
resp = self.request('/plugins/kimchi/storagepools', req, 'POST')
self.assertEquals(201, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/alt/activate', req, 'POST')
+ resp = self.request('/plugins/kimchi/storagepools/alt/activate', req,
+ 'POST')
self.assertEquals(200, resp.status)
# Create a VM
@@ -784,10 +848,12 @@ class RestTests(unittest.TestCase):
# Test template not changed after vm customise its pool
t = json.loads(self.request('/plugins/kimchi/templates/test').read())
- self.assertEquals(t['storagepool'], '/plugins/kimchi/storagepools/default-pool')
+ self.assertEquals(t['storagepool'],
+ '/plugins/kimchi/storagepools/default-pool')
# Verify the volume was created
- vol_uri = '/plugins/kimchi/storagepools/alt/storagevolumes/%s-0.img' % vm_info['uuid']
+ vol_uri = '/plugins/kimchi/storagepools/alt/storagevolumes/%s-0.img' \
+ % vm_info['uuid']
resp = self.request(vol_uri)
vol = json.loads(resp.read())
self.assertEquals(1 << 30, vol['capacity'])
@@ -809,18 +875,23 @@ class RestTests(unittest.TestCase):
# Test create vms using lun of this pool
# activate the storage pool
- resp = self.request('/plugins/kimchi/storagepools/scsi_fc_pool/activate', '{}',
- 'POST')
+ resp = self.request(
+ '/plugins/kimchi/storagepools/scsi_fc_pool/activate', '{}', 'POST'
+ )
# Create template fails because SCSI volume is missing
- tmpl_params = {'name': 'test_fc_pool', 'cdrom': fake_iso,
- 'storagepool': '/plugins/kimchi/storagepools/scsi_fc_pool'}
+ tmpl_params = {
+ 'name': 'test_fc_pool', 'cdrom': fake_iso,
+ 'storagepool': '/plugins/kimchi/storagepools/scsi_fc_pool'
+ }
req = json.dumps(tmpl_params)
resp = self.request('/plugins/kimchi/templates', req, 'POST')
self.assertEquals(400, resp.status)
# Choose SCSI volume to create template
- resp = self.request('/plugins/kimchi/storagepools/scsi_fc_pool/storagevolumes')
+ resp = self.request(
+ '/plugins/kimchi/storagepools/scsi_fc_pool/storagevolumes'
+ )
lun_name = json.loads(resp.read())[0]['name']
tmpl_params['disks'] = [{'index': 0, 'volume': lun_name}]
@@ -842,7 +913,8 @@ class RestTests(unittest.TestCase):
self.assertEquals('running', vm['state'])
# Force poweroff the VM
- resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}',
+ 'POST')
vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read())
self.assertEquals('shutoff', vm['state'])
@@ -859,10 +931,11 @@ class RestTests(unittest.TestCase):
# Create 5 unnamed vms from this template
for i in xrange(1, 6):
req = json.dumps({'template': '/plugins/kimchi/templates/test'})
- task = json.loads(self.request('/plugins/kimchi/vms',
+ task = json.loads(self.request('/plugins/kimchi/vms',
req, 'POST').read())
wait_task(self._task_lookup, task['id'])
- resp = self.request('/plugins/kimchi/vms/test-vm-%i' % i, {}, 'GET')
+ resp = self.request('/plugins/kimchi/vms/test-vm-%i' % i, {},
+ 'GET')
self.assertEquals(resp.status, 200)
count = len(json.loads(self.request('/plugins/kimchi/vms').read()))
self.assertEquals(6, count)
@@ -884,7 +957,10 @@ class RestTests(unittest.TestCase):
def test_create_vm_with_img_based_template(self):
resp = json.loads(
- self.request('/plugins/kimchi/storagepools/default-pool/storagevolumes').read())
+ self.request(
+ '/plugins/kimchi/storagepools/default-pool/storagevolumes'
+ ).read()
+ )
self.assertEquals(0, len(resp))
# Create a Template
@@ -902,7 +978,10 @@ class RestTests(unittest.TestCase):
# Test storage volume created with backing store of base file
resp = json.loads(
- self.request('/plugins/kimchi/storagepools/default-pool/storagevolumes').read())
+ self.request(
+ '/plugins/kimchi/storagepools/default-pool/storagevolumes'
+ ).read()
+ )
self.assertEquals(1, len(resp))
def _create_pool(self, name):
@@ -922,13 +1001,15 @@ class RestTests(unittest.TestCase):
def _delete_pool(self, name):
# Delete the storage pool
- resp = self.request('/plugins/kimchi/storagepools/%s' % name, '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/storagepools/%s' % name, '{}',
+ 'DELETE')
self.assertEquals(204, resp.status)
def test_iso_scan_shallow(self):
# fake environment preparation
self._create_pool('pool-3')
- self.request('/plugins/kimchi/storagepools/pool-3/activate', '{}', 'POST')
+ self.request('/plugins/kimchi/storagepools/pool-3/activate', '{}',
+ 'POST')
params = {'name': 'fedora.iso',
'capacity': 1073741824, # 1 GiB
'type': 'file',
@@ -936,8 +1017,11 @@ class RestTests(unittest.TestCase):
task_info = model.storagevolumes_create('pool-3', params)
wait_task(self._task_lookup, task_info['id'])
- storagevolume = json.loads(self.request(
- '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes/').read())[0]
+ storagevolume = json.loads(
+ self.request(
+ '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes/'
+ ).read()
+ )[0]
self.assertEquals('fedora.iso', storagevolume['name'])
self.assertEquals('iso', storagevolume['format'])
self.assertEquals('/var/lib/libvirt/images/fedora.iso',
@@ -966,19 +1050,25 @@ class RestTests(unittest.TestCase):
self.assertEquals(get_template_default('old', 'memory'),
t['memory'])
# Deactivate or destroy scan pool return 405
- resp = self.request('/plugins/kimchi/storagepools/kimchi_isos/storagevolumes'
- '/deactivate', '{}', 'POST')
+ resp = self.request(
+ '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes'
+ '/deactivate', '{}', 'POST'
+ )
self.assertEquals(405, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/kimchi_isos/storagevolumes',
- '{}', 'DELETE')
+ resp = self.request(
+ '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes',
+ '{}', 'DELETE'
+ )
self.assertEquals(405, resp.status)
# Delete the template
- resp = self.request('/plugins/kimchi/templates/%s' % t['name'], '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/templates/%s' % t['name'], '{}',
+ 'DELETE')
self.assertEquals(204, resp.status)
- resp = self.request('/plugins/kimchi/storagepools/pool-3/deactivate', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/storagepools/pool-3/deactivate',
+ '{}', 'POST')
self.assertEquals(200, resp.status)
self._delete_pool('pool-3')
@@ -1015,7 +1105,8 @@ class RestTests(unittest.TestCase):
lastMod2 = resp.getheader('last-modified')
self.assertEquals(lastMod2, lastMod1)
- resp = self.request('/plugins/kimchi/vms/test-vm/screenshot', '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/vms/test-vm/screenshot', '{}',
+ 'DELETE')
self.assertEquals(405, resp.status)
# No screenshot after stopped the VM
@@ -1040,28 +1131,39 @@ class RestTests(unittest.TestCase):
self.assertEquals(sorted(keys), sorted(interface.keys()))
def _task_lookup(self, taskid):
- return json.loads(self.request('/plugins/kimchi/tasks/%s' % taskid).read())
+ return json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % taskid).read()
+ )
def test_tasks(self):
- id1 = add_task('/plugins/kimchi/tasks/1', self._async_op, model.objstore)
- id2 = add_task('/plugins/kimchi/tasks/2', self._except_op, model.objstore)
- id3 = add_task('/plugins/kimchi/tasks/3', self._intermid_op, model.objstore)
+ id1 = add_task('/plugins/kimchi/tasks/1', self._async_op,
+ model.objstore)
+ id2 = add_task('/plugins/kimchi/tasks/2', self._except_op,
+ model.objstore)
+ id3 = add_task('/plugins/kimchi/tasks/3', self._intermid_op,
+ model.objstore)
target_uri = urllib2.quote('^/tasks/*', safe="")
filter_data = 'status=running&target_uri=%s' % target_uri
- tasks = json.loads(self.request('/plugins/kimchi/tasks?%s' % filter_data).read())
+ tasks = json.loads(
+ self.request('/plugins/kimchi/tasks?%s' % filter_data).read()
+ )
self.assertEquals(3, len(tasks))
tasks = json.loads(self.request('/plugins/kimchi/tasks').read())
tasks_ids = [int(t['id']) for t in tasks]
self.assertEquals(set([id1, id2, id3]) - set(tasks_ids), set([]))
wait_task(self._task_lookup, id2)
- foo2 = json.loads(self.request('/plugins/kimchi/tasks/%s' % id2).read())
+ foo2 = json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % id2).read()
+ )
keys = ['id', 'status', 'message', 'target_uri']
self.assertEquals(sorted(keys), sorted(foo2.keys()))
self.assertEquals('failed', foo2['status'])
wait_task(self._task_lookup, id3)
- foo3 = json.loads(self.request('/plugins/kimchi/tasks/%s' % id3).read())
+ foo3 = json.loads(
+ self.request('/plugins/kimchi/tasks/%s' % id3).read()
+ )
self.assertEquals('in progress', foo3['message'])
self.assertEquals('running', foo3['status'])
@@ -1129,44 +1231,51 @@ class RestTests(unittest.TestCase):
self.assertEquals(200, resp.status)
def _report_delete(self, name):
- request(host, ssl_port, '/plugins/kimchi/debugreports/%s' % name, '{}', 'DELETE')
+ request(host, ssl_port, '/plugins/kimchi/debugreports/%s' % name, '{}',
+ 'DELETE')
def test_create_debugreport(self):
req = json.dumps({'name': 'report1'})
with RollbackContext() as rollback:
- resp = request(host, ssl_port, '/plugins/kimchi/debugreports', req, 'POST')
+ resp = request(host, ssl_port, '/plugins/kimchi/debugreports', req,
+ 'POST')
self.assertEquals(202, resp.status)
task = json.loads(resp.read())
# make sure the debugreport doesn't exist until the
# the task is finished
wait_task(self._task_lookup, task['id'])
rollback.prependDefer(self._report_delete, 'report2')
- resp = request(host, ssl_port, '/plugins/kimchi/debugreports/report1')
+ resp = request(host, ssl_port,
+ '/plugins/kimchi/debugreports/report1')
debugreport = json.loads(resp.read())
self.assertEquals("report1", debugreport['name'])
self.assertEquals(200, resp.status)
req = json.dumps({'name': 'report2'})
- resp = request(host, ssl_port, '/plugins/kimchi/debugreports/report1',
- req, 'PUT')
+ resp = request(host, ssl_port,
+ '/plugins/kimchi/debugreports/report1', req, 'PUT')
self.assertEquals(303, resp.status)
def test_debugreport_download(self):
req = json.dumps({'name': 'report1'})
with RollbackContext() as rollback:
- resp = request(host, ssl_port, '/plugins/kimchi/debugreports', req, 'POST')
+ resp = request(host, ssl_port, '/plugins/kimchi/debugreports', req,
+ 'POST')
self.assertEquals(202, resp.status)
task = json.loads(resp.read())
# make sure the debugreport doesn't exist until the
# the task is finished
wait_task(self._task_lookup, task['id'], 20)
rollback.prependDefer(self._report_delete, 'report1')
- resp = request(host, ssl_port, '/plugins/kimchi/debugreports/report1')
+ resp = request(host, ssl_port,
+ '/plugins/kimchi/debugreports/report1')
debugreport = json.loads(resp.read())
self.assertEquals("report1", debugreport['name'])
self.assertEquals(200, resp.status)
- resp = request(host, ssl_port, '/plugins/kimchi/debugreports/report1/content')
+ resp = request(host, ssl_port,
+ '/plugins/kimchi/debugreports/report1/content')
self.assertEquals(200, resp.status)
- resp = request(host, ssl_port, '/plugins/kimchi/debugreports/report1')
+ resp = request(host, ssl_port,
+ '/plugins/kimchi/debugreports/report1')
debugre = json.loads(resp.read())
resp = request(host, ssl_port, debugre['uri'])
self.assertEquals(200, resp.status)
diff --git a/plugins/kimchi/tests/test_template.py b/plugins/kimchi/tests/test_template.py
index 39adeb1..c7de182 100644
--- a/plugins/kimchi/tests/test_template.py
+++ b/plugins/kimchi/tests/test_template.py
@@ -83,24 +83,30 @@ class TemplateTests(unittest.TestCase):
keys = ['name', 'icon', 'invalid', 'os_distro', 'os_version', 'cpus',
'memory', 'cdrom', 'disks', 'storagepool', 'networks',
'folder', 'graphics', 'cpu_info']
- tmpl = json.loads(self.request('/plugins/kimchi/templates/test').read())
+ tmpl = json.loads(
+ self.request('/plugins/kimchi/templates/test').read()
+ )
self.assertEquals(sorted(tmpl.keys()), sorted(keys))
# Verify if default disk format was configured
self.assertEquals(tmpl['disks'][0]['format'], 'qcow2')
# Clone a template
- resp = self.request('/plugins/kimchi/templates/test/clone', '{}', 'POST')
+ resp = self.request('/plugins/kimchi/templates/test/clone', '{}',
+ 'POST')
self.assertEquals(303, resp.status)
# Verify the cloned template
- tmpl_cloned = json.loads(self.request('/plugins/kimchi/templates/test-clone1').read())
+ tmpl_cloned = json.loads(
+ self.request('/plugins/kimchi/templates/test-clone1').read()
+ )
del tmpl['name']
del tmpl_cloned['name']
self.assertEquals(tmpl, tmpl_cloned)
# Delete the cloned template
- resp = self.request('/plugins/kimchi/templates/test-clone1', '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/templates/test-clone1', '{}',
+ 'DELETE')
self.assertEquals(204, resp.status)
# Create a template with same name fails with 400
@@ -110,7 +116,8 @@ class TemplateTests(unittest.TestCase):
# Create an image based template
open('/tmp/mock.img', 'w').close()
- t = {'name': 'test_img_template', 'disks': [{'base': '/tmp/mock.img'}]}
+ t = {'name': 'test_img_template',
+ 'disks': [{'base': '/tmp/mock.img'}]}
req = json.dumps(t)
resp = self.request('/plugins/kimchi/templates', req, 'POST')
self.assertEquals(201, resp.status)
@@ -122,7 +129,9 @@ class TemplateTests(unittest.TestCase):
req = json.dumps(t)
resp = self.request('/plugins/kimchi/templates', req, 'POST')
self.assertEquals(201, resp.status)
- tmpl = json.loads(self.request('/plugins/kimchi/templates/test-format').read())
+ tmpl = json.loads(
+ self.request('/plugins/kimchi/templates/test-format').read()
+ )
self.assertEquals(tmpl['disks'][0]['format'], 'vmdk')
def test_customized_tmpl(self):
@@ -131,11 +140,14 @@ class TemplateTests(unittest.TestCase):
req = json.dumps(t)
resp = self.request('/plugins/kimchi/templates', req, 'POST')
self.assertEquals(201, resp.status)
- tmpl = json.loads(self.request('/plugins/kimchi/templates/test').read())
+ tmpl = json.loads(
+ self.request('/plugins/kimchi/templates/test').read()
+ )
# Update name
new_name = u'kīмсhīTmpl'
- new_tmpl_uri = '/plugins/kimchi/templates/%s' % new_name.encode('utf-8')
+ new_tmpl_uri = '/plugins/kimchi/templates/%s' \
+ % new_name.encode('utf-8')
req = json.dumps({'name': new_name})
resp = self.request('/plugins/kimchi/templates/test', req, 'PUT')
self.assertEquals(303, resp.status)
@@ -257,7 +269,9 @@ class TemplateTests(unittest.TestCase):
# Verify the current system has at least one interface to create a
# bridged network
- interfaces = json.loads(self.request('/plugins/kimchi/interfaces?type=nic').read())
+ interfaces = json.loads(
+ self.request('/plugins/kimchi/interfaces?type=nic').read()
+ )
if len(interfaces) > 0:
iface = interfaces[0]['name']
networks.append({'name': u'bridge-network', 'connection': 'bridge',
@@ -281,11 +295,15 @@ class TemplateTests(unittest.TestCase):
self.assertEquals(201, resp.status)
# MockModel always returns 2 partitions (vdx, vdz)
- partitions = json.loads(self.request('/plugins/kimchi/host/partitions').read())
+ partitions = json.loads(
+ self.request('/plugins/kimchi/host/partitions').read()
+ )
devs = [dev['path'] for dev in partitions]
# MockModel always returns 3 FC devices
- fc_devs = json.loads(self.request('/plugins/kimchi/host/devices?_cap=fc_host').read())
+ fc_devs = json.loads(
+ self.request('/plugins/kimchi/host/devices?_cap=fc_host').read()
+ )
fc_devs = [dev['name'] for dev in fc_devs]
poolDefs = [
@@ -303,8 +321,10 @@ class TemplateTests(unittest.TestCase):
'source': {'devices': [devs[0]]}}]
for pool in poolDefs:
- self.request('/plugins/kimchi/storagepools', json.dumps(pool), 'POST')
- pool_uri = '/plugins/kimchi/storagepools/%s' % pool['name'].encode('utf-8')
+ self.request('/plugins/kimchi/storagepools', json.dumps(pool),
+ 'POST')
+ pool_uri = '/plugins/kimchi/storagepools/%s' \
+ % pool['name'].encode('utf-8')
self.request(pool_uri + '/activate', '{}', 'POST')
req = None
@@ -319,7 +339,8 @@ class TemplateTests(unittest.TestCase):
req = json.dumps({'storagepool': pool_uri})
if req is not None:
- resp = self.request('/plugins/kimchi/templates/test', req, 'PUT')
+ resp = self.request('/plugins/kimchi/templates/test', req,
+ 'PUT')
self.assertEquals(200, resp.status)
def test_tmpl_integrity(self):
@@ -329,7 +350,8 @@ class TemplateTests(unittest.TestCase):
pool = {'type': 'dir', 'name': 'dir-pool', 'path': '/tmp/dir-pool'}
self.request('/plugins/kimchi/storagepools', json.dumps(pool), 'POST')
- pool_uri = '/plugins/kimchi/storagepools/%s' % pool['name'].encode('utf-8')
+ pool_uri = '/plugins/kimchi/storagepools/%s' \
+ % pool['name'].encode('utf-8')
self.request(pool_uri + '/activate', '{}', 'POST')
# Create a template using the custom network and pool
@@ -342,19 +364,22 @@ class TemplateTests(unittest.TestCase):
# Try to delete network
# It should fail as it is associated to a template
- resp = self.request('/plugins/kimchi/networks/nat-network', '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/networks/nat-network', '{}',
+ 'DELETE')
self.assertIn("KCHNET0017E",
json.loads(resp.read())["reason"])
# Update template to release network and then delete it
params = {'networks': []}
req = json.dumps(params)
self.request('/plugins/kimchi/templates/test', req, 'PUT')
- resp = self.request('/plugins/kimchi/networks/nat-network', '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/networks/nat-network', '{}',
+ 'DELETE')
self.assertEquals(204, resp.status)
# Try to delete the storagepool
# It should fail as it is associated to a template
- resp = self.request('/plugins/kimchi/storagepools/dir-pool', '{}', 'DELETE')
+ resp = self.request('/plugins/kimchi/storagepools/dir-pool', '{}',
+ 'DELETE')
self.assertEquals(400, resp.status)
# Verify the template
diff --git a/plugins/kimchi/tests/utils.py b/plugins/kimchi/tests/utils.py
index f674164..f80b14f 100644
--- a/plugins/kimchi/tests/utils.py
+++ b/plugins/kimchi/tests/utils.py
@@ -167,8 +167,8 @@ def get_remote_iso_path():
"""
host_arch = os.uname()[4]
remote_path = ''
- with open(os.path.join(PluginPaths('kimchi').conf_dir, 'distros.d', 'fedora.json')) \
- as fedora_isos:
+ with open(os.path.join(PluginPaths('kimchi').conf_dir, 'distros.d',
+ 'fedora.json')) as fedora_isos:
# Get a list of dicts
json_isos_list = json.load(fedora_isos)
for iso in json_isos_list:
@@ -228,12 +228,12 @@ def wait_task(task_lookup, taskid, timeout=10):
task_info = task_lookup(taskid)
if task_info['status'] == "running":
wok_log.info("Waiting task %s, message: %s",
- taskid, task_info['message'])
+ taskid, task_info['message'])
time.sleep(1)
else:
return
wok_log.error("Timeout while process long-run task, "
- "try to increase timeout value.")
+ "try to increase timeout value.")
# The action functions in model backend raise NotFoundError exception if the
diff --git a/plugins/kimchi/utils.py b/plugins/kimchi/utils.py
index dc00481..92ca83a 100644
--- a/plugins/kimchi/utils.py
+++ b/plugins/kimchi/utils.py
@@ -37,4 +37,3 @@ def template_name_from_uri(uri):
def pool_name_from_uri(uri):
return _uri_to_name('storagepools', uri)
-
diff --git a/src/wok/cachebust.py b/src/wok/cachebust.py
index d04b2c6..26cb232 100644
--- a/src/wok/cachebust.py
+++ b/src/wok/cachebust.py
@@ -23,7 +23,7 @@ import os
from wok.config import paths, PluginPaths
-def href(url, plugin = None):
+def href(url, plugin=None):
if plugin is None:
basePath = paths.ui_dir
else:
diff --git a/src/wok/server.py b/src/wok/server.py
index 5b48049..e18ecb5 100644
--- a/src/wok/server.py
+++ b/src/wok/server.py
@@ -75,8 +75,8 @@ class Server(object):
# thus it is safe to unsubscribe.
cherrypy.engine.timeout_monitor.unsubscribe()
cherrypy.tools.nocache = cherrypy.Tool('on_end_resource', set_no_cache)
- cherrypy.tools.wokauth = cherrypy.Tool('before_handler',
- auth.wokauth)
+ cherrypy.tools.wokauth = cherrypy.Tool('before_handler', auth.wokauth)
+
# Setting host to 127.0.0.1. This makes wok run
# as a localhost app, inaccessible to the outside
# directly. You must go through the proxy.
@@ -169,11 +169,12 @@ class Server(object):
# dynamically add tools.wokauth.on = True to extra plugin APIs
if extra_auth:
try:
- authed_apis = import_class(('plugins.%s.%s' % (plugin_name,
- extra_auth)))
+ authed_apis = import_class(('plugins.%s.%s' %
+ (plugin_name, extra_auth)))
except ImportError:
cherrypy.log.error_log.error("Failed to import subnodes "
- "for plugin %s" %
plugin_class)
+ "for plugin %s" %
+ plugin_class)
continue
urlSubNodes = {}
diff --git a/src/wok/utils.py b/src/wok/utils.py
index a5083af..8d51f22 100644
--- a/src/wok/utils.py
+++ b/src/wok/utils.py
@@ -191,10 +191,10 @@ def run_command(cmd, timeout=None):
if proc.returncode != 0:
wok_log.error("rc: %s error: %s returned from cmd: %s",
- proc.returncode, error, ' '.join(cmd))
+ proc.returncode, error, ' '.join(cmd))
elif error:
wok_log.debug("error: %s returned from cmd: %s",
- error, ' '.join(cmd))
+ error, ' '.join(cmd))
if timeout_flag[0]:
msg = ("subprocess is killed by signal.SIGKILL for "
@@ -240,7 +240,7 @@ def patch_find_nfs_target(nfs_server):
out = run_command(cmd, 10)[0]
except TimeoutExpired:
wok_log.warning("server %s query timeout, may not have any path "
- "exported", nfs_server)
+ "exported", nfs_server)
return list()
targets = parse_cmd_output(out, output_items=['target'])
--
1.7.1