[PATCH V4 0/5] Storagepool SCSI/FC

V4:
 - Implement mockmodel and tests
 - Fix UI
 - Fix other minor issues

V3:
 - Changed API to only receive the scsi host name when creating a new pool
 - Changed API to require a LUN when creating a new VM on a SCSI pool
 - Created feature test and removed libvirt test function
 - Rebased with new model structure
 - Added error function handlers to UIs
 - Fixed LUN selection window

V2:
 - Implement Fibre Channel device discovery on the host
 - Allow vms_create to receive a volume to create the disk (if pool is SCSI)
 - Create basic UI to select SCSI host when creating SCSI FC pool
 - Draft of UI to select a LUN to create a new VM when the template has a SCSI
   pool configured. (Need help of UI guys here!)

Rodrigo Trujillo (5):
  Storagepool SCSI/FC: Implement node devices API backend
  Storagepool SCSI/FC: Backend implementation
  Storagepool SCSI/FC: Implement UI for FC scsi_host pool
  Storagepool SCSI/FC: Modifies UI flow to select a LUN to new VM
  Storagepool SCSI/FC: Modifies mockmodel and implements tests for FC pool

 docs/API.md                              |  5 ++-
 src/kimchi/API.json                      | 14 +++++-
 src/kimchi/control/host.py               | 16 +++++++
 src/kimchi/featuretests.py               | 27 ++++++++++++
 src/kimchi/mockmodel.py                  | 53 ++++++++++++++++++++---
 src/kimchi/model/config.py               |  5 ++-
 src/kimchi/model/host.py                 | 54 +++++++++++++++++++++++
 src/kimchi/model/libvirtstoragepool.py   | 48 ++++++++++++++++++++-
 src/kimchi/model/storagepools.py         | 22 ++++++++--
 src/kimchi/model/templates.py            |  5 +++
 src/kimchi/model/vms.py                  | 25 ++++++++++-
 src/kimchi/vmtemplate.py                 | 31 +++++++++++++-
 tests/test_rest.py                       | 47 ++++++++++++++++++++
 tests/test_storagepool.py                | 21 +++++++++
 ui/js/src/kimchi.api.js                  | 24 +++++++++++
 ui/js/src/kimchi.guest_add_main.js       | 73 ++++++++++++++++++++++++++++++--
 ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++-
 ui/pages/i18n.html.tmpl                  |  5 +++
 ui/pages/storagepool-add.html.tmpl       | 12 ++++++

 19 files changed, 510 insertions(+), 23 deletions(-)

--
1.8.5.3

In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices.

Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com>
---
 src/kimchi/control/host.py | 16 ++++++++++++++
 src/kimchi/model/host.py   | 54 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)

diff --git a/src/kimchi/control/host.py b/src/kimchi/control/host.py
index 053c822..936d298 100644
--- a/src/kimchi/control/host.py
+++ b/src/kimchi/control/host.py
@@ -36,6 +36,7 @@ class Host(Resource):
         self.shutdown = self.generate_action_handler('shutdown')
         self.stats = HostStats(self.model)
         self.partitions = Partitions(self.model)
+        self.devices = Devices(self.model)

     @property
     def data(self):
@@ -61,3 +62,18 @@ class Partition(Resource):
     @property
     def data(self):
         return self.info
+
+
+class Devices(Collection):
+    def __init__(self, model):
+        super(Devices, self).__init__(model)
+        self.resource = Device
+
+
+class Device(Resource):
+    def __init__(self, model, id):
+        super(Device, self).__init__(model, id)
+
+    @property
+    def data(self):
+        return self.info
diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py
index a3d9e38..0545a88 100644
--- a/src/kimchi/model/host.py
+++ b/src/kimchi/model/host.py
@@ -30,6 +30,7 @@ from cherrypy.process.plugins import BackgroundTask

 from kimchi import disks
 from kimchi import netinfo
+from kimchi import xmlutils
 from kimchi.basemodel import Singleton
 from kimchi.exception import NotFoundError, OperationFailed
 from kimchi.model.vms import DOM_STATE_MAP
@@ -199,3 +200,56 @@ class PartitionModel(object):
             raise NotFoundError("Partition %s not found in the host" % name)

         return disks.get_partition_details(name)
+
+
+class DevicesModel(object):
+    def __init__(self, **kargs):
+        self.conn = kargs['conn']
+
+    def get_list(self, _cap=None):
+        conn = self.conn.get()
+        if _cap == None:
+            dev_names = [name.name() for name in conn.listAllDevices(0)]
+        elif _cap == 'fc_host':
+            dev_names = self._get_devices_fc_host()
+        else:
+            # Get devices with required capability
+            dev_names = conn.listDevices(_cap,0)
+        return dev_names
+
+    def _get_devices_fc_host(self):
+        conn = self.conn.get()
+        # Libvirt < 1.0.5 does not support fc_host capability
+        if not self.fc_host_support:
+            ret = []
+            scsi_hosts = conn.listDevices('scsi_host',0)
+            for host in scsi_hosts:
+                xml = conn.nodeDeviceLookupByName(host).XMLDesc(0)
+                path = '/device/capability/capability/@type'
+                if 'fc_host' in xmlutils.xpath_get_text(xml, path):
+                    ret.append(host)
+            return ret
+        return conn.listDevices('fc_host',0)
+
+
+class DeviceModel(object):
+    def __init__(self, **kargs):
+        self.conn = kargs['conn']
+
+    def lookup(self, nodedev_name):
+        conn = self.conn.get()
+        try:
+            dev_xml = conn.nodeDeviceLookupByName(nodedev_name).XMLDesc(0)
+        except:
+            raise NotFoundError('Node device "%s" not found' % nodedev_name)
+        cap_type = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/@type')
+        wwnn = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/wwnn')
+        wwpn = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/wwpn')
+        return {
+            'name': nodedev_name,
+            'adapter_type': cap_type,
+            'wwnn': wwnn,
+            'wwpn': wwpn}
--
1.8.5.3
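For reviewers who want to see what this backend will return on their host, the
same libvirt calls the model uses can be exercised directly from a Python
shell. This is only an illustrative sketch (stdlib ElementTree instead of
kimchi.xmlutils; device names vary per host):

    import libvirt
    from xml.etree import ElementTree

    conn = libvirt.openReadOnly('qemu:///system')
    for name in conn.listDevices('scsi_host', 0):
        # Same lookup DeviceModel does, parsed with the stdlib
        xml = conn.nodeDeviceLookupByName(name).XMLDesc(0)
        root = ElementTree.fromstring(xml)
        caps = [c.get('type') for c in root.iter('capability')]
        if 'fc_host' in caps:
            print name, caps
    conn.close()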

On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices.
+    def _get_devices_fc_host(self):
+        conn = self.conn.get()
+        # Libvirt < 1.0.5 does not support fc_host capability
+        if not self.fc_host_support:
Where does this self.fc_host_support come from?

On 02/10/2014 11:28 AM, Aline Manera wrote:
On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices.
+    def _get_devices_fc_host(self):
+        conn = self.conn.get()
+        # Libvirt < 1.0.5 does not support fc_host capability
+        if not self.fc_host_support:
Where does this self.fc_host_support come from?

Humm, good catch. I did not get this problem in tests because _get_resources in src/kimchi/control/base.py hides the problem by returning "[]". Fixing.
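For what it is worth, one possible fix is sketched below. It is only a
suggestion; it assumes the CapabilitiesModel added in patch 2/5 behaves as the
singleton the rest of the model code treats it as, so the flag is resolved in
__init__ instead of read from an attribute that is never assigned:

    from kimchi.model.config import CapabilitiesModel

    class DevicesModel(object):
        def __init__(self, **kargs):
            self.conn = kargs['conn']
            # Take the flag from the feature-test result instead of an
            # undefined instance attribute.
            self.fc_host_support = CapabilitiesModel().fc_host_support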

On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices.
You also need to update API.md to add Devices/Device Collection and Resource
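A minimal API.md entry could look like the sketch below; the /host/devices URI
follows the way Partitions is mounted under the Host resource, the field list
mirrors what DeviceModel.lookup() returns, and the wording is only a
suggestion:

    ### Collection: Devices

    **URI:** /host/devices

    * **GET**: Retrieve a list of host node devices (optionally filtered by
      capability, e.g. fc_host, if the control layer exposes that filter)

    ### Resource: Device

    **URI:** /host/devices/*:name*

    * **GET**: Retrieve information of a single node device
        * name: The name of the node device
        * adapter_type: Capability type(s) of the adapter (e.g. 'fc_host')
        * wwnn: World Wide Node Name of the HBA, when available
        * wwpn: World Wide Port Name of the HBA, when available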

On 02/05/2014 10:18 PM, Rodrigo Trujillo wrote:
In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices.
+class DeviceModel(object):
+    def __init__(self, **kargs):
+        self.conn = kargs['conn']
+
+    def lookup(self, nodedev_name):
+        conn = self.conn.get()
+        try:
+            dev_xml = conn.nodeDeviceLookupByName(nodedev_name).XMLDesc(0)
+        except:
+            raise NotFoundError('Node device "%s" not found' % nodedev_name)
+        cap_type = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/@type')

capability/capability/ double capability?
+        wwnn = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/wwnn')
+        wwpn = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/wwpn')
+        return {
+            'name': nodedev_name,
+            'adapter_type': cap_type,
+            'wwnn': wwnn,
+            'wwpn': wwpn}

use lxml?
    from lxml import objectify

    root = objectify.fromstring(dev_xml)
    cap = root.capability.capability
    return {
        'adapter_type': cap.get("type"),
        "wwnn": cap.wwnn,
        "wwpn": cap.wwpn
    }

--
Thanks and best regards!

Sheldon Feng(冯少合)<shaohef@linux.vnet.ibm.com>
IBM Linux Technology Center

Just a minor comment below.

On 02/10/2014 11:28 PM, Sheldon wrote:
On 02/05/2014 10:18 PM, Rodrigo Trujillo wrote:
In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices.
+        wwnn = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/wwnn')

So this DeviceModel is not only for 'fc_host', right? Then other devices may not have wwnn and wwpn.

On 02/10/2014 01:39 PM, Sheldon wrote:
Just a minor comment below
On 02/10/2014 11:28 PM, Sheldon wrote:
On 02/05/2014 10:18 PM, Rodrigo Trujillo wrote:
In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices.
+        wwnn = xmlutils.xpath_get_text(
+            dev_xml, '/device/capability/capability/wwnn')

So this DeviceModel is not only for 'fc_host', right? Then other devices may not have wwnn and wwpn.

Yes, you are right. At this moment NodeDevices is going to support only the scsi_host type, which is the device that provides the fc_host functionality. As Kimchi evolves we can extend the support. wwpn and wwnn will be "" if not present.
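To make the '"" if not present' behaviour concrete: an xpath lookup on a
device without the nested fc_host capability simply returns no text nodes. A
standalone sketch, using plain lxml and assuming kimchi.xmlutils is a thin
wrapper around the same kind of xpath call:

    from lxml import etree

    DEV_XML = """
    <device>
      <name>scsi_host0</name>
      <capability type='scsi_host'>
        <host>0</host>
      </capability>
    </device>"""

    root = etree.fromstring(DEV_XML)
    wwnn = root.xpath('/device/capability/capability/wwnn/text()')
    # No nested fc_host capability here, so wwnn == [] and can be
    # normalized to '' before lookup() returns it.
    print repr(wwnn or '')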
use lxml?

As my code is working, I think it is better to change this later. If xmlutils.xpath_get_text should not be used, then we need to refactor all the Kimchi code.

This patch creates functions that allow kimchi users to create an libvirt SCSI storagepool using the rest API. This patch creates the feature test to check fc_host capability in libvirt. This patch implements basic routines to add a disk (scsi) to a new vm template, based on given volumes (LUN name) from UI or API directly.

Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com>
---
 docs/API.md                            |  5 +++-
 src/kimchi/API.json                    | 14 ++++++++--
 src/kimchi/featuretests.py             | 27 +++++++++++++++++++
 src/kimchi/model/config.py             |  5 +++-
 src/kimchi/model/host.py               |  4 +--
 src/kimchi/model/libvirtstoragepool.py | 48 ++++++++++++++++++++++++++++++++--
 src/kimchi/model/storagepools.py       | 22 +++++++++++++---
 src/kimchi/model/templates.py          |  5 ++++
 src/kimchi/model/vms.py                | 25 ++++++++++++++++--
 src/kimchi/vmtemplate.py               | 31 +++++++++++++++++++++-
 10 files changed, 172 insertions(+), 14 deletions(-)

diff --git a/docs/API.md b/docs/API.md
index 580728c..7f0628d 100644
--- a/docs/API.md
+++ b/docs/API.md
@@ -55,6 +55,8 @@ the following general conventions:
             Independent Computing Environments
           * null: Graphics is disabled or type not supported
       * listen: The network which the vnc/spice server listens on.
+    * volumes *(optional)*: List of Fibre channel LUN names to be assigned as
+      disk to VM. Required if pool is type SCSI.

 ### Resource: Virtual Machine

@@ -269,7 +271,7 @@ A interface represents available network interface on VM.
 * **POST**: Create a new Storage Pool
     * name: The name of the Storage Pool.
     * type: The type of the defined Storage Pool.
-            Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi'
+            Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi, scsi'
     * path: The path of the defined Storage Pool.
             For 'kimchi-iso' pool refers to targeted deep scan path.
             Pool types: 'dir', 'kimchi-iso'.
@@ -288,6 +290,7 @@ A interface represents available network interface on VM.
               Pool types: 'iscsi'.
         * username: Login username of the iSCSI target.
         * password: Login password of the iSCSI target.
+        * adapter_name: *(optional) Scsi host name.

 ### Resource: Storage Pool

diff --git a/src/kimchi/API.json b/src/kimchi/API.json
index 08c77c5..842fb11 100644
--- a/src/kimchi/API.json
+++ b/src/kimchi/API.json
@@ -37,7 +37,7 @@
             "type": {
                 "description": "The type of the defined Storage Pool",
                 "type": "string",
-                "pattern": "^dir|netfs|logical|kimchi-iso$",
+                "pattern": "^dir|netfs|logical|kimchi-iso|scsi$",
                 "required": true
             },
             "path": {
@@ -76,6 +76,10 @@
                 "minimum": 1,
                 "maximum": 65535
             },
+            "adapter_name": {
+                "description": "SCSI host name",
+                "type": "string"
+            },
             "auth": {
                 "description": "Storage back-end authentication information",
                 "type": "object",
@@ -112,7 +116,13 @@
                 "type": "string",
                 "pattern": "^/storagepools/[^/]+/?$"
             },
-            "graphics": { "$ref": "#/kimchitype/graphics" }
+            "graphics": { "$ref": "#/kimchitype/graphics" },
+            "volumes": {
+                "description": "list of scsi volumes to be assigned to the new VM.",
+                "type": "array",
+                "items": { "type": "string" },
+                "uniqueItems": true
+            }
         }
     },
     "vm_update": {
diff --git a/src/kimchi/featuretests.py b/src/kimchi/featuretests.py
index d924050..f391eb6 100644
--- a/src/kimchi/featuretests.py
+++ b/src/kimchi/featuretests.py
@@ -57,6 +57,18 @@ ISO_STREAM_XML = """
   </devices>
 </domain>"""

+SCSI_FC_XML = """
+<pool type='scsi'>
+  <name>TEST_SCSI_FC_POOL</name>
+  <source>
+    <adapter type='fc_host' wwnn='1234567890abcdef' wwpn='abcdef1234567890'/>
+  </source>
+  <target>
+    <path>/dev/disk/by-path</path>
+  </target>
+</pool>
+"""
+

 class FeatureTests(object):

@@ -150,3 +162,18 @@ class FeatureTests(object):
             return False

         return True
+
+    @staticmethod
+    def libvirt_support_fc_host():
+        try:
+            conn = libvirt.open('qemu:///system')
+            pool = None
+            pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0)
+        except libvirt.libvirtError as e:
+            if e.get_error_code() == 27:
+                # Libvirt requires adapter name, not needed when supports to FC
+                return False
+        finally:
+            pool is None or pool.undefine()
+            conn is None or conn.close()
+        return True
diff --git a/src/kimchi/model/config.py b/src/kimchi/model/config.py
index 0e66e02..6eb0e10 100644
--- a/src/kimchi/model/config.py
+++ b/src/kimchi/model/config.py
@@ -49,6 +49,7 @@ class CapabilitiesModel(object):
         self.qemu_stream = False
         self.qemu_stream_dns = False
         self.libvirt_stream_protocols = []
+        self.fc_host_support = False

         # Subscribe function to set host capabilities to be run when cherrypy
         # server is up
@@ -60,6 +61,7 @@ class CapabilitiesModel(object):
         self.qemu_stream = FeatureTests.qemu_supports_iso_stream()
         self.qemu_stream_dns = FeatureTests.qemu_iso_stream_dns()
         self.nfs_target_probe = FeatureTests.libvirt_support_nfs_probe()
+        self.fc_host_support = FeatureTests.libvirt_support_fc_host()

         self.libvirt_stream_protocols = []
         for p in ['http', 'https', 'ftp', 'ftps', 'tftp']:
@@ -75,7 +77,8 @@ class CapabilitiesModel(object):
         return {'libvirt_stream_protocols': self.libvirt_stream_protocols,
                 'qemu_stream': self.qemu_stream,
                 'screenshot': VMScreenshot.get_stream_test_result(),
-                'system_report_tool': bool(report_tool)}
+                'system_report_tool': bool(report_tool),
+                'fc_host_support': self.fc_host_support}


 class DistrosModel(object):
diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py
index 0545a88..816e2e8 100644
--- a/src/kimchi/model/host.py
+++ b/src/kimchi/model/host.py
@@ -218,7 +218,7 @@ class DevicesModel(object):
         return dev_names

     def _get_devices_fc_host(self):
-        conn = self.conn.get()
+        conn = self.conn.get()
         # Libvirt < 1.0.5 does not support fc_host capability
         if not self.fc_host_support:
             ret = []
@@ -226,7 +226,7 @@ class DevicesModel(object):
             for host in scsi_hosts:
                 xml = conn.nodeDeviceLookupByName(host).XMLDesc(0)
                 path = '/device/capability/capability/@type'
-                if 'fc_host' in xmlutils.xpath_get_text(xml, path):
+                if 'fc_host' in xmlutils.xpath_get_text(xml, path):
                     ret.append(host)
             return ret
         return conn.listDevices('fc_host',0)
diff --git a/src/kimchi/model/libvirtstoragepool.py b/src/kimchi/model/libvirtstoragepool.py
index f4dbf2e..ceedbde 100644
--- a/src/kimchi/model/libvirtstoragepool.py
+++ b/src/kimchi/model/libvirtstoragepool.py
@@ -29,8 +29,7 @@ import libvirt
 from kimchi.exception import InvalidParameter, OperationFailed, TimeoutExpired
 from kimchi.iscsi import TargetClient
 from kimchi.rollbackcontext import RollbackContext
-from kimchi.utils import parse_cmd_output, run_command
-
+from kimchi.utils import kimchi_log, parse_cmd_output, run_command

 class StoragePoolDef(object):
     @classmethod
@@ -175,6 +174,51 @@ class LogicalPoolDef(StoragePoolDef):
         return xml


+class ScsiPoolDef(StoragePoolDef):
+    poolType = 'scsi'
+
+    def prepare(self, conn=None):
+        tmp_name = self.poolArgs['source']['name']
+        self.poolArgs['source']['name'] = tmp_name.replace('scsi_','')
+        # fc_host adapters type are only available in libvirt >= 1.0.5
+        if not self.poolArgs['fc_host_support']:
+            self.poolArgs['source']['adapter_type'] = 'scsi_host'
+            msg = "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; "\
+                  "setting SCSI adapter type as 'scsi_host'; "\
+                  "ignoring wwnn and wwpn." %tmp_name
+            kimchi_log.info(msg)
+        # Path for Fibre Channel scsi hosts
+        self.poolArgs['path'] = '/dev/disk/by-path'
+        if not self.poolArgs['source']['adapter_type']:
+            self.poolArgs['source']['adapter_type'] = 'scsi_host'
+
+    @property
+    def xml(self):
+        # Required parameters
+        # name:
+        # source[adapter_type]:
+        # source[name]:
+        # source[wwnn]:
+        # source[wwpn]:
+        # path:
+
+        xml = """
+        <pool type='scsi'>
+          <name>{name}</name>
+          <source>
+            <adapter type='{source[adapter_type]}'\
+                     name='{source[name]}'\
+                     wwnn='{source[wwnn]}'\
+                     wwpn='{source[wwpn]}'/>
+          </source>
+          <target>
+            <path>{path}</path>
+          </target>
+        </pool>
+        """.format(**self.poolArgs)
+        return xml
+
+
 class IscsiPoolDef(StoragePoolDef):
     poolType = 'iscsi'

diff --git a/src/kimchi/model/storagepools.py b/src/kimchi/model/storagepools.py
index 233a8a7..9be7dad 100644
--- a/src/kimchi/model/storagepools.py
+++ b/src/kimchi/model/storagepools.py
@@ -26,6 +26,8 @@ from kimchi import xmlutils
 from kimchi.scan import Scanner
 from kimchi.exception import InvalidOperation, MissingParameter
 from kimchi.exception import NotFoundError, OperationFailed
+from kimchi.model.config import CapabilitiesModel
+from kimchi.model.host import DeviceModel
 from kimchi.model.libvirtstoragepool import StoragePoolDef
 from kimchi.utils import add_task, kimchi_log

@@ -38,7 +40,11 @@ POOL_STATE_MAP = {0: 'inactive',
                   4: 'inaccessible'}

 STORAGE_SOURCES = {'netfs': {'addr': '/pool/source/host/@name',
-                             'path': '/pool/source/dir/@path'}}
+                             'path': '/pool/source/dir/@path'},
+                   'scsi': {'adapter_type': '/pool/source/adapter/@type',
+                            'adapter_name': '/pool/source/adapter/@name',
+                            'wwnn': '/pool/source/adapter/@wwnn',
+                            'wwpn': '/pool/source/adapter/@wwpn'}}


 class StoragePoolsModel(object):
@@ -47,6 +53,8 @@ class StoragePoolsModel(object):
         self.objstore = kargs['objstore']
         self.scanner = Scanner(self._clean_scan)
         self.scanner.delete()
+        self.caps = CapabilitiesModel()
+        self.device = DeviceModel(**kargs)

     def get_list(self):
         try:
@@ -67,6 +75,13 @@ class StoragePoolsModel(object):
             if params['type'] == 'kimchi-iso':
                 task_id = self._do_deep_scan(params)
+
+            if params['type'] == 'scsi':
+                extra_params = self.device.lookup(
+                    params['source']['adapter_name'])
+                params['source'].update(extra_params)
+                params['fc_host_support'] = self.caps.fc_host_support
+
             poolDef = StoragePoolDef.create(params)
             poolDef.prepare(conn)
             xml = poolDef.xml
@@ -84,9 +99,10 @@ class StoragePoolsModel(object):
                 return name

             pool = conn.storagePoolDefineXML(xml, 0)
-            if params['type'] in ['logical', 'dir', 'netfs']:
+            if params['type'] in ['logical', 'dir', 'netfs', 'scsi']:
                 pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW)
-                # autostart dir and logical storage pool created from kimchi
+                # autostart dir, logical, netfs and scsi storage pools created
+                # from kimchi
                 pool.setAutostart(1)
             else:
                 # disable autostart for others
diff --git a/src/kimchi/model/templates.py b/src/kimchi/model/templates.py
index 03632a6..b004578 100644
--- a/src/kimchi/model/templates.py
+++ b/src/kimchi/model/templates.py
@@ -161,6 +161,11 @@ class LibvirtVMTemplate(VMTemplate):
         xml = pool.XMLDesc(0)
         return xmlutils.xpath_get_text(xml, "/pool/target/path")[0]

+    def _get_storage_type(self):
+        pool = self._storage_validate()
+        xml = pool.XMLDesc(0)
+        return xmlutils.xpath_get_text(xml, "/pool/@type")[0]
+
     def fork_vm_storage(self, vm_uuid):
         # Provision storage:
         # TODO: Rebase on the storage API once upstream
diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py
index d4384a1..4623e28 100644
--- a/src/kimchi/model/vms.py
+++ b/src/kimchi/model/vms.py
@@ -155,6 +155,11 @@ class VMsModel(object):
                                 'diskRdKB': diskRdKB,
                                 'diskWrKB': diskWrKB})

+    def _get_volume_path(self, pool, vol):
+        conn = self.conn.get()
+        pool = conn.storagePoolLookupByName(pool)
+        return pool.storageVolLookupByName(vol).path()
+
     def create(self, params):
         conn = self.conn.get()
         t_name = template_name_from_uri(params['template'])
@@ -169,6 +174,7 @@ class VMsModel(object):
         pool_uri = params.get('storagepool')
         if pool_uri:
             vm_overrides['storagepool'] = pool_uri
+        vm_overrides['fc_host_support'] = self.caps.fc_host_support

         t = TemplateModel.get_template(t_name, self.objstore, self.conn,
                                        vm_overrides)
@@ -177,7 +183,21 @@ class VMsModel(object):
             raise InvalidOperation(err)

         t.validate()
-        vol_list = t.fork_vm_storage(vm_uuid)
+
+        # If storagepool is SCSI, volumes will be LUNs and must be passed by
+        # the user from UI or manually.
+        vol_list = []
+        if t._get_storage_type() == 'scsi':
+            if not params.get('volumes'):
+                raise InvalidOperation("Volume list (LUNs names) not given.")
+            else:
+                # Get system path of the LUNs
+                pool = t.info['storagepool'].split('/')[-1]
+                for vol in params.get('volumes'):
+                    path = self._get_volume_path(pool, vol)
+                    vol_list.append((vol, path))
+        else:
+            vol_list = t.fork_vm_storage(vm_uuid)

         # Store the icon for displaying later
         icon = t.info.get('icon')
@@ -193,7 +213,8 @@ class VMsModel(object):
         xml = t.to_vm_xml(name, vm_uuid,
                           libvirt_stream=libvirt_stream,
                           qemu_stream_dns=self.caps.qemu_stream_dns,
-                          graphics=graphics)
+                          graphics=graphics,
+                          volumes=vol_list)

         try:
             conn.defineXML(xml.encode('utf-8'))
diff --git a/src/kimchi/vmtemplate.py b/src/kimchi/vmtemplate.py
index 58147e3..368d0b4 100644
--- a/src/kimchi/vmtemplate.py
+++ b/src/kimchi/vmtemplate.py
@@ -49,6 +49,7 @@ class VMTemplate(object):
         """
         self.name = args['name']
         self.info = {}
+        self.fc_host_support = args.get('fc_host_support')

         # Identify the cdrom if present
         iso_distro = iso_version = 'unknown'
@@ -180,6 +181,25 @@ class VMTemplate(object):
             graphics_xml = graphics_xml + spicevmc_xml
         return graphics_xml

+    def _get_scsi_disks_xml(self, luns):
+        ret = ""
+        # Passthrough configuration
+        disk_xml = """
+            <disk type='volume' device='lun'>
+              <driver name='qemu' type='raw'/>
+              <source dev='%(src)s'/>
+              <target dev='%(dev)s' bus='scsi'/>
+            </disk>"""
+        if not self.fc_host_support:
+            disk_xml = disk_xml.replace('volume','block')
+
+        # Creating disk xml for each lun passed
+        for index,(lun, path) in enumerate(luns):
+            dev = "sd%s" % string.lowercase[index]
+            params = {'src': path, 'dev': dev}
+            ret = ret + disk_xml % params
+        return ret
+
     def to_volume_list(self, vm_uuid):
         storage_path = self._get_storage_path()
         ret = []
@@ -225,7 +245,6 @@ class VMTemplate(object):
         params = dict(self.info)
         params['name'] = vm_name
         params['uuid'] = vm_uuid
-        params['disks'] = self._get_disks_xml(vm_uuid)
         params['networks'] = self._get_networks_xml()
         params['qemu-namespace'] = ''
         params['cdroms'] = ''
@@ -233,6 +252,13 @@ class VMTemplate(object):
         graphics = kwargs.get('graphics')
         params['graphics'] = self._get_graphics_xml(graphics)

+        # Current implementation just allows to create disk in one single
+        # storage pool, so we cannot mix the types (scsi volumes vs img file)
+        if self._get_storage_type() == 'scsi':
+            params['disks'] = self._get_scsi_disks_xml(kwargs.get('volumes'))
+        else:
+            params['disks'] = self._get_disks_xml(vm_uuid)
+
         qemu_stream_dns = kwargs.get('qemu_stream_dns', False)
         libvirt_stream = kwargs.get('libvirt_stream', False)
         cdrom_xml = self._get_cdrom_xml(libvirt_stream, qemu_stream_dns)
@@ -292,3 +318,6 @@ class VMTemplate(object):

     def _get_storage_path(self):
         return ''
+
+    def _get_storage_type(self):
+        return ''
--
1.8.5.3
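To make the new API concrete, creating an fc_host-backed pool could look like
the sketch below. The URL is a placeholder, 'scsi_host5' is a hypothetical
adapter name (take a real one from the node devices listing), and the
authentication a real Kimchi server requires is omitted here:

    import json
    import urllib2

    URL = 'http://localhost:8000/storagepools'  # placeholder; adjust to your setup

    body = {'name': 'fc_pool',
            'type': 'scsi',
            'source': {'adapter_name': 'scsi_host5'}}
    req = urllib2.Request(URL, json.dumps(body),
                          {'Content-Type': 'application/json',
                           'Accept': 'application/json'})
    # A real deployment also needs a valid session/credentials.
    print urllib2.urlopen(req).read()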

On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch creates functions that allow kimchi users to create an libvirt SCSI storagepool using the rest API. This patch creates the feature test to check fc_host capability in libvirt. This patch implements basic routines to add a disk (scsi) to a new vm template, based on given volumes (LUN name) from UI or API directly.
+ "volumes": { + "description": "list of scsi volumes to be assigned to the new VM.", + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true
Is it a single value? At least from the UI I am only able to select a single LUN to create the VM.
+ } } }, "vm_update": { diff --git a/src/kimchi/featuretests.py b/src/kimchi/featuretests.py index d924050..f391eb6 100644 --- a/src/kimchi/featuretests.py +++ b/src/kimchi/featuretests.py @@ -57,6 +57,18 @@ ISO_STREAM_XML = """ </devices> </domain>"""
+SCSI_FC_XML = """ +<pool type='scsi'> + <name>TEST_SCSI_FC_POOL</name> + <source> + <adapter type='fc_host' wwnn='1234567890abcdef' wwpn='abcdef1234567890'/> + </source> + <target> + <path>/dev/disk/by-path</path> + </target> +</pool> +""" +
class FeatureTests(object):
@@ -150,3 +162,18 @@ class FeatureTests(object): return False
+    @staticmethod
+    def libvirt_support_fc_host():
+        try:
+            conn = libvirt.open('qemu:///system')
+ pool = None
You can remove the above line
+            pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0)
+        except libvirt.libvirtError as e:
+            if e.get_error_code() == 27:
+                # Libvirt requires adapter name, not needed when supports to FC
+                return False
+        finally:
+            pool is None or pool.undefine()
+            conn is None or conn.close()
+        return True
Did you run it when libvirt does not support fc_host? Does libvirt display some error message in this case? If so, it would be good to silence the errors with disable_screen_error_logging()/enable_screen_error_logging(), the same mechanism used for the ISO streaming tests.
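For reference, a sketch of how the probe could be wrapped (shown as the
revised static method, and assuming disable_screen_error_logging() and
enable_screen_error_logging() are the same FeatureTests helpers used around
the ISO streaming probe):

    @staticmethod
    def libvirt_support_fc_host():
        try:
            FeatureTests.disable_screen_error_logging()
            conn = libvirt.open('qemu:///system')
            pool = None
            pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0)
        except libvirt.libvirtError as e:
            if e.get_error_code() == 27:
                # Libvirt requires an adapter name when fc_host is not
                # supported
                return False
        finally:
            FeatureTests.enable_screen_error_logging()
            pool is None or pool.undefine()
            conn is None or conn.close()
        return True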
         return {'libvirt_stream_protocols': self.libvirt_stream_protocols,
                 'qemu_stream': self.qemu_stream,
                 'screenshot': VMScreenshot.get_stream_test_result(),
-                'system_report_tool': bool(report_tool)}
+                'system_report_tool': bool(report_tool),
+ 'fc_host_support': self.fc_host_support}
It is only used in the backend. There is no need to expose it in the API.
diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py
index 0545a88..816e2e8 100644
--- a/src/kimchi/model/host.py
+++ b/src/kimchi/model/host.py
@@ -218,7 +218,7 @@ class DevicesModel(object):
         return dev_names

     def _get_devices_fc_host(self):
-        conn = self.conn.get()
+        conn = self.conn.get()
         # Libvirt < 1.0.5 does not support fc_host capability
         if not self.fc_host_support:
             ret = []
@@ -226,7 +226,7 @@ class DevicesModel(object):
             for host in scsi_hosts:
                 xml = conn.nodeDeviceLookupByName(host).XMLDesc(0)
                 path = '/device/capability/capability/@type'
-                if 'fc_host' in xmlutils.xpath_get_text(xml, path):
+                if 'fc_host' in xmlutils.xpath_get_text(xml, path):
                     ret.append(host)
             return ret
Please join these whitespace changes into the patch that first modifies this file.
+class ScsiPoolDef(StoragePoolDef):
+    poolType = 'scsi'
+
+    def prepare(self, conn=None):
+        tmp_name = self.poolArgs['source']['name']
+        self.poolArgs['source']['name'] = tmp_name.replace('scsi_','')
From API.md the SCSI host will be in "adapter_name" instead of "name"
+        # fc_host adapters type are only available in libvirt >= 1.0.5
+        if not self.poolArgs['fc_host_support']:
+            self.poolArgs['source']['adapter_type'] = 'scsi_host'
+            msg = "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; "\
+                  "setting SCSI adapter type as 'scsi_host'; "\
+                  "ignoring wwnn and wwpn." %tmp_name
+            kimchi_log.info(msg)
+        # Path for Fibre Channel scsi hosts
+        self.poolArgs['path'] = '/dev/disk/by-path'
+        if not self.poolArgs['source']['adapter_type']:
+            self.poolArgs['source']['adapter_type'] = 'scsi_host'
When do you set the adapter_type to fc_host?
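One possible way to address this, sketched only as a suggestion (it assumes
adapter_type may be missing or empty in poolArgs['source'] when the caller
does not send it):

    def prepare(self, conn=None):
        tmp_name = self.poolArgs['source']['name']
        self.poolArgs['source']['name'] = tmp_name.replace('scsi_', '')
        if self.poolArgs['fc_host_support']:
            # Newer libvirt: default to the fc_host adapter type unless the
            # caller explicitly asked for something else.
            if not self.poolArgs['source'].get('adapter_type'):
                self.poolArgs['source']['adapter_type'] = 'fc_host'
        else:
            # Libvirt < 1.0.5: only scsi_host adapters are accepted.
            self.poolArgs['source']['adapter_type'] = 'scsi_host'
            kimchi_log.info("Libvirt < 1.0.5: using adapter type 'scsi_host' "
                            "for '%s'; wwnn and wwpn are ignored." % tmp_name)
        # Path for Fibre Channel scsi hosts
        self.poolArgs['path'] = '/dev/disk/by-path'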

On 02/10/2014 12:01 PM, Aline Manera wrote:
On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch creates functions that allow kimchi users to create an libvirt SCSI storagepool using the rest API. This patch creates the feature test to check fc_host capability in libvirt. This patch implements basic routines to add a disk (scsi) to a new vm template, based on given volumes (LUN name) from UI or API directly.
+ "volumes": { + "description": "list of scsi volumes to be assigned to the new VM.", + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true
Does it take a single value? At least from the UI I am only able to select a single LUN to create the VM. The UI allows only one, but when calling the API directly it is possible to add more than one LUN, like volumes=['lun-0:0:1', 'lun-0:0:2']; the items must be unique.
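For reference, a direct API call with more than one LUN would look roughly like the sketch below. The server URL, template name and LUN names are placeholders, and it assumes the python-requests library plus whatever authentication the deployment requires:

    import requests

    payload = {
        'template': '/templates/fc-template',   # template backed by a SCSI pool
        'volumes': ['lun-0:0:1', 'lun-0:0:2'],  # unique LUN names
    }
    resp = requests.post('http://localhost:8000/vms', json=payload)
    print resp.status_code, resp.text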
+ } } }, "vm_update": { diff --git a/src/kimchi/featuretests.py b/src/kimchi/featuretests.py index d924050..f391eb6 100644 --- a/src/kimchi/featuretests.py +++ b/src/kimchi/featuretests.py @@ -57,6 +57,18 @@ ISO_STREAM_XML = """ </devices> </domain>"""
+SCSI_FC_XML = """ +<pool type='scsi'> + <name>TEST_SCSI_FC_POOL</name> + <source> + <adapter type='fc_host' wwnn='1234567890abcdef' wwpn='abcdef1234567890'/> + </source> + <target> + <path>/dev/disk/by-path</path> + </target> +</pool> +""" +
class FeatureTests(object):
@@ -150,3 +162,18 @@ class FeatureTests(object): return False
return True + + @staticmethod + def libvirt_support_fc_host(): + try: + conn = libvirt.open('qemu:///system')
+ pool = None
You can remove the above line
The line is required because it is checked in the finally block.
+ pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0) + except libvirt.libvirtError as e: + if e.get_error_code() == 27: + # Libvirt requires adapter name, not needed when supports to FC + return False + finally: + pool is None or pool.undefine() + conn is None or conn.close() + return True
Did you run it when libvirt does not support fc_host? Does libvirt display an error message in this case? If so, it would be good to silence the errors with disable_screen_error_logging()/enable_screen_error_logging(), the same mechanism used for the ISO streaming tests.
Nice tip, thanks
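A rough sketch of how the feature test could look with that suggestion applied. It reuses SCSI_FC_XML and the libvirt import from featuretests.py, and assumes the disable_screen_error_logging()/enable_screen_error_logging() helpers mentioned above are available on FeatureTests, as with the ISO streaming test; this is only an illustration, not the final patch:

    @staticmethod
    def libvirt_support_fc_host():
        conn = None
        pool = None
        try:
            FeatureTests.disable_screen_error_logging()
            conn = libvirt.open('qemu:///system')
            # Without fc_host support libvirt rejects the XML because the
            # adapter name is missing (error code 27)
            pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0)
        except libvirt.libvirtError as e:
            if e.get_error_code() == 27:
                return False
        finally:
            FeatureTests.enable_screen_error_logging()
            pool is None or pool.undefine()
            conn is None or conn.close()
        return True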
diff --git a/src/kimchi/model/config.py b/src/kimchi/model/config.py index 0e66e02..6eb0e10 100644 --- a/src/kimchi/model/config.py +++ b/src/kimchi/model/config.py @@ -49,6 +49,7 @@ class CapabilitiesModel(object): self.qemu_stream = False self.qemu_stream_dns = False self.libvirt_stream_protocols = [] + self.fc_host_support = False
# Subscribe function to set host capabilities to be run when cherrypy # server is up @@ -60,6 +61,7 @@ class CapabilitiesModel(object): self.qemu_stream = FeatureTests.qemu_supports_iso_stream() self.qemu_stream_dns = FeatureTests.qemu_iso_stream_dns() self.nfs_target_probe = FeatureTests.libvirt_support_nfs_probe() + self.fc_host_support = FeatureTests.libvirt_support_fc_host()
self.libvirt_stream_protocols = [] for p in ['http', 'https', 'ftp', 'ftps', 'tftp']: @@ -75,7 +77,8 @@ class CapabilitiesModel(object): return {'libvirt_stream_protocols': self.libvirt_stream_protocols, 'qemu_stream': self.qemu_stream, 'screenshot': VMScreenshot.get_stream_test_result(), - 'system_report_tool': bool(report_tool)} + 'system_report_tool': bool(report_tool),
+ 'fc_host_support': self.fc_host_support}
It is only used in the backend. There is no need to expose it to the API.
ok
class DistrosModel(object): diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py index 0545a88..816e2e8 100644 --- a/src/kimchi/model/host.py +++ b/src/kimchi/model/host.py @@ -218,7 +218,7 @@ class DevicesModel(object): return dev_names
def _get_devices_fc_host(self): - conn = self.conn.get() + conn = self.conn.get() # Libvirt < 1.0.5 does not support fc_host capability if not self.fc_host_support: ret = [] @@ -226,7 +226,7 @@ class DevicesModel(object): for host in scsi_hosts: xml = conn.nodeDeviceLookupByName(host).XMLDesc(0) path = '/device/capability/capability/@type' - if 'fc_host' in xmlutils.xpath_get_text(xml, path): + if 'fc_host' in xmlutils.xpath_get_text(xml, path): ret.append(host) return ret
Please fold these changes into the patch where this file is first modified.
return conn.listDevices('fc_host',0) diff --git a/src/kimchi/model/libvirtstoragepool.py b/src/kimchi/model/libvirtstoragepool.py index f4dbf2e..ceedbde 100644 --- a/src/kimchi/model/libvirtstoragepool.py +++ b/src/kimchi/model/libvirtstoragepool.py @@ -29,8 +29,7 @@ import libvirt from kimchi.exception import InvalidParameter, OperationFailed, TimeoutExpired from kimchi.iscsi import TargetClient from kimchi.rollbackcontext import RollbackContext -from kimchi.utils import parse_cmd_output, run_command - +from kimchi.utils import kimchi_log, parse_cmd_output, run_command
class StoragePoolDef(object): @classmethod @@ -175,6 +174,51 @@ class LogicalPoolDef(StoragePoolDef): return xml
+class ScsiPoolDef(StoragePoolDef): + poolType = 'scsi' + + def prepare(self, conn=None): + tmp_name = self.poolArgs['source']['name'] + self.poolArgs['source']['name'] = tmp_name.replace('scsi_','')
From API.md the SCSI host will be in "adapter_name" instead of "name"
'source' is going to have "adapter_name", coming from the API call, and 'name', which comes from self.device.lookup() (see the code in my next comment, below).
+ # fc_host adapters type are only available in libvirt >= 1.0.5 + if not self.poolArgs['fc_host_support']: + self.poolArgs['source']['adapter_type'] = 'scsi_host' + msg = "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; "\ + "setting SCSI adapter type as 'scsi_host'; "\ + "ignoring wwnn and wwpn." %tmp_name + kimchi_log.info(msg) + # Path for Fibre Channel scsi hosts + self.poolArgs['path'] = '/dev/disk/by-path'
+ if not self.poolArgs['source']['adapter_type']: + self.poolArgs['source']['adapter_type'] = 'scsi_host'
When do you set the adapter_type to fc_host?
It comes from model/storagepools.py, in create() (extra_params):

    if params['type'] == 'scsi':
        extra_params = self.device.lookup(
            params['source']['adapter_name'])
        # Adds name, adapter_type, wwpn and wwnn to source information
        params['source'].update(extra_params)
        params['fc_host_support'] = self.caps.fc_host_support
+ + @property + def xml(self): + # Required parameters + # name: + # source[adapter_type]: + # source[name]: + # source[wwnn]: + # source[wwpn]: + # path: + + xml = """ + <pool type='scsi'> + <name>{name}</name> + <source> + <adapter type='{source[adapter_type]}'\ + name='{source[name]}'\ + wwnn='{source[wwnn]}'\ + wwpn='{source[wwpn]}'/> + </source> + <target> + <path>{path}</path> + </target> + </pool> + """.format(**self.poolArgs) + return xml + + class IscsiPoolDef(StoragePoolDef): poolType = 'iscsi'
diff --git a/src/kimchi/model/storagepools.py b/src/kimchi/model/storagepools.py index 233a8a7..9be7dad 100644 --- a/src/kimchi/model/storagepools.py +++ b/src/kimchi/model/storagepools.py @@ -26,6 +26,8 @@ from kimchi import xmlutils from kimchi.scan import Scanner from kimchi.exception import InvalidOperation, MissingParameter from kimchi.exception import NotFoundError, OperationFailed +from kimchi.model.config import CapabilitiesModel +from kimchi.model.host import DeviceModel from kimchi.model.libvirtstoragepool import StoragePoolDef from kimchi.utils import add_task, kimchi_log
@@ -38,7 +40,11 @@ POOL_STATE_MAP = {0: 'inactive', 4: 'inaccessible'}
STORAGE_SOURCES = {'netfs': {'addr': '/pool/source/host/@name', - 'path': '/pool/source/dir/@path'}} + 'path': '/pool/source/dir/@path'}, + 'scsi': {'adapter_type': '/pool/source/adapter/@type', + 'adapter_name': '/pool/source/adapter/@name', + 'wwnn': '/pool/source/adapter/@wwnn', + 'wwpn': '/pool/source/adapter/@wwpn'}}
class StoragePoolsModel(object): @@ -47,6 +53,8 @@ class StoragePoolsModel(object): self.objstore = kargs['objstore'] self.scanner = Scanner(self._clean_scan) self.scanner.delete() + self.caps = CapabilitiesModel() + self.device = DeviceModel(**kargs)
def get_list(self): try: @@ -67,6 +75,13 @@ class StoragePoolsModel(object):
if params['type'] == 'kimchi-iso': task_id = self._do_deep_scan(params) + + if params['type'] == 'scsi': + extra_params = self.device.lookup( + params['source']['adapter_name']) + params['source'].update(extra_params) + params['fc_host_support'] = self.caps.fc_host_support + poolDef = StoragePoolDef.create(params) poolDef.prepare(conn) xml = poolDef.xml @@ -84,9 +99,10 @@ class StoragePoolsModel(object): return name
pool = conn.storagePoolDefineXML(xml, 0) - if params['type'] in ['logical', 'dir', 'netfs']: + if params['type'] in ['logical', 'dir', 'netfs', 'scsi']: pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) - # autostart dir and logical storage pool created from kimchi + # autostart dir, logical, netfs and scsi storage pools created + # from kimchi pool.setAutostart(1) else: # disable autostart for others diff --git a/src/kimchi/model/templates.py b/src/kimchi/model/templates.py index 03632a6..b004578 100644 --- a/src/kimchi/model/templates.py +++ b/src/kimchi/model/templates.py @@ -161,6 +161,11 @@ class LibvirtVMTemplate(VMTemplate): xml = pool.XMLDesc(0) return xmlutils.xpath_get_text(xml, "/pool/target/path")[0]
+ def _get_storage_type(self): + pool = self._storage_validate() + xml = pool.XMLDesc(0) + return xmlutils.xpath_get_text(xml, "/pool/@type")[0] + def fork_vm_storage(self, vm_uuid): # Provision storage: # TODO: Rebase on the storage API once upstream diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index d4384a1..4623e28 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -155,6 +155,11 @@ class VMsModel(object): 'diskRdKB': diskRdKB, 'diskWrKB': diskWrKB})
+ def _get_volume_path(self, pool, vol): + conn = self.conn.get() + pool = conn.storagePoolLookupByName(pool) + return pool.storageVolLookupByName(vol).path() + def create(self, params): conn = self.conn.get() t_name = template_name_from_uri(params['template']) @@ -169,6 +174,7 @@ class VMsModel(object): pool_uri = params.get('storagepool') if pool_uri: vm_overrides['storagepool'] = pool_uri + vm_overrides['fc_host_support'] = self.caps.fc_host_support t = TemplateModel.get_template(t_name, self.objstore, self.conn, vm_overrides)
@@ -177,7 +183,21 @@ class VMsModel(object): raise InvalidOperation(err)
t.validate() - vol_list = t.fork_vm_storage(vm_uuid) + + # If storagepool is SCSI, volumes will be LUNs and must be passed by + # the user from UI or manually. + vol_list = [] + if t._get_storage_type() == 'scsi': + if not params.get('volumes'): + raise InvalidOperation("Volume list (LUNs names) not given.") + else: + # Get system path of the LUNs + pool = t.info['storagepool'].split('/')[-1] + for vol in params.get('volumes'): + path = self._get_volume_path(pool, vol) + vol_list.append((vol, path)) + else: + vol_list = t.fork_vm_storage(vm_uuid)
# Store the icon for displaying later icon = t.info.get('icon') @@ -193,7 +213,8 @@ class VMsModel(object): xml = t.to_vm_xml(name, vm_uuid, libvirt_stream=libvirt_stream, qemu_stream_dns=self.caps.qemu_stream_dns, - graphics=graphics) + graphics=graphics, + volumes=vol_list)
try: conn.defineXML(xml.encode('utf-8')) diff --git a/src/kimchi/vmtemplate.py b/src/kimchi/vmtemplate.py index 58147e3..368d0b4 100644 --- a/src/kimchi/vmtemplate.py +++ b/src/kimchi/vmtemplate.py @@ -49,6 +49,7 @@ class VMTemplate(object): """ self.name = args['name'] self.info = {} + self.fc_host_support = args.get('fc_host_support')
# Identify the cdrom if present iso_distro = iso_version = 'unknown' @@ -180,6 +181,25 @@ class VMTemplate(object): graphics_xml = graphics_xml + spicevmc_xml return graphics_xml
+ def _get_scsi_disks_xml(self, luns): + ret = "" + # Passthrough configuration + disk_xml = """ + <disk type='volume' device='lun'> + <driver name='qemu' type='raw'/> + <source dev='%(src)s'/> + <target dev='%(dev)s' bus='scsi'/> + </disk>""" + if not self.fc_host_support: + disk_xml = disk_xml.replace('volume','block') + + # Creating disk xml for each lun passed + for index,(lun, path) in enumerate(luns): + dev = "sd%s" % string.lowercase[index] + params = {'src': path, 'dev': dev} + ret = ret + disk_xml % params + return ret + def to_volume_list(self, vm_uuid): storage_path = self._get_storage_path() ret = [] @@ -225,7 +245,6 @@ class VMTemplate(object): params = dict(self.info) params['name'] = vm_name params['uuid'] = vm_uuid - params['disks'] = self._get_disks_xml(vm_uuid) params['networks'] = self._get_networks_xml() params['qemu-namespace'] = '' params['cdroms'] = '' @@ -233,6 +252,13 @@ class VMTemplate(object): graphics = kwargs.get('graphics') params['graphics'] = self._get_graphics_xml(graphics)
+ # Current implementation just allows to create disk in one single + # storage pool, so we cannot mix the types (scsi volumes vs img file) + if self._get_storage_type() == 'scsi': + params['disks'] = self._get_scsi_disks_xml(kwargs.get('volumes')) + else: + params['disks'] = self._get_disks_xml(vm_uuid) + qemu_stream_dns = kwargs.get('qemu_stream_dns', False) libvirt_stream = kwargs.get('libvirt_stream', False) cdrom_xml = self._get_cdrom_xml(libvirt_stream, qemu_stream_dns) @@ -292,3 +318,6 @@ class VMTemplate(object):
def _get_storage_path(self): return '' + + def _get_storage_type(self): + return ''
_______________________________________________ Kimchi-devel mailing list Kimchi-devel@ovirt.org http://lists.ovirt.org/mailman/listinfo/kimchi-devel

comments below On 02/05/2014 10:18 PM, Rodrigo Trujillo wrote: > This patch creates functions that allow kimchi users to create an libvirt > SCSI storagepool using the rest API. This patch creates the feature test > to check fc_host capability in libvirt. This patch implements basic > routines to add a disk (scsi) to a new vm template, based on given > volumes (LUN name) from UI or API directly. > > Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> > --- > docs/API.md | 5 +++- > src/kimchi/API.json | 14 ++++++++-- > src/kimchi/featuretests.py | 27 +++++++++++++++++++ > src/kimchi/model/config.py | 5 +++- > src/kimchi/model/host.py | 4 +-- > src/kimchi/model/libvirtstoragepool.py | 48 ++++++++++++++++++++++++++++++++-- > src/kimchi/model/storagepools.py | 22 +++++++++++++--- > src/kimchi/model/templates.py | 5 ++++ > src/kimchi/model/vms.py | 25 ++++++++++++++++-- > src/kimchi/vmtemplate.py | 31 +++++++++++++++++++++- > 10 files changed, 172 insertions(+), 14 deletions(-) > > diff --git a/docs/API.md b/docs/API.md > index 580728c..7f0628d 100644 > --- a/docs/API.md > +++ b/docs/API.md > @@ -55,6 +55,8 @@ the following general conventions: > Independent Computing Environments > * null: Graphics is disabled or type not supported > * listen: The network which the vnc/spice server listens on. > + * volumes *(optional)*: List of Fibre channel LUN names to be assigned as > + disk to VM. Required if pool is type SCSI. > > > ### Resource: Virtual Machine > @@ -269,7 +271,7 @@ A interface represents available network interface on VM. > * **POST**: Create a new Storage Pool > * name: The name of the Storage Pool. > * type: The type of the defined Storage Pool. > - Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi' > + Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi, scsi' 'iscsi, scsi' -> 'iscsi', 'scsi' ? > * path: The path of the defined Storage Pool. > For 'kimchi-iso' pool refers to targeted deep scan path. > Pool types: 'dir', 'kimchi-iso'. > @@ -288,6 +290,7 @@ A interface represents available network interface on VM. > Pool types: 'iscsi'. > * username: Login username of the iSCSI target. > * password: Login password of the iSCSI target. > + * adapter_name: *(optional) Scsi host name. * adapter_name *(optional)*: Scsi host name. > > ### Resource: Storage Pool > > diff --git a/src/kimchi/API.json b/src/kimchi/API.json > index 08c77c5..842fb11 100644 > --- a/src/kimchi/API.json > +++ b/src/kimchi/API.json > @@ -37,7 +37,7 @@ > "type": { > "description": "The type of the defined Storage Pool", > "type": "string", > - "pattern": "^dir|netfs|logical|kimchi-iso$", > + "pattern": "^dir|netfs|logical|kimchi-iso|scsi$", where is iscsi? 
as you say above: + Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi, scsi' > "required": true > }, > "path": { > @@ -76,6 +76,10 @@ > "minimum": 1, > "maximum": 65535 > }, > + "adapter_name": { > + "description": "SCSI host name", > + "type": "string" > + }, > "auth": { > "description": "Storage back-end authentication information", > "type": "object", > @@ -112,7 +116,13 @@ > "type": "string", > "pattern": "^/storagepools/[^/]+/?$" > }, > - "graphics": { "$ref": "#/kimchitype/graphics" } > + "graphics": { "$ref": "#/kimchitype/graphics" }, > + "volumes": { > + "description": "list of scsi volumes to be assigned to the new VM.", > + "type": "array", > + "items": { "type": "string" }, > + "uniqueItems": true > + } > } > }, > "vm_update": { > diff --git a/src/kimchi/featuretests.py b/src/kimchi/featuretests.py > index d924050..f391eb6 100644 > --- a/src/kimchi/featuretests.py > +++ b/src/kimchi/featuretests.py > @@ -57,6 +57,18 @@ ISO_STREAM_XML = """ > </devices> > </domain>""" > > +SCSI_FC_XML = """ > +<pool type='scsi'> > + <name>TEST_SCSI_FC_POOL</name> > + <source> > + <adapter type='fc_host' wwnn='1234567890abcdef' wwpn='abcdef1234567890'/> > + </source> > + <target> > + <path>/dev/disk/by-path</path> > + </target> > +</pool> > +""" > + > > class FeatureTests(object): > > @@ -150,3 +162,18 @@ class FeatureTests(object): > return False > > return True > + > + @staticmethod > + def libvirt_support_fc_host(): > + try: > + conn = libvirt.open('qemu:///system') > + pool = None > + pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0) > + except libvirt.libvirtError as e: > + if e.get_error_code() == 27: > + # Libvirt requires adapter name, not needed when supports to FC > + return False > + finally: > + pool is None or pool.undefine() > + conn is None or conn.close() > + return True > diff --git a/src/kimchi/model/config.py b/src/kimchi/model/config.py > index 0e66e02..6eb0e10 100644 > --- a/src/kimchi/model/config.py > +++ b/src/kimchi/model/config.py > @@ -49,6 +49,7 @@ class CapabilitiesModel(object): > self.qemu_stream = False > self.qemu_stream_dns = False > self.libvirt_stream_protocols = [] > + self.fc_host_support = False > > # Subscribe function to set host capabilities to be run when cherrypy > # server is up > @@ -60,6 +61,7 @@ class CapabilitiesModel(object): > self.qemu_stream = FeatureTests.qemu_supports_iso_stream() > self.qemu_stream_dns = FeatureTests.qemu_iso_stream_dns() > self.nfs_target_probe = FeatureTests.libvirt_support_nfs_probe() > + self.fc_host_support = FeatureTests.libvirt_support_fc_host() > > self.libvirt_stream_protocols = [] > for p in ['http', 'https', 'ftp', 'ftps', 'tftp']: > @@ -75,7 +77,8 @@ class CapabilitiesModel(object): > return {'libvirt_stream_protocols': self.libvirt_stream_protocols, > 'qemu_stream': self.qemu_stream, > 'screenshot': VMScreenshot.get_stream_test_result(), > - 'system_report_tool': bool(report_tool)} > + 'system_report_tool': bool(report_tool), > + 'fc_host_support': self.fc_host_support} oh, seems in your 1/5 patch, you have call fc_host_support > > class DistrosModel(object): > diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py > index 0545a88..816e2e8 100644 > --- a/src/kimchi/model/host.py > +++ b/src/kimchi/model/host.py > @@ -218,7 +218,7 @@ class DevicesModel(object): > return dev_names > > def _get_devices_fc_host(self): > - conn = self.conn.get() > + conn = self.conn.get() > # Libvirt < 1.0.5 does not support fc_host capability > if not self.fc_host_support: > ret = [] > @@ -226,7 +226,7 @@ 
class DevicesModel(object): > for host in scsi_hosts: > xml = conn.nodeDeviceLookupByName(host).XMLDesc(0) > path = '/device/capability/capability/@type' > - if 'fc_host' in xmlutils.xpath_get_text(xml, path): > + if 'fc_host' in xmlutils.xpath_get_text(xml, path): > ret.append(host) > return ret > return conn.listDevices('fc_host',0) > diff --git a/src/kimchi/model/libvirtstoragepool.py b/src/kimchi/model/libvirtstoragepool.py > index f4dbf2e..ceedbde 100644 > --- a/src/kimchi/model/libvirtstoragepool.py > +++ b/src/kimchi/model/libvirtstoragepool.py > @@ -29,8 +29,7 @@ import libvirt > from kimchi.exception import InvalidParameter, OperationFailed, TimeoutExpired > from kimchi.iscsi import TargetClient > from kimchi.rollbackcontext import RollbackContext > -from kimchi.utils import parse_cmd_output, run_command > - > +from kimchi.utils import kimchi_log, parse_cmd_output, run_command > > class StoragePoolDef(object): > @classmethod > @@ -175,6 +174,51 @@ class LogicalPoolDef(StoragePoolDef): > return xml > > > +class ScsiPoolDef(StoragePoolDef): > + poolType = 'scsi' > + > + def prepare(self, conn=None): > + tmp_name = self.poolArgs['source']['name'] > + self.poolArgs['source']['name'] = tmp_name.replace('scsi_','') > + # fc_host adapters type are only available in libvirt >= 1.0.5 > + if not self.poolArgs['fc_host_support']: > + self.poolArgs['source']['adapter_type'] = 'scsi_host' > + msg = "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; "\ > + "setting SCSI adapter type as 'scsi_host'; "\ > + "ignoring wwnn and wwpn." %tmp_name > + kimchi_log.info(msg) > + # Path for Fibre Channel scsi hosts > + self.poolArgs['path'] = '/dev/disk/by-path' > + if not self.poolArgs['source']['adapter_type']: > + self.poolArgs['source']['adapter_type'] = 'scsi_host' > + > + @property > + def xml(self): > + # Required parameters > + # name: > + # source[adapter_type]: > + # source[name]: > + # source[wwnn]: > + # source[wwpn]: > + # path: > + > + xml = """ > + <pool type='scsi'> > + <name>{name}</name> > + <source> > + <adapter type='{source[adapter_type]}'\ > + name='{source[name]}'\ > + wwnn='{source[wwnn]}'\ > + wwpn='{source[wwpn]}'/> > + </source> > + <target> > + <path>{path}</path> > + </target> > + </pool> > + """.format(**self.poolArgs) > + return xml > + > + > class IscsiPoolDef(StoragePoolDef): > poolType = 'iscsi' > > diff --git a/src/kimchi/model/storagepools.py b/src/kimchi/model/storagepools.py > index 233a8a7..9be7dad 100644 > --- a/src/kimchi/model/storagepools.py > +++ b/src/kimchi/model/storagepools.py > @@ -26,6 +26,8 @@ from kimchi import xmlutils > from kimchi.scan import Scanner > from kimchi.exception import InvalidOperation, MissingParameter > from kimchi.exception import NotFoundError, OperationFailed > +from kimchi.model.config import CapabilitiesModel > +from kimchi.model.host import DeviceModel > from kimchi.model.libvirtstoragepool import StoragePoolDef > from kimchi.utils import add_task, kimchi_log > > @@ -38,7 +40,11 @@ POOL_STATE_MAP = {0: 'inactive', > 4: 'inaccessible'} > > STORAGE_SOURCES = {'netfs': {'addr': '/pool/source/host/@name', > - 'path': '/pool/source/dir/@path'}} > + 'path': '/pool/source/dir/@path'}, > + 'scsi': {'adapter_type': '/pool/source/adapter/@type', > + 'adapter_name': '/pool/source/adapter/@name', > + 'wwnn': '/pool/source/adapter/@wwnn', > + 'wwpn': '/pool/source/adapter/@wwpn'}} > > > class StoragePoolsModel(object): > @@ -47,6 +53,8 @@ class StoragePoolsModel(object): > self.objstore = kargs['objstore'] > self.scanner = 
Scanner(self._clean_scan) > self.scanner.delete() > + self.caps = CapabilitiesModel() > + self.device = DeviceModel(**kargs) > > def get_list(self): > try: > @@ -67,6 +75,13 @@ class StoragePoolsModel(object): > > if params['type'] == 'kimchi-iso': > task_id = self._do_deep_scan(params) > + > + if params['type'] == 'scsi': > + extra_params = self.device.lookup( > + params['source']['adapter_name']) > + params['source'].update(extra_params) > + params['fc_host_support'] = self.caps.fc_host_support > + > poolDef = StoragePoolDef.create(params) > poolDef.prepare(conn) > xml = poolDef.xml > @@ -84,9 +99,10 @@ class StoragePoolsModel(object): > return name > > pool = conn.storagePoolDefineXML(xml, 0) > - if params['type'] in ['logical', 'dir', 'netfs']: > + if params['type'] in ['logical', 'dir', 'netfs', 'scsi']: > pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) > - # autostart dir and logical storage pool created from kimchi > + # autostart dir, logical, netfs and scsi storage pools created > + # from kimchi > pool.setAutostart(1) > else: > # disable autostart for others > diff --git a/src/kimchi/model/templates.py b/src/kimchi/model/templates.py > index 03632a6..b004578 100644 > --- a/src/kimchi/model/templates.py > +++ b/src/kimchi/model/templates.py > @@ -161,6 +161,11 @@ class LibvirtVMTemplate(VMTemplate): > xml = pool.XMLDesc(0) > return xmlutils.xpath_get_text(xml, "/pool/target/path")[0] > > + def _get_storage_type(self): > + pool = self._storage_validate() > + xml = pool.XMLDesc(0) > + return xmlutils.xpath_get_text(xml, "/pool/@type")[0] > + > def fork_vm_storage(self, vm_uuid): > # Provision storage: > # TODO: Rebase on the storage API once upstream > diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py > index d4384a1..4623e28 100644 > --- a/src/kimchi/model/vms.py > +++ b/src/kimchi/model/vms.py > @@ -155,6 +155,11 @@ class VMsModel(object): > 'diskRdKB': diskRdKB, > 'diskWrKB': diskWrKB}) > > + def _get_volume_path(self, pool, vol): > + conn = self.conn.get() > + pool = conn.storagePoolLookupByName(pool) > + return pool.storageVolLookupByName(vol).path() > + > def create(self, params): > conn = self.conn.get() > t_name = template_name_from_uri(params['template']) > @@ -169,6 +174,7 @@ class VMsModel(object): > pool_uri = params.get('storagepool') > if pool_uri: > vm_overrides['storagepool'] = pool_uri > + vm_overrides['fc_host_support'] = self.caps.fc_host_support > t = TemplateModel.get_template(t_name, self.objstore, self.conn, > vm_overrides) > > @@ -177,7 +183,21 @@ class VMsModel(object): > raise InvalidOperation(err) > > t.validate() > - vol_list = t.fork_vm_storage(vm_uuid) > + > + # If storagepool is SCSI, volumes will be LUNs and must be passed by > + # the user from UI or manually. 
> + vol_list = [] > + if t._get_storage_type() == 'scsi': > + if not params.get('volumes'): > + raise InvalidOperation("Volume list (LUNs names) not given.") > + else: > + # Get system path of the LUNs > + pool = t.info['storagepool'].split('/')[-1] > + for vol in params.get('volumes'): > + path = self._get_volume_path(pool, vol) > + vol_list.append((vol, path)) > + else: > + vol_list = t.fork_vm_storage(vm_uuid) > > # Store the icon for displaying later > icon = t.info.get('icon') > @@ -193,7 +213,8 @@ class VMsModel(object): > xml = t.to_vm_xml(name, vm_uuid, > libvirt_stream=libvirt_stream, > qemu_stream_dns=self.caps.qemu_stream_dns, > - graphics=graphics) > + graphics=graphics, > + volumes=vol_list) > > try: > conn.defineXML(xml.encode('utf-8')) > diff --git a/src/kimchi/vmtemplate.py b/src/kimchi/vmtemplate.py > index 58147e3..368d0b4 100644 > --- a/src/kimchi/vmtemplate.py > +++ b/src/kimchi/vmtemplate.py > @@ -49,6 +49,7 @@ class VMTemplate(object): > """ > self.name = args['name'] > self.info = {} > + self.fc_host_support = args.get('fc_host_support') > > # Identify the cdrom if present > iso_distro = iso_version = 'unknown' > @@ -180,6 +181,25 @@ class VMTemplate(object): > graphics_xml = graphics_xml + spicevmc_xml > return graphics_xml > > + def _get_scsi_disks_xml(self, luns): > + ret = "" > + # Passthrough configuration > + disk_xml = """ > + <disk type='volume' device='lun'> > + <driver name='qemu' type='raw'/> > + <source dev='%(src)s'/> > + <target dev='%(dev)s' bus='scsi'/> > + </disk>""" > + if not self.fc_host_support: > + disk_xml = disk_xml.replace('volume','block') > + > + # Creating disk xml for each lun passed > + for index,(lun, path) in enumerate(luns): > + dev = "sd%s" % string.lowercase[index] > + params = {'src': path, 'dev': dev} > + ret = ret + disk_xml % params > + return ret > + > def to_volume_list(self, vm_uuid): > storage_path = self._get_storage_path() > ret = [] > @@ -225,7 +245,6 @@ class VMTemplate(object): > params = dict(self.info) > params['name'] = vm_name > params['uuid'] = vm_uuid > - params['disks'] = self._get_disks_xml(vm_uuid) > params['networks'] = self._get_networks_xml() > params['qemu-namespace'] = '' > params['cdroms'] = '' > @@ -233,6 +252,13 @@ class VMTemplate(object): > graphics = kwargs.get('graphics') > params['graphics'] = self._get_graphics_xml(graphics) > > + # Current implementation just allows to create disk in one single > + # storage pool, so we cannot mix the types (scsi volumes vs img file) > + if self._get_storage_type() == 'scsi': > + params['disks'] = self._get_scsi_disks_xml(kwargs.get('volumes')) > + else: > + params['disks'] = self._get_disks_xml(vm_uuid) > + > qemu_stream_dns = kwargs.get('qemu_stream_dns', False) > libvirt_stream = kwargs.get('libvirt_stream', False) > cdrom_xml = self._get_cdrom_xml(libvirt_stream, qemu_stream_dns) > @@ -292,3 +318,6 @@ class VMTemplate(object): > > def _get_storage_path(self): > return '' > + > + def _get_storage_type(self): > + return '' -- Thanks and best regards! Sheldon Feng(冯少合)<shaohef@linux.vnet.ibm.com> IBM Linux Technology Center

On 02/10/2014 01:58 PM, Sheldon wrote:
comments below
On 02/05/2014 10:18 PM, Rodrigo Trujillo wrote:
This patch creates functions that allow Kimchi users to create a libvirt SCSI storagepool using the REST API. It also adds the feature test that checks the fc_host capability in libvirt, and implements basic routines to add a SCSI disk to a new VM template, based on the volumes (LUN names) given by the UI or directly via the API.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- docs/API.md | 5 +++- src/kimchi/API.json | 14 ++++++++-- src/kimchi/featuretests.py | 27 +++++++++++++++++++ src/kimchi/model/config.py | 5 +++- src/kimchi/model/host.py | 4 +-- src/kimchi/model/libvirtstoragepool.py | 48 ++++++++++++++++++++++++++++++++-- src/kimchi/model/storagepools.py | 22 +++++++++++++--- src/kimchi/model/templates.py | 5 ++++ src/kimchi/model/vms.py | 25 ++++++++++++++++-- src/kimchi/vmtemplate.py | 31 +++++++++++++++++++++- 10 files changed, 172 insertions(+), 14 deletions(-)
diff --git a/docs/API.md b/docs/API.md index 580728c..7f0628d 100644 --- a/docs/API.md +++ b/docs/API.md @@ -55,6 +55,8 @@ the following general conventions: Independent Computing Environments * null: Graphics is disabled or type not supported * listen: The network which the vnc/spice server listens on. + * volumes *(optional)*: List of Fibre channel LUN names to be assigned as + disk to VM. Required if pool is type SCSI.
### Resource: Virtual Machine @@ -269,7 +271,7 @@ A interface represents available network interface on VM. * **POST**: Create a new Storage Pool * name: The name of the Storage Pool. * type: The type of the defined Storage Pool. - Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi' + Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi, scsi' 'iscsi, scsi' -> 'iscsi', 'scsi' ? ack
* path: The path of the defined Storage Pool. For 'kimchi-iso' pool refers to targeted deep scan path. Pool types: 'dir', 'kimchi-iso'. @@ -288,6 +290,7 @@ A interface represents available network interface on VM. Pool types: 'iscsi'. * username: Login username of the iSCSI target. * password: Login password of the iSCSI target. + * adapter_name: *(optional) Scsi host name.
* adapter_name *(optional)*: Scsi host name. ack
### Resource: Storage Pool
diff --git a/src/kimchi/API.json b/src/kimchi/API.json index 08c77c5..842fb11 100644 --- a/src/kimchi/API.json +++ b/src/kimchi/API.json @@ -37,7 +37,7 @@ "type": { "description": "The type of the defined Storage Pool", "type": "string", - "pattern": "^dir|netfs|logical|kimchi-iso$", + "pattern": "^dir|netfs|logical|kimchi-iso|scsi$",
where is iscsi? as you say above: Good question! Not sure how the missing 'iscsi' entry got through. Hmm, I think it is because storagepools_create was not using the validation. 'iscsi' should have been added before my patch, by the commit that fixed the validation: see b4ca2a87b65b66de23b395a4aa30478e28200be7
Anyway, I will add 'iscsi'.
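For reference, the corrected pattern would then presumably read "^dir|netfs|logical|kimchi-iso|iscsi|scsi$", keeping the style already used in API.json (note that, strictly speaking, the ^ and $ anchors in such an alternation bind only to the first and last choices; a grouped "^(dir|netfs|logical|kimchi-iso|iscsi|scsi)$" would be stricter).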
+ Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi, scsi'
"required": true }, "path": { @@ -76,6 +76,10 @@ "minimum": 1, "maximum": 65535 }, + "adapter_name": { + "description": "SCSI host name", + "type": "string" + }, "auth": { "description": "Storage back-end authentication information", "type": "object", @@ -112,7 +116,13 @@ "type": "string", "pattern": "^/storagepools/[^/]+/?$" }, - "graphics": { "$ref": "#/kimchitype/graphics" } + "graphics": { "$ref": "#/kimchitype/graphics" }, + "volumes": { + "description": "list of scsi volumes to be assigned to the new VM.", + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true + } } }, "vm_update": { diff --git a/src/kimchi/featuretests.py b/src/kimchi/featuretests.py index d924050..f391eb6 100644 --- a/src/kimchi/featuretests.py +++ b/src/kimchi/featuretests.py @@ -57,6 +57,18 @@ ISO_STREAM_XML = """ </devices> </domain>"""
+SCSI_FC_XML = """ +<pool type='scsi'> + <name>TEST_SCSI_FC_POOL</name> + <source> + <adapter type='fc_host' wwnn='1234567890abcdef' wwpn='abcdef1234567890'/> + </source> + <target> + <path>/dev/disk/by-path</path> + </target> +</pool> +""" +
class FeatureTests(object):
@@ -150,3 +162,18 @@ class FeatureTests(object): return False
return True + + @staticmethod + def libvirt_support_fc_host(): + try: + conn = libvirt.open('qemu:///system') + pool = None + pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0) + except libvirt.libvirtError as e: + if e.get_error_code() == 27: + # Libvirt requires adapter name, not needed when supports to FC + return False + finally: + pool is None or pool.undefine() + conn is None or conn.close() + return True diff --git a/src/kimchi/model/config.py b/src/kimchi/model/config.py index 0e66e02..6eb0e10 100644 --- a/src/kimchi/model/config.py +++ b/src/kimchi/model/config.py @@ -49,6 +49,7 @@ class CapabilitiesModel(object): self.qemu_stream = False self.qemu_stream_dns = False self.libvirt_stream_protocols = [] + self.fc_host_support = False
# Subscribe function to set host capabilities to be run when cherrypy # server is up @@ -60,6 +61,7 @@ class CapabilitiesModel(object): self.qemu_stream = FeatureTests.qemu_supports_iso_stream() self.qemu_stream_dns = FeatureTests.qemu_iso_stream_dns() self.nfs_target_probe = FeatureTests.libvirt_support_nfs_probe() + self.fc_host_support = FeatureTests.libvirt_support_fc_host()
self.libvirt_stream_protocols = [] for p in ['http', 'https', 'ftp', 'ftps', 'tftp']: @@ -75,7 +77,8 @@ class CapabilitiesModel(object): return {'libvirt_stream_protocols': self.libvirt_stream_protocols, 'qemu_stream': self.qemu_stream, 'screenshot': VMScreenshot.get_stream_test_result(), - 'system_report_tool': bool(report_tool)} + 'system_report_tool': bool(report_tool), + 'fc_host_support': self.fc_host_support}
Oh, it seems in your 1/5 patch you already call fc_host_support.
Removed this; I am going to use the class attribute directly.
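In other words, backend callers would read the flag straight from CapabilitiesModel instead of going through /config/capabilities; roughly (a sketch only, following the same pattern storagepools.py uses above, and assuming the kimchi package is importable):

    from kimchi.model.config import CapabilitiesModel

    caps = CapabilitiesModel()
    adapter_type = 'fc_host' if caps.fc_host_support else 'scsi_host'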
class DistrosModel(object): diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py index 0545a88..816e2e8 100644 --- a/src/kimchi/model/host.py +++ b/src/kimchi/model/host.py @@ -218,7 +218,7 @@ class DevicesModel(object): return dev_names
def _get_devices_fc_host(self): - conn = self.conn.get() + conn = self.conn.get() # Libvirt < 1.0.5 does not support fc_host capability if not self.fc_host_support: ret = [] @@ -226,7 +226,7 @@ class DevicesModel(object): for host in scsi_hosts: xml = conn.nodeDeviceLookupByName(host).XMLDesc(0) path = '/device/capability/capability/@type' - if 'fc_host' in xmlutils.xpath_get_text(xml, path): + if 'fc_host' in xmlutils.xpath_get_text(xml, path): ret.append(host) return ret return conn.listDevices('fc_host',0) diff --git a/src/kimchi/model/libvirtstoragepool.py b/src/kimchi/model/libvirtstoragepool.py index f4dbf2e..ceedbde 100644 --- a/src/kimchi/model/libvirtstoragepool.py +++ b/src/kimchi/model/libvirtstoragepool.py @@ -29,8 +29,7 @@ import libvirt from kimchi.exception import InvalidParameter, OperationFailed, TimeoutExpired from kimchi.iscsi import TargetClient from kimchi.rollbackcontext import RollbackContext -from kimchi.utils import parse_cmd_output, run_command - +from kimchi.utils import kimchi_log, parse_cmd_output, run_command
class StoragePoolDef(object): @classmethod @@ -175,6 +174,51 @@ class LogicalPoolDef(StoragePoolDef): return xml
+class ScsiPoolDef(StoragePoolDef): + poolType = 'scsi' + + def prepare(self, conn=None): + tmp_name = self.poolArgs['source']['name'] + self.poolArgs['source']['name'] = tmp_name.replace('scsi_','') + # fc_host adapters type are only available in libvirt >= 1.0.5 + if not self.poolArgs['fc_host_support']: + self.poolArgs['source']['adapter_type'] = 'scsi_host' + msg = "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; "\ + "setting SCSI adapter type as 'scsi_host'; "\ + "ignoring wwnn and wwpn." %tmp_name + kimchi_log.info(msg) + # Path for Fibre Channel scsi hosts + self.poolArgs['path'] = '/dev/disk/by-path' + if not self.poolArgs['source']['adapter_type']: + self.poolArgs['source']['adapter_type'] = 'scsi_host' + + @property + def xml(self): + # Required parameters + # name: + # source[adapter_type]: + # source[name]: + # source[wwnn]: + # source[wwpn]: + # path: + + xml = """ + <pool type='scsi'> + <name>{name}</name> + <source> + <adapter type='{source[adapter_type]}'\ + name='{source[name]}'\ + wwnn='{source[wwnn]}'\ + wwpn='{source[wwpn]}'/> + </source> + <target> + <path>{path}</path> + </target> + </pool> + """.format(**self.poolArgs) + return xml + + class IscsiPoolDef(StoragePoolDef): poolType = 'iscsi'
diff --git a/src/kimchi/model/storagepools.py b/src/kimchi/model/storagepools.py index 233a8a7..9be7dad 100644 --- a/src/kimchi/model/storagepools.py +++ b/src/kimchi/model/storagepools.py @@ -26,6 +26,8 @@ from kimchi import xmlutils from kimchi.scan import Scanner from kimchi.exception import InvalidOperation, MissingParameter from kimchi.exception import NotFoundError, OperationFailed +from kimchi.model.config import CapabilitiesModel +from kimchi.model.host import DeviceModel from kimchi.model.libvirtstoragepool import StoragePoolDef from kimchi.utils import add_task, kimchi_log
@@ -38,7 +40,11 @@ POOL_STATE_MAP = {0: 'inactive', 4: 'inaccessible'}
STORAGE_SOURCES = {'netfs': {'addr': '/pool/source/host/@name', - 'path': '/pool/source/dir/@path'}} + 'path': '/pool/source/dir/@path'}, + 'scsi': {'adapter_type': '/pool/source/adapter/@type', + 'adapter_name': '/pool/source/adapter/@name', + 'wwnn': '/pool/source/adapter/@wwnn', + 'wwpn': '/pool/source/adapter/@wwpn'}}
class StoragePoolsModel(object): @@ -47,6 +53,8 @@ class StoragePoolsModel(object): self.objstore = kargs['objstore'] self.scanner = Scanner(self._clean_scan) self.scanner.delete() + self.caps = CapabilitiesModel() + self.device = DeviceModel(**kargs)
def get_list(self): try: @@ -67,6 +75,13 @@ class StoragePoolsModel(object):
if params['type'] == 'kimchi-iso': task_id = self._do_deep_scan(params) + + if params['type'] == 'scsi': + extra_params = self.device.lookup( + params['source']['adapter_name']) + params['source'].update(extra_params) + params['fc_host_support'] = self.caps.fc_host_support + poolDef = StoragePoolDef.create(params) poolDef.prepare(conn) xml = poolDef.xml @@ -84,9 +99,10 @@ class StoragePoolsModel(object): return name
pool = conn.storagePoolDefineXML(xml, 0) - if params['type'] in ['logical', 'dir', 'netfs']: + if params['type'] in ['logical', 'dir', 'netfs', 'scsi']: pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) - # autostart dir and logical storage pool created from kimchi + # autostart dir, logical, netfs and scsi storage pools created + # from kimchi pool.setAutostart(1) else: # disable autostart for others diff --git a/src/kimchi/model/templates.py b/src/kimchi/model/templates.py index 03632a6..b004578 100644 --- a/src/kimchi/model/templates.py +++ b/src/kimchi/model/templates.py @@ -161,6 +161,11 @@ class LibvirtVMTemplate(VMTemplate): xml = pool.XMLDesc(0) return xmlutils.xpath_get_text(xml, "/pool/target/path")[0]
+ def _get_storage_type(self): + pool = self._storage_validate() + xml = pool.XMLDesc(0) + return xmlutils.xpath_get_text(xml, "/pool/@type")[0] + def fork_vm_storage(self, vm_uuid): # Provision storage: # TODO: Rebase on the storage API once upstream diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index d4384a1..4623e28 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -155,6 +155,11 @@ class VMsModel(object): 'diskRdKB': diskRdKB, 'diskWrKB': diskWrKB})
+ def _get_volume_path(self, pool, vol): + conn = self.conn.get() + pool = conn.storagePoolLookupByName(pool) + return pool.storageVolLookupByName(vol).path() + def create(self, params): conn = self.conn.get() t_name = template_name_from_uri(params['template']) @@ -169,6 +174,7 @@ class VMsModel(object): pool_uri = params.get('storagepool') if pool_uri: vm_overrides['storagepool'] = pool_uri + vm_overrides['fc_host_support'] = self.caps.fc_host_support t = TemplateModel.get_template(t_name, self.objstore, self.conn, vm_overrides)
@@ -177,7 +183,21 @@ class VMsModel(object): raise InvalidOperation(err)
t.validate() - vol_list = t.fork_vm_storage(vm_uuid) + + # If storagepool is SCSI, volumes will be LUNs and must be passed by + # the user from UI or manually. + vol_list = [] + if t._get_storage_type() == 'scsi': + if not params.get('volumes'): + raise InvalidOperation("Volume list (LUNs names) not given.") + else: + # Get system path of the LUNs + pool = t.info['storagepool'].split('/')[-1] + for vol in params.get('volumes'): + path = self._get_volume_path(pool, vol) + vol_list.append((vol, path)) + else: + vol_list = t.fork_vm_storage(vm_uuid)
# Store the icon for displaying later icon = t.info.get('icon') @@ -193,7 +213,8 @@ class VMsModel(object): xml = t.to_vm_xml(name, vm_uuid, libvirt_stream=libvirt_stream, qemu_stream_dns=self.caps.qemu_stream_dns, - graphics=graphics) + graphics=graphics, + volumes=vol_list)
try: conn.defineXML(xml.encode('utf-8')) diff --git a/src/kimchi/vmtemplate.py b/src/kimchi/vmtemplate.py index 58147e3..368d0b4 100644 --- a/src/kimchi/vmtemplate.py +++ b/src/kimchi/vmtemplate.py @@ -49,6 +49,7 @@ class VMTemplate(object): """ self.name = args['name'] self.info = {} + self.fc_host_support = args.get('fc_host_support')
# Identify the cdrom if present iso_distro = iso_version = 'unknown' @@ -180,6 +181,25 @@ class VMTemplate(object): graphics_xml = graphics_xml + spicevmc_xml return graphics_xml
+ def _get_scsi_disks_xml(self, luns): + ret = "" + # Passthrough configuration + disk_xml = """ + <disk type='volume' device='lun'> + <driver name='qemu' type='raw'/> + <source dev='%(src)s'/> + <target dev='%(dev)s' bus='scsi'/> + </disk>""" + if not self.fc_host_support: + disk_xml = disk_xml.replace('volume','block') + + # Creating disk xml for each lun passed + for index,(lun, path) in enumerate(luns): + dev = "sd%s" % string.lowercase[index] + params = {'src': path, 'dev': dev} + ret = ret + disk_xml % params + return ret + def to_volume_list(self, vm_uuid): storage_path = self._get_storage_path() ret = [] @@ -225,7 +245,6 @@ class VMTemplate(object): params = dict(self.info) params['name'] = vm_name params['uuid'] = vm_uuid - params['disks'] = self._get_disks_xml(vm_uuid) params['networks'] = self._get_networks_xml() params['qemu-namespace'] = '' params['cdroms'] = '' @@ -233,6 +252,13 @@ class VMTemplate(object): graphics = kwargs.get('graphics') params['graphics'] = self._get_graphics_xml(graphics)
+ # Current implementation just allows to create disk in one single + # storage pool, so we cannot mix the types (scsi volumes vs img file) + if self._get_storage_type() == 'scsi': + params['disks'] = self._get_scsi_disks_xml(kwargs.get('volumes')) + else: + params['disks'] = self._get_disks_xml(vm_uuid) + qemu_stream_dns = kwargs.get('qemu_stream_dns', False) libvirt_stream = kwargs.get('libvirt_stream', False) cdrom_xml = self._get_cdrom_xml(libvirt_stream, qemu_stream_dns) @@ -292,3 +318,6 @@ class VMTemplate(object):
def _get_storage_path(self): return '' + + def _get_storage_type(self): + return ''

This patch modifies the storagepool add user interface in order to show all Fibre Channel scsi hosts found in the host system and let user to create a pool attached to this host (the LUNs will be the volumes). A second option to use and enable FC storages is when a LUN is assigned as a pool of FS type, hosting guest images. This second option will be implement in the future. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 11 ++++++++ ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++++++++++++++- ui/pages/i18n.html.tmpl | 4 +++ ui/pages/storagepool-add.html.tmpl | 12 +++++++++ 4 files changed, 72 insertions(+), 1 deletion(-) diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 63ddd88..66fc41e 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -731,5 +731,16 @@ var kimchi = { success : suc, error : err }); + }, + + listFCHosts : function(suc, err) { + kimchi.requestJSON({ + url : kimchi.url + 'host/devices?_cap=fc_host', + type : 'GET', + contentType : 'application/json', + dataType : 'json', + success : suc, + error : err + }); } }; diff --git a/ui/js/src/kimchi.storagepool_add_main.js b/ui/js/src/kimchi.storagepool_add_main.js index e5922b3..1f1ec41 100644 --- a/ui/js/src/kimchi.storagepool_add_main.js +++ b/ui/js/src/kimchi.storagepool_add_main.js @@ -40,7 +40,21 @@ kimchi.initStorageAddPage = function() { label : "iSCSI", value : "iscsi" } ]; - kimchi.listHostPartitions(function(data) { + kimchi.listFCHosts(function(data){ + if (data.length > 0) { + options.push( { + label : "SCSI Fibre Channel", + value : "scsi" + }); + } + var scsiFCHtml = $('#scsiFCTmpl').html(); + var scsiFCHostListHtml = ''; + $.each(data, function(index, value) { + scsiFCHostListHtml += kimchi.template(scsiFCHtml, value); + }); + $('.scsifc-hosts').html(scsiFCHostListHtml); + + kimchi.listHostPartitions(function(data) { if (data.length > 0) { options.push({ label : "LOGICAL", @@ -107,21 +121,31 @@ kimchi.initStorageAddPage = function() { $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'netfs') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').removeClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'iscsi') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').removeClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); + } else if ($(this).val() === 'scsi') { + $('.path-section').addClass('tmpl-html'); + $('.logical-section').addClass('tmpl-html'); + $('.nfs-section').addClass('tmpl-html'); + $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').removeClass('tmpl-html'); } else if ($(this).val() === 'logical') { $('.path-section').addClass('tmpl-html'); $('.logical-section').removeClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } }); $('#authId').click(function() { @@ -134,6 +158,10 @@ kimchi.initStorageAddPage = function() { $('#iscsiportId').keyup(function(event) { $(this).toggleClass("invalid-field",!/^[0-9]+$/.test($(this).val())); }); + }); + }, function() { + // listFCHosts error handler + 
kimchi.message.error(i18n['msg.kimchi.list.fchosts.fail']); }); }; @@ -154,6 +182,8 @@ kimchi.validateForm = function() { return kimchi.validateNfsForm(); } else if (poolType === "iscsi") { return kimchi.validateIscsiForm(); + } else if (poolType === "scsi") { + return kimchi.validateScsiFCForm(); } else if (poolType === "logical") { return kimchi.validateLogicalForm(); } else { @@ -204,6 +234,15 @@ kimchi.validateIscsiForm = function() { return true; }; +kimchi.validateScsiFCForm = function() { + var fcHost = $('input:radio[name=adapter_name]:checked').val(); + if (fcHost === undefined) { + kimchi.message.error(i18n['msg.validate.pool.edit.scsifchost']); + return false; + } + return true; +}; + kimchi.validateServer = function(serverField) { if ('' === serverField) { kimchi.message.error(i18n['msg.pool.edit.server.blank']); @@ -248,6 +287,11 @@ kimchi.addPool = function(event) { source.path = $('#nfspathId').val(); source.host = $('#nfsserverId').val(); formData.source = source; + } else if (poolType === 'scsi'){ + var source = {}; + source.adapter_name = formData.adapter_name; + delete formData.adapter_name; + formData.source = source; } else if (poolType === 'iscsi') { var source = {}; source.target = $('#iscsiTargetId').val(); diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index d63d4e9..a4c3ccb 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -55,6 +55,8 @@ var i18n = { 'msg.fail.template.no.iso': "$_("No iso found")", 'msg.fail.template.scan': "$_("Failed to scan")", 'msg.fail.template.distr': "$_("Failed to list iso distributions")", + 'msg.fail.template.retrieve': "$_("Failed to retrieve template")", + 'msg.kimchi.list.fchosts.fail': "$_("Failed to list Fibre Channel SCSI hosts")", 'msg.confirm.delete.title': "$_("Delete Confirmation")", 'msg.confirm': "$_("OK")", 'msg.cancel': "$_("Cancel")", @@ -100,9 +102,11 @@ var i18n = { 'msg.validate.pool.edit.path':"$_("This is not a real linux path.")", 'msg.validate.pool.edit.nfspath':"$_("Invalid nfs mount path.")", 'msg.validate.pool.edit.logical.device':"$_("No logical device selected.")", + 'msg.validate.pool.edit.scsifchost':"$_("A Fibre Channel SCSI host must be selected.")", 'msg.kimchi.storage.pool.empty':"$_("This storage pool is empty.")", 'msg.kimchi.list.volume.fail':"$_("Failed to list the storage pool.")", 'msg.kimchi.storage.pool.not.active':"$_("The storage pool is not active now.")", + 'msg.kimchi.retrieve.pool.fail': "$_("Failed to retrieve storage pool.")", 'fail.delete.template': "$_("Failed to delete template.")", 'Guests':"$_("Guests")", 'Host':"$_("Host")", diff --git a/ui/pages/storagepool-add.html.tmpl b/ui/pages/storagepool-add.html.tmpl index dac99fe..4782d15 100644 --- a/ui/pages/storagepool-add.html.tmpl +++ b/ui/pages/storagepool-add.html.tmpl @@ -104,6 +104,12 @@ <div class="host-partition"></div> </section> </div> + <div class="scsifc-section tmpl-html"> + <section class="form-section"> + <h2>3. $_("Select SCSI Fibre Channel Host")</h2> + <div class="scsifc-hosts"></div> + </section> + </div> <div class="iscsi-section tmpl-html"> <section class="form-section"> <h2>3. $_("iSCSI Server")</h2> @@ -154,5 +160,11 @@ <label for="{name}">{path}</label> </div> </script> + <script id="scsiFCTmpl" type="html/text"> + <div class="field"> + <input type="radio" value="{name}" name="adapter_name" id="fc-{name}"> + <label for="fc-{name}">{name}</label> + </div> + </script> </body> </html> -- 1.8.5.3
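For reference, the REST traffic behind this UI flow boils down to two calls: listing the Fibre Channel hosts and creating the pool with the chosen adapter. A minimal sketch; the base URL, pool name and 'scsi_host5' are placeholders (the UI takes the adapter name from the selected radio button):

    import requests

    base = 'http://localhost:8000'

    # Same query the UI's listFCHosts() issues
    fc_hosts = requests.get(base + '/host/devices', params={'_cap': 'fc_host'}).json()
    print fc_hosts

    # Create the SCSI pool using one of the listed adapters
    payload = {'name': 'fc-pool',
               'type': 'scsi',
               'source': {'adapter_name': 'scsi_host5'}}
    resp = requests.post(base + '/storagepools', json=payload)
    print resp.status_code, resp.text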

Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com> On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch modifies the storagepool add user interface in order to show all Fibre Channel SCSI hosts found in the host system and let the user create a pool attached to one of these hosts (the LUNs will be the volumes). A second option to use and enable FC storage is to assign a LUN as a pool of FS type hosting guest images; this second option will be implemented in the future.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 11 ++++++++ ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++++++++++++++- ui/pages/i18n.html.tmpl | 4 +++ ui/pages/storagepool-add.html.tmpl | 12 +++++++++ 4 files changed, 72 insertions(+), 1 deletion(-)
diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 63ddd88..66fc41e 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -731,5 +731,16 @@ var kimchi = { success : suc, error : err }); + }, + + listFCHosts : function(suc, err) { + kimchi.requestJSON({ + url : kimchi.url + 'host/devices?_cap=fc_host', + type : 'GET', + contentType : 'application/json', + dataType : 'json', + success : suc, + error : err + }); } }; diff --git a/ui/js/src/kimchi.storagepool_add_main.js b/ui/js/src/kimchi.storagepool_add_main.js index e5922b3..1f1ec41 100644 --- a/ui/js/src/kimchi.storagepool_add_main.js +++ b/ui/js/src/kimchi.storagepool_add_main.js @@ -40,7 +40,21 @@ kimchi.initStorageAddPage = function() { label : "iSCSI", value : "iscsi" } ]; - kimchi.listHostPartitions(function(data) { + kimchi.listFCHosts(function(data){ + if (data.length > 0) { + options.push( { + label : "SCSI Fibre Channel", + value : "scsi" + }); + } + var scsiFCHtml = $('#scsiFCTmpl').html(); + var scsiFCHostListHtml = ''; + $.each(data, function(index, value) { + scsiFCHostListHtml += kimchi.template(scsiFCHtml, value); + }); + $('.scsifc-hosts').html(scsiFCHostListHtml); + + kimchi.listHostPartitions(function(data) { if (data.length > 0) { options.push({ label : "LOGICAL", @@ -107,21 +121,31 @@ kimchi.initStorageAddPage = function() { $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'netfs') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').removeClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'iscsi') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').removeClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); + } else if ($(this).val() === 'scsi') { + $('.path-section').addClass('tmpl-html'); + $('.logical-section').addClass('tmpl-html'); + $('.nfs-section').addClass('tmpl-html'); + $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').removeClass('tmpl-html'); } else if ($(this).val() === 'logical') { $('.path-section').addClass('tmpl-html'); $('.logical-section').removeClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } }); $('#authId').click(function() { @@ -134,6 +158,10 @@ kimchi.initStorageAddPage = function() { $('#iscsiportId').keyup(function(event) { $(this).toggleClass("invalid-field",!/^[0-9]+$/.test($(this).val())); }); + }); + }, function() { + // listFCHosts error handler + kimchi.message.error(i18n['msg.kimchi.list.fchosts.fail']); }); };
@@ -154,6 +182,8 @@ kimchi.validateForm = function() { return kimchi.validateNfsForm(); } else if (poolType === "iscsi") { return kimchi.validateIscsiForm(); + } else if (poolType === "scsi") { + return kimchi.validateScsiFCForm(); } else if (poolType === "logical") { return kimchi.validateLogicalForm(); } else { @@ -204,6 +234,15 @@ kimchi.validateIscsiForm = function() { return true; };
+kimchi.validateScsiFCForm = function() { + var fcHost = $('input:radio[name=adapter_name]:checked').val(); + if (fcHost === undefined) { + kimchi.message.error(i18n['msg.validate.pool.edit.scsifchost']); + return false; + } + return true; +}; + kimchi.validateServer = function(serverField) { if ('' === serverField) { kimchi.message.error(i18n['msg.pool.edit.server.blank']); @@ -248,6 +287,11 @@ kimchi.addPool = function(event) { source.path = $('#nfspathId').val(); source.host = $('#nfsserverId').val(); formData.source = source; + } else if (poolType === 'scsi'){ + var source = {}; + source.adapter_name = formData.adapter_name; + delete formData.adapter_name; + formData.source = source; } else if (poolType === 'iscsi') { var source = {}; source.target = $('#iscsiTargetId').val(); diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index d63d4e9..a4c3ccb 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -55,6 +55,8 @@ var i18n = { 'msg.fail.template.no.iso': "$_("No iso found")", 'msg.fail.template.scan': "$_("Failed to scan")", 'msg.fail.template.distr': "$_("Failed to list iso distributions")", + 'msg.fail.template.retrieve': "$_("Failed to retrieve template")", + 'msg.kimchi.list.fchosts.fail': "$_("Failed to list Fibre Channel SCSI hosts")", 'msg.confirm.delete.title': "$_("Delete Confirmation")", 'msg.confirm': "$_("OK")", 'msg.cancel': "$_("Cancel")", @@ -100,9 +102,11 @@ var i18n = { 'msg.validate.pool.edit.path':"$_("This is not a real linux path.")", 'msg.validate.pool.edit.nfspath':"$_("Invalid nfs mount path.")", 'msg.validate.pool.edit.logical.device':"$_("No logical device selected.")", + 'msg.validate.pool.edit.scsifchost':"$_("A Fibre Channel SCSI host must be selected.")", 'msg.kimchi.storage.pool.empty':"$_("This storage pool is empty.")", 'msg.kimchi.list.volume.fail':"$_("Failed to list the storage pool.")", 'msg.kimchi.storage.pool.not.active':"$_("The storage pool is not active now.")", + 'msg.kimchi.retrieve.pool.fail': "$_("Failed to retrieve storage pool.")", 'fail.delete.template': "$_("Failed to delete template.")", 'Guests':"$_("Guests")", 'Host':"$_("Host")", diff --git a/ui/pages/storagepool-add.html.tmpl b/ui/pages/storagepool-add.html.tmpl index dac99fe..4782d15 100644 --- a/ui/pages/storagepool-add.html.tmpl +++ b/ui/pages/storagepool-add.html.tmpl @@ -104,6 +104,12 @@ <div class="host-partition"></div> </section> </div> + <div class="scsifc-section tmpl-html"> + <section class="form-section"> + <h2>3. $_("Select SCSI Fibre Channel Host")</h2> + <div class="scsifc-hosts"></div> + </section> + </div> <div class="iscsi-section tmpl-html"> <section class="form-section"> <h2>3. $_("iSCSI Server")</h2> @@ -154,5 +160,11 @@ <label for="{name}">{path}</label> </div> </script> + <script id="scsiFCTmpl" type="html/text"> + <div class="field"> + <input type="radio" value="{name}" name="adapter_name" id="fc-{name}"> + <label for="fc-{name}">{name}</label> + </div> + </script> </body> </html>

Reviewed-by: Aline Manera <alinefm@linux.vnet.ibm.com> On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch modifies the storage pool add user interface to show all Fibre Channel SCSI hosts found in the host system and lets the user create a pool attached to one of these hosts (the LUNs become the pool's volumes). A second option for using FC storage is to assign a single LUN as a filesystem-type pool that hosts guest images; this second option will be implemented in the future.
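For reference, a rough sketch of the request this flow ends up issuing for the new pool type; the endpoint and payload shape match the REST tests later in the series, while the callbacks and the i18n key are only placeholders:

    // Sketch only: create a SCSI FC pool over the REST API, assuming
    // kimchi.requestJSON behaves like the other helpers in kimchi.api.js.
    kimchi.requestJSON({
        url : kimchi.url + 'storagepools',
        type : 'POST',
        contentType : 'application/json',
        dataType : 'json',
        data : JSON.stringify({
            name : 'scsi_fc_pool',                    // illustrative name
            type : 'scsi',
            source : { adapter_name : 'scsi_host3' }  // FC host picked in the UI
        }),
        success : function(pool) { console.log('pool created:', pool); },
        error : function() { kimchi.message.error(i18n['msg.pool.create.fail']); }
    });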
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 11 ++++++++ ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++++++++++++++- ui/pages/i18n.html.tmpl | 4 +++ ui/pages/storagepool-add.html.tmpl | 12 +++++++++ 4 files changed, 72 insertions(+), 1 deletion(-)
diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 63ddd88..66fc41e 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -731,5 +731,16 @@ var kimchi = { success : suc, error : err }); + }, + + listFCHosts : function(suc, err) { + kimchi.requestJSON({ + url : kimchi.url + 'host/devices?_cap=fc_host', + type : 'GET', + contentType : 'application/json', + dataType : 'json', + success : suc, + error : err + }); } }; diff --git a/ui/js/src/kimchi.storagepool_add_main.js b/ui/js/src/kimchi.storagepool_add_main.js index e5922b3..1f1ec41 100644 --- a/ui/js/src/kimchi.storagepool_add_main.js +++ b/ui/js/src/kimchi.storagepool_add_main.js @@ -40,7 +40,21 @@ kimchi.initStorageAddPage = function() { label : "iSCSI", value : "iscsi" } ]; - kimchi.listHostPartitions(function(data) { + kimchi.listFCHosts(function(data){ + if (data.length > 0) { + options.push( { + label : "SCSI Fibre Channel", + value : "scsi" + }); + } + var scsiFCHtml = $('#scsiFCTmpl').html(); + var scsiFCHostListHtml = ''; + $.each(data, function(index, value) { + scsiFCHostListHtml += kimchi.template(scsiFCHtml, value); + }); + $('.scsifc-hosts').html(scsiFCHostListHtml); + + kimchi.listHostPartitions(function(data) { if (data.length > 0) { options.push({ label : "LOGICAL", @@ -107,21 +121,31 @@ kimchi.initStorageAddPage = function() { $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'netfs') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').removeClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'iscsi') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').removeClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); + } else if ($(this).val() === 'scsi') { + $('.path-section').addClass('tmpl-html'); + $('.logical-section').addClass('tmpl-html'); + $('.nfs-section').addClass('tmpl-html'); + $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').removeClass('tmpl-html'); } else if ($(this).val() === 'logical') { $('.path-section').addClass('tmpl-html'); $('.logical-section').removeClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } }); $('#authId').click(function() { @@ -134,6 +158,10 @@ kimchi.initStorageAddPage = function() { $('#iscsiportId').keyup(function(event) { $(this).toggleClass("invalid-field",!/^[0-9]+$/.test($(this).val())); }); + }); + }, function() { + // listFCHosts error handler + kimchi.message.error(i18n['msg.kimchi.list.fchosts.fail']); }); };
@@ -154,6 +182,8 @@ kimchi.validateForm = function() { return kimchi.validateNfsForm(); } else if (poolType === "iscsi") { return kimchi.validateIscsiForm(); + } else if (poolType === "scsi") { + return kimchi.validateScsiFCForm(); } else if (poolType === "logical") { return kimchi.validateLogicalForm(); } else { @@ -204,6 +234,15 @@ kimchi.validateIscsiForm = function() { return true; };
+kimchi.validateScsiFCForm = function() { + var fcHost = $('input:radio[name=adapter_name]:checked').val(); + if (fcHost === undefined) { + kimchi.message.error(i18n['msg.validate.pool.edit.scsifchost']); + return false; + } + return true; +}; + kimchi.validateServer = function(serverField) { if ('' === serverField) { kimchi.message.error(i18n['msg.pool.edit.server.blank']); @@ -248,6 +287,11 @@ kimchi.addPool = function(event) { source.path = $('#nfspathId').val(); source.host = $('#nfsserverId').val(); formData.source = source; + } else if (poolType === 'scsi'){ + var source = {}; + source.adapter_name = formData.adapter_name; + delete formData.adapter_name; + formData.source = source; } else if (poolType === 'iscsi') { var source = {}; source.target = $('#iscsiTargetId').val(); diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index d63d4e9..a4c3ccb 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -55,6 +55,8 @@ var i18n = { 'msg.fail.template.no.iso': "$_("No iso found")", 'msg.fail.template.scan': "$_("Failed to scan")", 'msg.fail.template.distr': "$_("Failed to list iso distributions")", + 'msg.fail.template.retrieve': "$_("Failed to retrieve template")", + 'msg.kimchi.list.fchosts.fail': "$_("Failed to list Fibre Channel SCSI hosts")", 'msg.confirm.delete.title': "$_("Delete Confirmation")", 'msg.confirm': "$_("OK")", 'msg.cancel': "$_("Cancel")", @@ -100,9 +102,11 @@ var i18n = { 'msg.validate.pool.edit.path':"$_("This is not a real linux path.")", 'msg.validate.pool.edit.nfspath':"$_("Invalid nfs mount path.")", 'msg.validate.pool.edit.logical.device':"$_("No logical device selected.")", + 'msg.validate.pool.edit.scsifchost':"$_("A Fibre Channel SCSI host must be selected.")", 'msg.kimchi.storage.pool.empty':"$_("This storage pool is empty.")", 'msg.kimchi.list.volume.fail':"$_("Failed to list the storage pool.")", 'msg.kimchi.storage.pool.not.active':"$_("The storage pool is not active now.")", + 'msg.kimchi.retrieve.pool.fail': "$_("Failed to retrieve storage pool.")", 'fail.delete.template': "$_("Failed to delete template.")", 'Guests':"$_("Guests")", 'Host':"$_("Host")", diff --git a/ui/pages/storagepool-add.html.tmpl b/ui/pages/storagepool-add.html.tmpl index dac99fe..4782d15 100644 --- a/ui/pages/storagepool-add.html.tmpl +++ b/ui/pages/storagepool-add.html.tmpl @@ -104,6 +104,12 @@ <div class="host-partition"></div> </section> </div> + <div class="scsifc-section tmpl-html"> + <section class="form-section"> + <h2>3. $_("Select SCSI Fibre Channel Host")</h2> + <div class="scsifc-hosts"></div> + </section> + </div> <div class="iscsi-section tmpl-html"> <section class="form-section"> <h2>3. $_("iSCSI Server")</h2> @@ -154,5 +160,11 @@ <label for="{name}">{path}</label> </div> </script> + <script id="scsiFCTmpl" type="html/text"> + <div class="field"> + <input type="radio" value="{name}" name="adapter_name" id="fc-{name}"> + <label for="fc-{name}">{name}</label> + </div> + </script> </body> </html>

This patch implements the UI functions and API calls to show to user the list of volumes (LUNs) of a SCSI FC storagepools. The user can then select the LUN when creating a new VM. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 13 +++++++ ui/js/src/kimchi.guest_add_main.js | 73 +++++++++++++++++++++++++++++++++++--- ui/pages/i18n.html.tmpl | 1 + 3 files changed, 83 insertions(+), 4 deletions(-) diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 66fc41e..4597c5d 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -155,6 +155,19 @@ var kimchi = { }); }, + /* + * Retrieve the information of a storage pool by the given name. + */ + retrieveStoragePool : function(storagePoolName, suc, err) { + kimchi.requestJSON({ + url : kimchi.url + "storagepools/" + + encodeURIComponent(storagePoolName), + type : 'GET', + contentType : 'application/json', + dataType : 'json' + }).done(suc); + }, + /** * Retrieve the information of a template by the given name. */ diff --git a/ui/js/src/kimchi.guest_add_main.js b/ui/js/src/kimchi.guest_add_main.js index 2085562..6b8fc38 100644 --- a/ui/js/src/kimchi.guest_add_main.js +++ b/ui/js/src/kimchi.guest_add_main.js @@ -62,9 +62,7 @@ kimchi.guest_add_main = function() { } }); - var addGuest = function(event) { - var formData = $('#form-vm-add').serializeObject(); - + var addGuest = function(formData) { kimchi.createVM(formData, function() { kimchi.listVmsAuto(); kimchi.window.close(); @@ -79,8 +77,75 @@ kimchi.guest_add_main = function() { return false; }; + // This function is used to select a lun for new vm disk if template has + // a SCSI storagepool associated. + function getLun() { + var formData = $('#form-vm-add').serializeObject(); + var templateName = formData.template.substring(11); + kimchi.retrieveTemplate(templateName, function(templateInfo) { + var poolName = templateInfo.storagepool.substring(14); + kimchi.retrieveStoragePool(poolName, function(poolInfo){ + if (poolInfo.type === "scsi") { + kimchi.listStorageVolumes(poolInfo.name, function(lunsList) { + if (lunsList.length == 0) { + kimchi.message.error('There are not volumes for this pool'); + return false; + } + var popUpList = '<section class="form-section">' + + '<h2>1. 
Storage Pool: ' + poolInfo.name + '</h2>' + + '<div class="lun_radios">'; + $.each(lunsList, function(index, value) { + popUpList += '<div class="field">' + + '<input type="radio" id="lun-' + value.name + '" name="lun" value="' + value.name + '">' + + '<label for="lun-' + value.name + '">' + value.name + '</label></div>'; + }); + popUpList += '</div></section>'; + console.log(popUpList) + var popup = $(popUpList); + popup.dialog({ + autoOpen : true, + modal : true, + width : 400, + draggable : false, + resizable : false, + closeText: "X", + dialogClass : "network-config", + title: "Please, select a LUN", + close: function( event, ui ) { $('input[name=lun]').attr('checked',false); }, + buttons : [ { + text : i18n.action_select_lun, + class: "ui-button-primary", + click : function() { + var lunName = $('input:radio[name=lun]:checked').val(); + if (lunName === undefined) { + kimchi.message.error('You must select a LUN'); + } else { + formData.volumes = new Array(lunName); + addGuest(formData); + } + $( this ).dialog( "close" ); + } + }] + }); + },function() { + // listStorageVolumes error handler + kimchi.message.error(i18n['msg.kimchi.list.volume.fail']); + }); + } + else { addGuest(formData); } + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.kimchi.retrieve.pool.fail']); + }); + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.fail.template.retrieve']); + }); + return false; + } + $('#form-vm-add').on('submit', addGuest); - $('#vm-doAdd').on('click', addGuest); + $('#vm-doAdd').on('click', getLun); showTemplates(); }; diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index a4c3ccb..f635fdf 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -123,6 +123,7 @@ var i18n = { 'network_dialog_ok': "$_("OK")", 'network_dialog_cancel': "$_("Cancel")", 'action_create': "$_("Create")", + 'action_select_lun': "$_("Select")", 'msg_warning': "$_("Warning")", 'msg.logicalpool.confirm.delete': "$_("It will format your disk and you will loose any data in" " there, are you sure to continue? ")", -- 1.8.5.3

When I select the text associated with the radio box it does not select the option. This needs to be fixed. More comments below. On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch implements the UI functions and API calls to show the user the list of volumes (LUNs) of a SCSI FC storage pool. The user can then select a LUN when creating a new VM.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 13 +++++++ ui/js/src/kimchi.guest_add_main.js | 73 +++++++++++++++++++++++++++++++++++--- ui/pages/i18n.html.tmpl | 1 + 3 files changed, 83 insertions(+), 4 deletions(-)
diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 66fc41e..4597c5d 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -155,6 +155,19 @@ var kimchi = { }); },
+ /* + * Retrieve the information of a storage pool by the given name. + */ + retrieveStoragePool : function(storagePoolName, suc, err) { + kimchi.requestJSON({ + url : kimchi.url + "storagepools/" + + encodeURIComponent(storagePoolName), + type : 'GET', + contentType : 'application/json', + dataType : 'json' + }).done(suc); + }, + /** * Retrieve the information of a template by the given name. */ diff --git a/ui/js/src/kimchi.guest_add_main.js b/ui/js/src/kimchi.guest_add_main.js index 2085562..6b8fc38 100644 --- a/ui/js/src/kimchi.guest_add_main.js +++ b/ui/js/src/kimchi.guest_add_main.js @@ -62,9 +62,7 @@ kimchi.guest_add_main = function() { } });
- var addGuest = function(event) { - var formData = $('#form-vm-add').serializeObject(); - + var addGuest = function(formData) { kimchi.createVM(formData, function() { kimchi.listVmsAuto(); kimchi.window.close(); @@ -79,8 +77,75 @@ kimchi.guest_add_main = function() { return false; };
+ // This function is used to select a lun for new vm disk if template has + // a SCSI storagepool associated. + function getLun() { + var formData = $('#form-vm-add').serializeObject(); + var templateName = formData.template.substring(11);
+ kimchi.retrieveTemplate(templateName,
function(templateInfo) { + var poolName = templateInfo.storagepool.substring(14); + kimchi.retrieveStoragePool(poolName, function(poolInfo){ + if (poolInfo.type === "scsi") { + kimchi.listStorageVolumes(poolInfo.name, function(lunsList) { + if (lunsList.length == 0) { + kimchi.message.error('There are not volumes for this pool');
You need to add the message to i18n.html.tmpl and use it here, otherwise this message will not be translated.
+ return false; + } + var popUpList = '<section class="form-section">' + + '<h2>1. Storage Pool: ' + poolInfo.name + '</h2>' + + '<div class="lun_radios">';
We need to improve the message to the user. "You selected a Template associated with a SCSI Storage Pool, so you need to select which LUN you want to use as the primary disk for the guest." Or something like it.
+ $.each(lunsList, function(index, value) { + popUpList += '<div class="field">' + + '<input type="radio" id="lun-' + value.name + '" name="lun" value="' + value.name + '">' + + '<label for="lun-' + value.name + '">' + value.name + '</label></div>'; + }); + popUpList += '</div></section>'; + console.log(popUpList) + var popup = $(popUpList); + popup.dialog({ + autoOpen : true, + modal : true, + width : 400, + draggable : false, + resizable : false, + closeText: "X", + dialogClass : "network-config", + title: "Please, select a LUN", + close: function( event, ui ) { $('input[name=lun]').attr('checked',false); }, + buttons : [ { + text : i18n.action_select_lun, + class: "ui-button-primary", + click : function() { + var lunName = $('input:radio[name=lun]:checked').val(); + if (lunName === undefined) { + kimchi.message.error('You must select a LUN'); + } else { + formData.volumes = new Array(lunName); + addGuest(formData); + } + $( this ).dialog( "close" ); + } + }] + });
What about splitting this big function into smaller ones? It is hard to read it this way. Example: 1) function to get the template 2) function to get the storage pool associated with the template 3) function to get the type of the storage pool (get template; get storage pool; get storage pool type; if pool type != scsi: return) 4) function to display the new dialog
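For illustration, a minimal sketch of such a split, reusing the helpers already present in this patch (kimchi.retrieveTemplate, kimchi.retrieveStoragePool, kimchi.listStorageVolumes, addGuest); the small function names and the showLunDialog step are placeholders, not code in the tree:

    // Sketch only: one small function per step of the flow described above.
    var getTemplate = function(formData, next) {
        var templateName = formData.template.substring(11);
        kimchi.retrieveTemplate(templateName, next, function() {
            kimchi.message.error(i18n['msg.fail.template.retrieve']);
        });
    };

    var getStoragePool = function(templateInfo, next) {
        var poolName = templateInfo.storagepool.substring(14);
        kimchi.retrieveStoragePool(poolName, next, function() {
            kimchi.message.error(i18n['msg.kimchi.retrieve.pool.fail']);
        });
    };

    var getLun = function() {
        var formData = $('#form-vm-add').serializeObject();
        getTemplate(formData, function(templateInfo) {
            getStoragePool(templateInfo, function(poolInfo) {
                if (poolInfo.type !== 'scsi') {
                    // Not a SCSI pool: create the guest as before.
                    addGuest(formData);
                    return;
                }
                kimchi.listStorageVolumes(poolInfo.name, function(lunsList) {
                    // Step 4: build and open the LUN selection dialog.
                    showLunDialog(formData, poolInfo, lunsList);
                }, function() {
                    kimchi.message.error(i18n['msg.kimchi.list.volume.fail']);
                });
            });
        });
        return false;
    };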
+ },function() { + // listStorageVolumes error handler + kimchi.message.error(i18n['msg.kimchi.list.volume.fail']); + }); + } + else { addGuest(formData); } + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.kimchi.retrieve.pool.fail']); + }); + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.fail.template.retrieve']); + }); + return false; + } + $('#form-vm-add').on('submit', addGuest); - $('#vm-doAdd').on('click', addGuest); + $('#vm-doAdd').on('click', getLun);
showTemplates(); }; diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index a4c3ccb..f635fdf 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -123,6 +123,7 @@ var i18n = { 'network_dialog_ok': "$_("OK")", 'network_dialog_cancel': "$_("Cancel")", 'action_create': "$_("Create")", + 'action_select_lun': "$_("Select")", 'msg_warning': "$_("Warning")", 'msg.logicalpool.confirm.delete': "$_("It will format your disk and you will loose any data in" " there, are you sure to continue? ")",

On 02/10/2014 12:20 PM, Aline Manera wrote:
When I select the text associated with the radio box it does not select the option. This needs to be fixed.
This is working in my tests
More comments below
On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch implements the UI functions and API calls to show the user the list of volumes (LUNs) of a SCSI FC storage pool. The user can then select a LUN when creating a new VM.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 13 +++++++ ui/js/src/kimchi.guest_add_main.js | 73 +++++++++++++++++++++++++++++++++++--- ui/pages/i18n.html.tmpl | 1 + 3 files changed, 83 insertions(+), 4 deletions(-)
diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 66fc41e..4597c5d 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -155,6 +155,19 @@ var kimchi = { }); },
+ /* + * Retrieve the information of a storage pool by the given name. + */ + retrieveStoragePool : function(storagePoolName, suc, err) { + kimchi.requestJSON({ + url : kimchi.url + "storagepools/" + + encodeURIComponent(storagePoolName), + type : 'GET', + contentType : 'application/json', + dataType : 'json' + }).done(suc); + }, + /** * Retrieve the information of a template by the given name. */ diff --git a/ui/js/src/kimchi.guest_add_main.js b/ui/js/src/kimchi.guest_add_main.js index 2085562..6b8fc38 100644 --- a/ui/js/src/kimchi.guest_add_main.js +++ b/ui/js/src/kimchi.guest_add_main.js @@ -62,9 +62,7 @@ kimchi.guest_add_main = function() { } });
- var addGuest = function(event) { - var formData = $('#form-vm-add').serializeObject(); - + var addGuest = function(formData) { kimchi.createVM(formData, function() { kimchi.listVmsAuto(); kimchi.window.close(); @@ -79,8 +77,75 @@ kimchi.guest_add_main = function() { return false; };
+ // This function is used to select a lun for new vm disk if template has + // a SCSI storagepool associated. + function getLun() { + var formData = $('#form-vm-add').serializeObject(); + var templateName = formData.template.substring(11);
+ kimchi.retrieveTemplate(templateName,
function(templateInfo) { + var poolName = templateInfo.storagepool.substring(14); + kimchi.retrieveStoragePool(poolName, function(poolInfo){ + if (poolInfo.type === "scsi") { + kimchi.listStorageVolumes(poolInfo.name, function(lunsList) { + if (lunsList.length == 0) { + kimchi.message.error('There are not volumes for this pool');
You need to add the message to i18n.html.tmpl and use it here, otherwise this message will not be translated. Done
+ return false; + } + var popUpList = '<section class="form-section">' + + '<h2>1. Storage Pool: ' + poolInfo.name + '</h2>' + + '<div class="lun_radios">';
We need to improve the message to the user.
"You select a Template associated to a SCSI Storage Pool so you need to select which LUN you wan to use as primary disk to guest."
Or something like it
Done
+ $.each(lunsList, function(index, value) { + popUpList += '<div class="field">' + + '<input type="radio" id="lun-' + value.name + '" name="lun" value="' + value.name + '">' + + '<label for="lun-' + value.name + '">' + value.name + '</label></div>'; + }); + popUpList += '</div></section>'; + console.log(popUpList) + var popup = $(popUpList); + popup.dialog({ + autoOpen : true, + modal : true, + width : 400, + draggable : false, + resizable : false, + closeText: "X", + dialogClass : "network-config", + title: "Please, select a LUN", + close: function( event, ui ) { $('input[name=lun]').attr('checked',false); }, + buttons : [ { + text : i18n.action_select_lun, + class: "ui-button-primary", + click : function() { + var lunName = $('input:radio[name=lun]:checked').val(); + if (lunName === undefined) { + kimchi.message.error('You must select a LUN'); + } else { + formData.volumes = new Array(lunName); + addGuest(formData); + } + $( this ).dialog( "close" ); + } + }] + });
What about splitting this big function into smaller ones? It is hard to read it this way.
Example:
1) function to get the template 2) function to get the storage pool associated with the template 3) function to get the type of the storage pool
get template; get storage pool; get storage pool type
if pool type != scsi: return
4) function to display new dialog
I am not 100% sure how to do this. JS functions are still complicated for me, so it will take a while to code/test. As the code is working, I would rather ask a UI specialist to do this refactoring in the next few days. Or I can try to do it in the future, when I have fewer things in my backlog.
+ },function() { + // listStorageVolumes error handler + kimchi.message.error(i18n['msg.kimchi.list.volume.fail']); + }); + } + else { addGuest(formData); } + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.kimchi.retrieve.pool.fail']); + }); + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.fail.template.retrieve']); + }); + return false; + } + $('#form-vm-add').on('submit', addGuest); - $('#vm-doAdd').on('click', addGuest); + $('#vm-doAdd').on('click', getLun);
showTemplates(); }; diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index a4c3ccb..f635fdf 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -123,6 +123,7 @@ var i18n = { 'network_dialog_ok': "$_("OK")", 'network_dialog_cancel': "$_("Cancel")", 'action_create': "$_("Create")", + 'action_select_lun': "$_("Select")", 'msg_warning': "$_("Warning")", 'msg.logicalpool.confirm.delete': "$_("It will format your disk and you will loose any data in" " there, are you sure to continue? ")",

On 02/12/2014 03:01 AM, Rodrigo Trujillo wrote:
On 02/10/2014 12:20 PM, Aline Manera wrote:
When I select the text associated with the radio box it does not select the option. This needs to be fixed.
This is working in my tests
When creating a new VM from a SCSI storage pool? Which browser are you using? I am on Firefox 26.
More comments below
On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch implements the UI functions and API calls to show the user the list of volumes (LUNs) of a SCSI FC storage pool. The user can then select a LUN when creating a new VM.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 13 +++++++ ui/js/src/kimchi.guest_add_main.js | 73 +++++++++++++++++++++++++++++++++++--- ui/pages/i18n.html.tmpl | 1 + 3 files changed, 83 insertions(+), 4 deletions(-)
diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 66fc41e..4597c5d 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -155,6 +155,19 @@ var kimchi = { }); },
+ /* + * Retrieve the information of a storage pool by the given name. + */ + retrieveStoragePool : function(storagePoolName, suc, err) { + kimchi.requestJSON({ + url : kimchi.url + "storagepools/" + + encodeURIComponent(storagePoolName), + type : 'GET', + contentType : 'application/json', + dataType : 'json' + }).done(suc); + }, + /** * Retrieve the information of a template by the given name. */ diff --git a/ui/js/src/kimchi.guest_add_main.js b/ui/js/src/kimchi.guest_add_main.js index 2085562..6b8fc38 100644 --- a/ui/js/src/kimchi.guest_add_main.js +++ b/ui/js/src/kimchi.guest_add_main.js @@ -62,9 +62,7 @@ kimchi.guest_add_main = function() { } });
- var addGuest = function(event) { - var formData = $('#form-vm-add').serializeObject(); - + var addGuest = function(formData) { kimchi.createVM(formData, function() { kimchi.listVmsAuto(); kimchi.window.close(); @@ -79,8 +77,75 @@ kimchi.guest_add_main = function() { return false; };
+ // This function is used to select a lun for new vm disk if template has + // a SCSI storagepool associated. + function getLun() { + var formData = $('#form-vm-add').serializeObject(); + var templateName = formData.template.substring(11);
+ kimchi.retrieveTemplate(templateName,
function(templateInfo) { + var poolName = templateInfo.storagepool.substring(14); + kimchi.retrieveStoragePool(poolName, function(poolInfo){ + if (poolInfo.type === "scsi") { + kimchi.listStorageVolumes(poolInfo.name, function(lunsList) { + if (lunsList.length == 0) { + kimchi.message.error('There are not volumes for this pool');
You need to add the message to i18n.html.tmpl and use it here, otherwise this message will not be translated. Done
+ return false; + } + var popUpList = '<section class="form-section">' + + '<h2>1. Storage Pool: ' + poolInfo.name + '</h2>' + + '<div class="lun_radios">';
We need to improve the message to the user.
"You select a Template associated to a SCSI Storage Pool so you need to select which LUN you wan to use as primary disk to guest."
Or something like it
Done
+ $.each(lunsList, function(index, value) { + popUpList += '<div class="field">' + + '<input type="radio" id="lun-' + value.name + '" name="lun" value="' + value.name + '">' + + '<label for="lun-' + value.name + '">' + value.name + '</label></div>'; + }); + popUpList += '</div></section>'; + console.log(popUpList) + var popup = $(popUpList); + popup.dialog({ + autoOpen : true, + modal : true, + width : 400, + draggable : false, + resizable : false, + closeText: "X", + dialogClass : "network-config", + title: "Please, select a LUN", + close: function( event, ui ) { $('input[name=lun]').attr('checked',false); }, + buttons : [ { + text : i18n.action_select_lun, + class: "ui-button-primary", + click : function() { + var lunName = $('input:radio[name=lun]:checked').val(); + if (lunName === undefined) { + kimchi.message.error('You must select a LUN'); + } else { + formData.volumes = new Array(lunName); + addGuest(formData); + } + $( this ).dialog( "close" ); + } + }] + });
What about splitting this big function into smaller ones? It is hard to read it this way.
Example:
1) function to get the template 2) function to get the storage pool associated with the template 3) function to get the type of the storage pool
get template; get storage pool; get storage pool type
if pool type != scsi: return
4) function to display new dialog
I am not 100% sure how to do this. JS functions are still complicated for me, so it will take a while to code/test. As the code is working, I would rather ask a UI specialist to do this refactoring in the next few days. Or I can try to do it in the future, when I have fewer things in my backlog.
Are you saying that I should merge it this way (which no one can read) and then another person will redo the work?
+ },function() { + // listStorageVolumes error handler + kimchi.message.error(i18n['msg.kimchi.list.volume.fail']); + }); + } + else { addGuest(formData); } + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.kimchi.retrieve.pool.fail']); + }); + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.fail.template.retrieve']); + }); + return false; + } + $('#form-vm-add').on('submit', addGuest); - $('#vm-doAdd').on('click', addGuest); + $('#vm-doAdd').on('click', getLun);
showTemplates(); }; diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index a4c3ccb..f635fdf 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -123,6 +123,7 @@ var i18n = { 'network_dialog_ok': "$_("OK")", 'network_dialog_cancel': "$_("Cancel")", 'action_create': "$_("Create")", + 'action_select_lun': "$_("Select")", 'msg_warning': "$_("Warning")", 'msg.logicalpool.confirm.delete': "$_("It will format your disk and you will loose any data in" " there, are you sure to continue? ")",

This patch modifies the Mock model functions to allow user to create a SCSI FC pool in test environment. Then implements functions to test API and rest. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/kimchi/mockmodel.py | 53 +++++++++++++++++++++++++++++++++++++++++------ tests/test_rest.py | 47 +++++++++++++++++++++++++++++++++++++++++ tests/test_storagepool.py | 21 +++++++++++++++++++ 3 files changed, 115 insertions(+), 6 deletions(-) diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index 4e276eb..f6400a4 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -66,7 +66,8 @@ class MockModel(object): return {'libvirt_stream_protocols': ['http', 'https', 'ftp', 'ftps', 'tftp'], 'qemu_stream': True, 'screenshot': True, - 'system_report_tool': True} + 'system_report_tool': True, + 'fc_host_support': True} def reset(self): self._mock_vms = {} @@ -155,7 +156,15 @@ class MockModel(object): if icon: vm.info['icon'] = icon - vm.disk_paths = t.fork_vm_storage(vm_uuid) + pool = t._storage_validate() + if pool.info['type'] == 'scsi': + vm.disk_paths = [] + if not params.get('volumes'): + raise InvalidOperation("Volume list (LUNs names) not given.") + for vol in params['volumes']: + vm.disk_paths.append(pool._volumes[vol].info['path']) + else: + vm.disk_paths = t.fork_vm_storage(vm_uuid) self._mock_vms[name] = vm return name @@ -298,8 +307,18 @@ class MockModel(object): name = params['name'] pool = MockStoragePool(name) pool.info['type'] = params['type'] - pool.info['path'] = params['path'] - if params['type'] == 'dir': + if params['type'] == 'scsi': + pool.info['path'] = '/dev/disk/by-path' + pool.info['source'] = params['source'] + if not pool.info['source'].get('adapter_name'): + raise KeyError('adapter_name') + for vol in ['unit:0:0:1','unit:0:0:2', + 'unit:0:0:3','unit:0:0:4']: + mockvol = MockStorageVolume(vol, name, self._def_lun(vol)) + pool._volumes[vol] = mockvol + else: + pool.info['path'] = params['path'] + if params['type'] in ['dir','scsi']: pool.info['autostart'] = True else: pool.info['autostart'] = False @@ -388,6 +407,27 @@ class MockModel(object): "Unable to list volumes of inactive storagepool %s" % pool) return res._volumes.keys() + def _def_lun(self, name): + capacity = int(random.uniform(100, 300)) << 20 + path = "/dev/disk/by-path/pci-0000:0e:00.0-fc-0x20999980e52e4492-lun" + return { + "capacity": capacity, + "name": name, + "format": random.choice(['dos','unknown']), + "allocation": capacity, + "path": path + name[-1], + "type": "block" } + + def devices_get_list(self, _cap=None): + return ['scsi_host3', 'scsi_host4','scsi_host5'] + + def device_lookup(self, nodedev_name): + return { + 'name': nodedev_name, + 'adapter_type': 'fc_host', + 'wwnn': uuid.uuid4().hex[:16], + 'wwpn': uuid.uuid4().hex[:16]} + def isopool_lookup(self, name): return {'state': 'active', 'type': 'kimchi-iso'} @@ -789,9 +829,10 @@ class MockStorageVolume(object): self.pool = pool fmt = params.get('format', 'raw') capacity = params.get('capacity', 1024) - self.info = {'type': 'disk', + self.info = {'type': params.get('type','disk'), 'capacity': capacity << 20, - 'allocation': 512, + 'allocation': params.get('allocation','512'), + 'path': params.get('path'), 'format': fmt} if fmt == 'iso': self.info['allocation'] = self.info['capacity'] diff --git a/tests/test_rest.py b/tests/test_rest.py index 0ed293b..8b033ae 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -23,6 +23,7 @@ import base64 import json import os +import random import time 
import unittest @@ -144,6 +145,18 @@ class RestTests(unittest.TestCase): h = {'Accept': 'text/plain'} self.assertHTTPStatus(406, "/", None, 'GET', h) + def test_host_devices(self): + nodedevs = json.loads(self.request('/host/devices').read()) + # Mockmodel brings 3 preconfigured scsi fc_host + self.assertEquals(3, len(nodedevs)) + + nodedev = json.loads(self.request('/host/devices/scsi_host4').read()) + # Mockmodel generates random wwpn and wwnn + self.assertEquals('scsi_host4', nodedev['name']) + self.assertEquals('fc_host', nodedev['adapter_type']) + self.assertEquals(16, len(nodedev['wwpn'])) + self.assertEquals(16, len(nodedev['wwnn'])) + def test_get_vms(self): vms = json.loads(self.request('/vms').read()) self.assertEquals(0, len(vms)) @@ -440,6 +453,40 @@ class RestTests(unittest.TestCase): # Verify the volume was deleted self.assertHTTPStatus(404, vol_uri) + def test_scsi_fc_storage(self): + # Create scsi fc pool + req = json.dumps({'name': 'scsi_fc_pool', + 'type': 'scsi', + 'source': {'adapter_name': 'scsi_host3'}}) + resp = self.request('/storagepools', req, 'POST') + self.assertEquals(201, resp.status) + + # Create template with this pool + req = json.dumps({'name': 'test_fc_pool', 'cdrom': '/nonexistent.iso', + 'storagepool': '/storagepools/scsi_fc_pool'}) + resp = self.request('/templates', req, 'POST') + self.assertEquals(201, resp.status) + + # Test create vms using lun of this pool + ### activate the storage pool + resp = self.request('/storagepools/scsi_fc_pool/activate', '{}', 'POST') + + ### Get scsi pool luns and choose one + resp = self.request('/storagepools/scsi_fc_pool/storagevolumes') + luns = json.loads(resp.read()) + lun_name = random.choice(luns).get('name') + + ### Create vm in scsi pool without volumes: Error + req = json.dumps({'template': '/templates/test_fc_pool'}) + resp = self.request('/vms', req, 'POST') + self.assertEquals(400, resp.status) + + ### Create vm in scsi pool + req = json.dumps({'template': '/templates/test_fc_pool', + 'volumes': [lun_name]}) + resp = self.request('/vms', req, 'POST') + self.assertEquals(201, resp.status) + def test_template_customise_storage(self): req = json.dumps({'name': 'test', 'cdrom': '/nonexistent.iso', 'disks': [{'size': 1}]}) diff --git a/tests/test_storagepool.py b/tests/test_storagepool.py index a3f4983..700c66e 100644 --- a/tests/test_storagepool.py +++ b/tests/test_storagepool.py @@ -141,6 +141,27 @@ class storagepoolTests(unittest.TestCase): <path>/dev/disk/by-id</path> </target> </pool> + """}, + {'def': + {'type': 'scsi', + 'name': 'unitTestSCSIFCPool', + 'path': '/dev/disk/by-path', + 'source': { + 'name': 'scsi_host3', + 'adapter_type': 'fc_host', + 'wwpn': '0123456789abcdef', + 'wwnn': 'abcdef0123456789' }}, + 'xml': + """ + <pool type='scsi'> + <name>unitTestSCSIFCPool</name> + <source> + <adapter type='fc_host' name='scsi_host3' wwnn='abcdef0123456789' wwpn='0123456789abcdef'></adapter> + </source> + <target> + <path>/dev/disk/by-path</path> + </target> + </pool> """}] for poolDef in poolDefs: -- 1.8.5.3

On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch modifies the Mock model functions to allow the user to create a SCSI FC pool in the test environment, then implements functions to test the API and REST layers.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/kimchi/mockmodel.py | 53 +++++++++++++++++++++++++++++++++++++++++------ tests/test_rest.py | 47 +++++++++++++++++++++++++++++++++++++++++ tests/test_storagepool.py | 21 +++++++++++++++++++ 3 files changed, 115 insertions(+), 6 deletions(-)
diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index 4e276eb..f6400a4 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -66,7 +66,8 @@ class MockModel(object): return {'libvirt_stream_protocols': ['http', 'https', 'ftp', 'ftps', 'tftp'], 'qemu_stream': True, 'screenshot': True, - 'system_report_tool': True} + 'system_report_tool': True,
+ 'fc_host_support': True}
As I said before, you don't need to expose it, as it is backend information.
def reset(self): self._mock_vms = {} @@ -155,7 +156,15 @@ class MockModel(object): if icon: vm.info['icon'] = icon
- vm.disk_paths = t.fork_vm_storage(vm_uuid) + pool = t._storage_validate() + if pool.info['type'] == 'scsi': + vm.disk_paths = [] + if not params.get('volumes'): + raise InvalidOperation("Volume list (LUNs names) not given.")
It should be a MissingParameter() error.
+ for vol in params['volumes']: + vm.disk_paths.append(pool._volumes[vol].info['path']) + else: + vm.disk_paths = t.fork_vm_storage(vm_uuid) self._mock_vms[name] = vm return name
@@ -298,8 +307,18 @@ class MockModel(object): name = params['name'] pool = MockStoragePool(name) pool.info['type'] = params['type'] - pool.info['path'] = params['path'] - if params['type'] == 'dir': + if params['type'] == 'scsi': + pool.info['path'] = '/dev/disk/by-path' + pool.info['source'] = params['source'] + if not pool.info['source'].get('adapter_name'): + raise KeyError('adapter_name')
raise MissingParameter()
+ for vol in ['unit:0:0:1','unit:0:0:2', + 'unit:0:0:3','unit:0:0:4']: + mockvol = MockStorageVolume(vol, name, self._def_lun(vol)) + pool._volumes[vol] = mockvol + else: + pool.info['path'] = params['path'] + if params['type'] in ['dir','scsi']: pool.info['autostart'] = True else: pool.info['autostart'] = False @@ -388,6 +407,27 @@ class MockModel(object): "Unable to list volumes of inactive storagepool %s" % pool) return res._volumes.keys()
+ def _def_lun(self, name): + capacity = int(random.uniform(100, 300)) << 20 + path = "/dev/disk/by-path/pci-0000:0e:00.0-fc-0x20999980e52e4492-lun" + return { + "capacity": capacity, + "name": name, + "format": random.choice(['dos','unknown']), + "allocation": capacity, + "path": path + name[-1], + "type": "block" }
This function should be in the MockStorageVolume() class, so that when a new volume is created MockStorageVolume() knows what to do.
+ + def devices_get_list(self, _cap=None): + return ['scsi_host3', 'scsi_host4','scsi_host5'] + + def device_lookup(self, nodedev_name): + return { + 'name': nodedev_name, + 'adapter_type': 'fc_host', + 'wwnn': uuid.uuid4().hex[:16], + 'wwpn': uuid.uuid4().hex[:16]} + def isopool_lookup(self, name): return {'state': 'active', 'type': 'kimchi-iso'} @@ -789,9 +829,10 @@ class MockStorageVolume(object): self.pool = pool fmt = params.get('format', 'raw') capacity = params.get('capacity', 1024) - self.info = {'type': 'disk', + self.info = {'type': params.get('type','disk'), 'capacity': capacity << 20, - 'allocation': 512, + 'allocation': params.get('allocation','512'), + 'path': params.get('path'), 'format': fmt} if fmt == 'iso': self.info['allocation'] = self.info['capacity'] diff --git a/tests/test_rest.py b/tests/test_rest.py index 0ed293b..8b033ae 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -23,6 +23,7 @@ import base64 import json import os +import random import time import unittest
@@ -144,6 +145,18 @@ class RestTests(unittest.TestCase): h = {'Accept': 'text/plain'} self.assertHTTPStatus(406, "/", None, 'GET', h)
+ def test_host_devices(self): + nodedevs = json.loads(self.request('/host/devices').read()) + # Mockmodel brings 3 preconfigured scsi fc_host + self.assertEquals(3, len(nodedevs)) + + nodedev = json.loads(self.request('/host/devices/scsi_host4').read()) + # Mockmodel generates random wwpn and wwnn + self.assertEquals('scsi_host4', nodedev['name']) + self.assertEquals('fc_host', nodedev['adapter_type']) + self.assertEquals(16, len(nodedev['wwpn'])) + self.assertEquals(16, len(nodedev['wwnn'])) + def test_get_vms(self): vms = json.loads(self.request('/vms').read()) self.assertEquals(0, len(vms)) @@ -440,6 +453,40 @@ class RestTests(unittest.TestCase): # Verify the volume was deleted self.assertHTTPStatus(404, vol_uri)
+ def test_scsi_fc_storage(self): + # Create scsi fc pool + req = json.dumps({'name': 'scsi_fc_pool', + 'type': 'scsi', + 'source': {'adapter_name': 'scsi_host3'}}) + resp = self.request('/storagepools', req, 'POST') + self.assertEquals(201, resp.status) + + # Create template with this pool + req = json.dumps({'name': 'test_fc_pool', 'cdrom': '/nonexistent.iso', + 'storagepool': '/storagepools/scsi_fc_pool'}) + resp = self.request('/templates', req, 'POST') + self.assertEquals(201, resp.status) + + # Test create vms using lun of this pool + ### activate the storage pool + resp = self.request('/storagepools/scsi_fc_pool/activate', '{}', 'POST') + + ### Get scsi pool luns and choose one + resp = self.request('/storagepools/scsi_fc_pool/storagevolumes') + luns = json.loads(resp.read()) + lun_name = random.choice(luns).get('name') + + ### Create vm in scsi pool without volumes: Error + req = json.dumps({'template': '/templates/test_fc_pool'}) + resp = self.request('/vms', req, 'POST') + self.assertEquals(400, resp.status) + + ### Create vm in scsi pool + req = json.dumps({'template': '/templates/test_fc_pool', + 'volumes': [lun_name]}) + resp = self.request('/vms', req, 'POST') + self.assertEquals(201, resp.status) +
Please add tests to start/stop and delete the created VM.
def test_template_customise_storage(self): req = json.dumps({'name': 'test', 'cdrom': '/nonexistent.iso', 'disks': [{'size': 1}]}) diff --git a/tests/test_storagepool.py b/tests/test_storagepool.py index a3f4983..700c66e 100644 --- a/tests/test_storagepool.py +++ b/tests/test_storagepool.py @@ -141,6 +141,27 @@ class storagepoolTests(unittest.TestCase): <path>/dev/disk/by-id</path> </target> </pool> + """}, + {'def': + {'type': 'scsi', + 'name': 'unitTestSCSIFCPool', + 'path': '/dev/disk/by-path', + 'source': { + 'name': 'scsi_host3', + 'adapter_type': 'fc_host', + 'wwpn': '0123456789abcdef', + 'wwnn': 'abcdef0123456789' }}, + 'xml': + """ + <pool type='scsi'> + <name>unitTestSCSIFCPool</name> + <source> + <adapter type='fc_host' name='scsi_host3' wwnn='abcdef0123456789' wwpn='0123456789abcdef'></adapter> + </source> + <target> + <path>/dev/disk/by-path</path> + </target> + </pool> """}]
for poolDef in poolDefs:

On 02/10/2014 12:32 PM, Aline Manera wrote:
On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
This patch modifies the Mock model functions to allow the user to create a SCSI FC pool in the test environment, then implements functions to test the API and REST layers.
Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/kimchi/mockmodel.py | 53 +++++++++++++++++++++++++++++++++++++++++------ tests/test_rest.py | 47 +++++++++++++++++++++++++++++++++++++++++ tests/test_storagepool.py | 21 +++++++++++++++++++ 3 files changed, 115 insertions(+), 6 deletions(-)
diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py index 4e276eb..f6400a4 100644 --- a/src/kimchi/mockmodel.py +++ b/src/kimchi/mockmodel.py @@ -66,7 +66,8 @@ class MockModel(object): return {'libvirt_stream_protocols': ['http', 'https', 'ftp', 'ftps', 'tftp'], 'qemu_stream': True, 'screenshot': True, - 'system_report_tool': True} + 'system_report_tool': True,
+ 'fc_host_support': True}
As I said before, you don't need to expose it, as it is backend information.
def reset(self): self._mock_vms = {} @@ -155,7 +156,15 @@ class MockModel(object): if icon: vm.info['icon'] = icon
- vm.disk_paths = t.fork_vm_storage(vm_uuid) + pool = t._storage_validate() + if pool.info['type'] == 'scsi': + vm.disk_paths = [] + if not params.get('volumes'): + raise InvalidOperation("Volume list (LUNs names) not given.")
It should be a MissingParameter() error.
+ for vol in params['volumes']: + vm.disk_paths.append(pool._volumes[vol].info['path']) + else: + vm.disk_paths = t.fork_vm_storage(vm_uuid) self._mock_vms[name] = vm return name
@@ -298,8 +307,18 @@ class MockModel(object): name = params['name'] pool = MockStoragePool(name) pool.info['type'] = params['type'] - pool.info['path'] = params['path'] - if params['type'] == 'dir': + if params['type'] == 'scsi': + pool.info['path'] = '/dev/disk/by-path' + pool.info['source'] = params['source'] + if not pool.info['source'].get('adapter_name'): + raise KeyError('adapter_name')
raise MissingParameter() ok
+ for vol in ['unit:0:0:1','unit:0:0:2', + 'unit:0:0:3','unit:0:0:4']: + mockvol = MockStorageVolume(vol, name, self._def_lun(vol)) + pool._volumes[vol] = mockvol + else: + pool.info['path'] = params['path'] + if params['type'] in ['dir','scsi']: pool.info['autostart'] = True else: pool.info['autostart'] = False @@ -388,6 +407,27 @@ class MockModel(object): "Unable to list volumes of inactive storagepool %s" % pool) return res._volumes.keys()
+ def _def_lun(self, name): + capacity = int(random.uniform(100, 300)) << 20 + path = "/dev/disk/by-path/pci-0000:0e:00.0-fc-0x20999980e52e4492-lun" + return { + "capacity": capacity, + "name": name, + "format": random.choice(['dos','unknown']), + "allocation": capacity, + "path": path + name[-1], + "type": "block" }
This function should be in the MockStorageVolume() class, so that when a new volume is created MockStorageVolume() knows what to do.
ack
+ + def devices_get_list(self, _cap=None): + return ['scsi_host3', 'scsi_host4','scsi_host5'] + + def device_lookup(self, nodedev_name): + return { + 'name': nodedev_name, + 'adapter_type': 'fc_host', + 'wwnn': uuid.uuid4().hex[:16], + 'wwpn': uuid.uuid4().hex[:16]} + def isopool_lookup(self, name): return {'state': 'active', 'type': 'kimchi-iso'} @@ -789,9 +829,10 @@ class MockStorageVolume(object): self.pool = pool fmt = params.get('format', 'raw') capacity = params.get('capacity', 1024) - self.info = {'type': 'disk', + self.info = {'type': params.get('type','disk'), 'capacity': capacity << 20, - 'allocation': 512, + 'allocation': params.get('allocation','512'), + 'path': params.get('path'), 'format': fmt} if fmt == 'iso': self.info['allocation'] = self.info['capacity'] diff --git a/tests/test_rest.py b/tests/test_rest.py index 0ed293b..8b033ae 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -23,6 +23,7 @@ import base64 import json import os +import random import time import unittest
@@ -144,6 +145,18 @@ class RestTests(unittest.TestCase): h = {'Accept': 'text/plain'} self.assertHTTPStatus(406, "/", None, 'GET', h)
+ def test_host_devices(self): + nodedevs = json.loads(self.request('/host/devices').read()) + # Mockmodel brings 3 preconfigured scsi fc_host + self.assertEquals(3, len(nodedevs)) + + nodedev = json.loads(self.request('/host/devices/scsi_host4').read()) + # Mockmodel generates random wwpn and wwnn + self.assertEquals('scsi_host4', nodedev['name']) + self.assertEquals('fc_host', nodedev['adapter_type']) + self.assertEquals(16, len(nodedev['wwpn'])) + self.assertEquals(16, len(nodedev['wwnn'])) + def test_get_vms(self): vms = json.loads(self.request('/vms').read()) self.assertEquals(0, len(vms)) @@ -440,6 +453,40 @@ class RestTests(unittest.TestCase): # Verify the volume was deleted self.assertHTTPStatus(404, vol_uri)
+ def test_scsi_fc_storage(self): + # Create scsi fc pool + req = json.dumps({'name': 'scsi_fc_pool', + 'type': 'scsi', + 'source': {'adapter_name': 'scsi_host3'}}) + resp = self.request('/storagepools', req, 'POST') + self.assertEquals(201, resp.status) + + # Create template with this pool + req = json.dumps({'name': 'test_fc_pool', 'cdrom': '/nonexistent.iso', + 'storagepool': '/storagepools/scsi_fc_pool'}) + resp = self.request('/templates', req, 'POST') + self.assertEquals(201, resp.status) + + # Test create vms using lun of this pool + ### activate the storage pool + resp = self.request('/storagepools/scsi_fc_pool/activate', '{}', 'POST') + + ### Get scsi pool luns and choose one + resp = self.request('/storagepools/scsi_fc_pool/storagevolumes') + luns = json.loads(resp.read()) + lun_name = random.choice(luns).get('name') + + ### Create vm in scsi pool without volumes: Error + req = json.dumps({'template': '/templates/test_fc_pool'}) + resp = self.request('/vms', req, 'POST') + self.assertEquals(400, resp.status) + + ### Create vm in scsi pool + req = json.dumps({'template': '/templates/test_fc_pool', + 'volumes': [lun_name]}) + resp = self.request('/vms', req, 'POST') + self.assertEquals(201, resp.status) +
Please add tests to start/stop and delete the created VM. Done
def test_template_customise_storage(self): req = json.dumps({'name': 'test', 'cdrom': '/nonexistent.iso', 'disks': [{'size': 1}]}) diff --git a/tests/test_storagepool.py b/tests/test_storagepool.py index a3f4983..700c66e 100644 --- a/tests/test_storagepool.py +++ b/tests/test_storagepool.py @@ -141,6 +141,27 @@ class storagepoolTests(unittest.TestCase): <path>/dev/disk/by-id</path> </target> </pool> + """}, + {'def': + {'type': 'scsi', + 'name': 'unitTestSCSIFCPool', + 'path': '/dev/disk/by-path', + 'source': { + 'name': 'scsi_host3', + 'adapter_type': 'fc_host', + 'wwpn': '0123456789abcdef', + 'wwnn': 'abcdef0123456789' }}, + 'xml': + """ + <pool type='scsi'> + <name>unitTestSCSIFCPool</name> + <source> + <adapter type='fc_host' name='scsi_host3' wwnn='abcdef0123456789' wwpn='0123456789abcdef'></adapter> + </source> + <target> + <path>/dev/disk/by-path</path> + </target> + </pool> """}]
for poolDef in poolDefs:

The UI looks distorted when all options are available to create the storage pool: http://picpaste.com/_8EA4BB37FF97475-ItaBa6cJ.jpg On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
V4: - Implements mockmodel and tests - Fix UI - Fix other minor issues
V3: - Changed API to only receive the scsi host name when creating new pool - Changed API to require LUN when creating new VM on SCSI pool - Created feature test and removed libvirt test function - Rebased with new model structure - Added error function handlers to UIs - Fixed LUN selection window
V2:
- Implements Fibre Channel devices discover in the host - Allow vms_create receive a volume to create the disk (if pool is SCSI) - Create basic UI to select SCSI Host when creating SCSI FC pool - Draft of UI to select LUN to create new VM when template has a SCSI pool configured. (Need help of UI guys here!)
Rodrigo Trujillo (5): Storagepool SCSI/FC: Implement node devices API backend Storagepool SCSI/FC: Backend implementation Storagepool SCSI/FC: Implement UI for FC scsi_host pool Storagepool SCSI/FC: Modifies UI flow to select a LUN to new VM Storagepool SCSI/FC: Modifies mockmodel and implements tests for FC pool
docs/API.md | 5 ++- src/kimchi/API.json | 14 +++++- src/kimchi/control/host.py | 16 +++++++ src/kimchi/featuretests.py | 27 ++++++++++++ src/kimchi/mockmodel.py | 53 ++++++++++++++++++++--- src/kimchi/model/config.py | 5 ++- src/kimchi/model/host.py | 54 +++++++++++++++++++++++ src/kimchi/model/libvirtstoragepool.py | 48 ++++++++++++++++++++- src/kimchi/model/storagepools.py | 22 ++++++++-- src/kimchi/model/templates.py | 5 +++ src/kimchi/model/vms.py | 25 ++++++++++- src/kimchi/vmtemplate.py | 31 +++++++++++++- tests/test_rest.py | 47 ++++++++++++++++++++ tests/test_storagepool.py | 21 +++++++++ ui/js/src/kimchi.api.js | 24 +++++++++++ ui/js/src/kimchi.guest_add_main.js | 73 ++++++++++++++++++++++++++++++-- ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++- ui/pages/i18n.html.tmpl | 5 +++ ui/pages/storagepool-add.html.tmpl | 12 ++++++ 19 files changed, 510 insertions(+), 23 deletions(-)

On 02/10/2014 11:22 AM, Aline Manera wrote:
The UI looks distorted when all options are available to create the storage pool:
And when creating a virtual machine from a Template which points to a SCSI pool, if I don't select any LUN to create the VM disk, the popup is closed and an error message appears at the top of the page: "No LUN selected". You should display the error with the window still open, so the user has a chance to update the selection, and only close the popup when the required field is provided.
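For illustration, a minimal sketch of a button handler with that behavior, assuming the same jQuery UI dialog, formData and addGuest from the patch; the i18n key for the error message is a placeholder:

    // Sketch only: keep the LUN dialog open until a LUN is actually selected.
    var lunDialogButtons = function(formData) {
        return [ {
            text : i18n.action_select_lun,
            'class' : 'ui-button-primary',
            click : function() {
                var lunName = $('input:radio[name=lun]:checked').val();
                if (lunName === undefined) {
                    // Show the error and leave the popup open so the user
                    // can pick a LUN and try again.
                    kimchi.message.error(i18n['msg.vm.lun.required']);
                    return;
                }
                formData.volumes = [ lunName ];
                addGuest(formData);
                $(this).dialog('close');
            }
        } ];
    };

The dialog would then be created with buttons : lunDialogButtons(formData) and closed only from inside the click handler once the selection is valid.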
On 02/05/2014 12:18 PM, Rodrigo Trujillo wrote:
V4: - Implements mockmodel and tests - Fix UI - Fix other minor issues
V3: - Changed API to only receive the scsi host name when creating new pool - Changed API to require LUN when creating new VM on SCSI pool - Created feature test and removed libvirt test function - Rebased with new model structure - Added error function handlers to UIs - Fixed LUN selection window
V2:
- Implements Fibre Channel devices discover in the host - Allow vms_create receive a volume to create the disk (if pool is SCSI) - Create basic UI to select SCSI Host when creating SCSI FC pool - Draft of UI to select LUN to create new VM when template has a SCSI pool configured. (Need help of UI guys here!)
Rodrigo Trujillo (5): Storagepool SCSI/FC: Implement node devices API backend Storagepool SCSI/FC: Backend implementation Storagepool SCSI/FC: Implement UI for FC scsi_host pool Storagepool SCSI/FC: Modifies UI flow to select a LUN to new VM Storagepool SCSI/FC: Modifies mockmodel and implements tests for FC pool
docs/API.md | 5 ++- src/kimchi/API.json | 14 +++++- src/kimchi/control/host.py | 16 +++++++ src/kimchi/featuretests.py | 27 ++++++++++++ src/kimchi/mockmodel.py | 53 ++++++++++++++++++++--- src/kimchi/model/config.py | 5 ++- src/kimchi/model/host.py | 54 +++++++++++++++++++++++ src/kimchi/model/libvirtstoragepool.py | 48 ++++++++++++++++++++- src/kimchi/model/storagepools.py | 22 ++++++++-- src/kimchi/model/templates.py | 5 +++ src/kimchi/model/vms.py | 25 ++++++++++- src/kimchi/vmtemplate.py | 31 +++++++++++++- tests/test_rest.py | 47 ++++++++++++++++++++ tests/test_storagepool.py | 21 +++++++++ ui/js/src/kimchi.api.js | 24 +++++++++++ ui/js/src/kimchi.guest_add_main.js | 73 ++++++++++++++++++++++++++++++-- ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++- ui/pages/i18n.html.tmpl | 5 +++ ui/pages/storagepool-add.html.tmpl | 12 ++++++ 19 files changed, 510 insertions(+), 23 deletions(-)

On 02/10/2014 11:35 AM, Aline Manera wrote:
On 02/10/2014 11:22 AM, Aline Manera wrote:
The UI looks distorted when all options are available to create the storage pool:
And when creating a virtual machine from a Template that points to a SCSI pool, if I don't select any LUN to create the VM disk, the popup is closed and an error message appears at the top of the page: "No LUN selected".
You should display the error with the window still open, so the user has a chance to fix it, and only close the popup once the required field is provided.
Done.

On 02/10/2014 11:22 AM, Aline Manera wrote:
The UI looks distorted when all options are available to create the storage pool:
Fixed.

I am not able to delete a VM created with a SCSI pool. From the log, I got:

Request Headers:
  COOKIE: kimchi=b5bde7b2dba4e0423c9e529b36c0b6f941eb589e; userid=alinefm
  HOST: localhost:8000
  CONNECTION: keep-alive
  Remote-Addr: 127.0.0.1
  ACCEPT: application/json, text/javascript, */*; q=0.01
  USER-AGENT: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0
  X-REQUESTED-WITH: XMLHttpRequest
  ACCEPT-LANGUAGE: en-us,en;q=0.7,pt-br;q=0.3
  Content-Type: application/json
  REFERER: http://localhost:8000/
  ACCEPT-ENCODING: gzip, deflate

[10/Feb/2014:11:35:34] HTTP Traceback (most recent call last):
  File "/usr/lib/python2.7/dist-packages/cherrypy/_cprequest.py", line 656, in respond
    response.body = self.handler()
  File "/usr/lib/python2.7/dist-packages/cherrypy/lib/encoding.py", line 188, in __call__
    self.body = self.oldhandler(*args, **kwargs)
  File "/usr/lib/python2.7/dist-packages/cherrypy/_cpdispatch.py", line 34, in __call__
    return self.callable(*self.args, **self.kwargs)
  File "/home/alinefm/kimchi/src/kimchi/control/base.py", line 124, in index
    return self.delete()
  File "/home/alinefm/kimchi/src/kimchi/control/base.py", line 100, in delete
    fn(*self.model_args)
  File "/home/alinefm/kimchi/src/kimchi/mockmodel.py", line 119, in vm_delete
    self.storagevolume_delete(disk['pool'], disk['volume'])
TypeError: string indices must be integers, not str

Fixed.
On 02/10/2014 11:36 AM, Aline Manera wrote:
I am not able to delete a VM created with a SCSI pool.
From the log, I got:

Request Headers:
  COOKIE: kimchi=b5bde7b2dba4e0423c9e529b36c0b6f941eb589e; userid=alinefm
  HOST: localhost:8000
  CONNECTION: keep-alive
  Remote-Addr: 127.0.0.1
  ACCEPT: application/json, text/javascript, */*; q=0.01
  USER-AGENT: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0
  X-REQUESTED-WITH: XMLHttpRequest
  ACCEPT-LANGUAGE: en-us,en;q=0.7,pt-br;q=0.3
  Content-Type: application/json
  REFERER: http://localhost:8000/
  ACCEPT-ENCODING: gzip, deflate

[10/Feb/2014:11:35:34] HTTP Traceback (most recent call last):
  File "/usr/lib/python2.7/dist-packages/cherrypy/_cprequest.py", line 656, in respond
    response.body = self.handler()
  File "/usr/lib/python2.7/dist-packages/cherrypy/lib/encoding.py", line 188, in __call__
    self.body = self.oldhandler(*args, **kwargs)
  File "/usr/lib/python2.7/dist-packages/cherrypy/_cpdispatch.py", line 34, in __call__
    return self.callable(*self.args, **self.kwargs)
  File "/home/alinefm/kimchi/src/kimchi/control/base.py", line 124, in index
    return self.delete()
  File "/home/alinefm/kimchi/src/kimchi/control/base.py", line 100, in delete
    fn(*self.model_args)
  File "/home/alinefm/kimchi/src/kimchi/mockmodel.py", line 119, in vm_delete
    self.storagevolume_delete(disk['pool'], disk['volume'])
TypeError: string indices must be integers, not str
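
[Editor's note] The traceback indicates that, in the mock model, a VM disk entry ended up as a plain string where vm_delete expected a dict with 'pool' and 'volume' keys. The actual fix applied in the patch is not shown in this thread; the following is only a minimal, hypothetical sketch of a delete path that tolerates both shapes. The MockModel class, its attributes, and the storagevolume_delete signature below are assumptions for illustration, not the real kimchi mockmodel code.

class MockModel(object):
    """Hypothetical, stripped-down mock model illustrating the
    string-vs-dict disk issue from the traceback above."""

    def __init__(self):
        # vm name -> list of disk entries; some entries may be plain
        # volume-name strings, others dicts with 'pool' and 'volume'.
        self._vm_disks = {}
        self._default_pool = 'default'

    def storagevolume_delete(self, pool, volume):
        print('deleting volume %s from pool %s' % (volume, pool))

    def vm_delete(self, name):
        for disk in self._vm_disks.get(name, []):
            if isinstance(disk, dict):
                pool = disk.get('pool', self._default_pool)
                volume = disk.get('volume')
            else:
                # A bare string holds only the volume name, which is what
                # triggers "string indices must be integers" if indexed
                # like a dict.
                pool = self._default_pool
                volume = disk
            if volume is not None:
                self.storagevolume_delete(pool, volume)
        self._vm_disks.pop(name, None)

For example, a VM whose disk list contains both 'vol1' and {'pool': 'scsi_pool', 'volume': 'unit:0:0:1'} would then be deleted without raising the TypeError above:

model = MockModel()
model._vm_disks['vm1'] = ['vol1', {'pool': 'scsi_pool', 'volume': 'unit:0:0:1'}]
model.vm_delete('vm1')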
Participants (3): Aline Manera, Rodrigo Trujillo, Sheldon