[PATCH V3 0/4] (WIP) Storagepool SCSI/FC

Still working on test cases and mockmodel V3: - Changed API to only receive the scsi host name when creating new pool - Changed API to require LUN when creating new VM on SCSI pool - Created feature test and removed libvirt test function - Rebased with new model structure - Added error function handlers to UIs - Fixed LUN selection window V2: - Implements Fibre Channel device discovery in the host - Allow vms_create to receive a volume to create the disk (if pool is SCSI) - Create basic UI to select SCSI Host when creating SCSI FC pool - Draft of UI to select LUN to create new VM when template has a SCSI pool configured. (Need help of UI guys here!) Rodrigo Trujillo (4): Storagepool SCSI/FC: Implement node devices API backend Storagepool SCSI/FC: Backend implementation Storagepool SCSI/FC: Implement UI for FC scsi_host pool Storagepool SCSI/FC: Modifies UI flow to select a LUN to new VM docs/API.md | 5 ++- src/kimchi/API.json | 14 ++++++- src/kimchi/control/host.py | 16 +++++++ src/kimchi/featuretests.py | 27 ++++++++++++ src/kimchi/model/config.py | 5 ++- src/kimchi/model/host.py | 54 ++++++++++++++++++++++++ src/kimchi/model/libvirtstoragepool.py | 45 ++++++++++++++++++++ src/kimchi/model/storagepools.py | 22 ++++++++-- src/kimchi/model/templates.py | 5 +++ src/kimchi/model/vms.py | 25 ++++++++++- src/kimchi/vmtemplate.py | 31 +++++++++++++- ui/js/src/kimchi.api.js | 24 +++++++++++ ui/js/src/kimchi.guest_add_main.js | 72 ++++++++++++++++++++++++++++++-- ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++- ui/pages/i18n.html.tmpl | 4 ++ ui/pages/storagepool-add.html.tmpl | 12 ++++++ 16 files changed, 392 insertions(+), 15 deletions(-) -- 1.8.5.3

In order to implement support to SCSI/FC UI, it is necessary to retrieve node devices. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- src/kimchi/control/host.py | 16 ++++++++++++++ src/kimchi/model/host.py | 54 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/src/kimchi/control/host.py b/src/kimchi/control/host.py index 053c822..936d298 100644 --- a/src/kimchi/control/host.py +++ b/src/kimchi/control/host.py @@ -36,6 +36,7 @@ class Host(Resource): self.shutdown = self.generate_action_handler('shutdown') self.stats = HostStats(self.model) self.partitions = Partitions(self.model) + self.devices = Devices(self.model) @property def data(self): @@ -61,3 +62,18 @@ class Partition(Resource): @property def data(self): return self.info + + +class Devices(Collection): + def __init__(self, model): + super(Devices, self).__init__(model) + self.resource = Device + + +class Device(Resource): + def __init__(self, model, id): + super(Device, self).__init__(model, id) + + @property + def data(self): + return self.info diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py index d5fc124..0981aba 100644 --- a/src/kimchi/model/host.py +++ b/src/kimchi/model/host.py @@ -30,6 +30,7 @@ from cherrypy.process.plugins import BackgroundTask from kimchi import disks from kimchi import netinfo +from kimchi import xmlutils from kimchi.basemodel import Singleton from kimchi.exception import NotFoundError, OperationFailed from kimchi.model.vms import DOM_STATE_MAP @@ -199,3 +200,56 @@ class PartitionModel(object): raise NotFoundError("Partition %s not found in the host" % name) return disks.get_partition_details(name) + + +class DevicesModel(object): + def __init__(self, **kargs): + self.conn = kargs['conn'] + + def get_list(self, _cap=None): + conn = self.conn.get() + if _cap == None: + dev_names = [name.name() for name in conn.listAllDevices(0)] + elif _cap == 'fc_host': + dev_names = self._get_devices_fc_host() + 
else: + # Get devices with required capability + dev_names = conn.listDevices(_cap,0) + return dev_names + + def _get_devices_fc_host(self): + conn = self.conn.get() + # Libvirt < 1.0.5 does not support fc_host capability + if not self.fc_host_support: + ret = [] + scsi_hosts = conn.listDevices('scsi_host',0) + for host in scsi_hosts: + xml = conn.nodeDeviceLookupByName(host).XMLDesc(0) + path = '/device/capability/capability/@type' + if 'fc_host' in xmlutils.xpath_get_text(xml, path): + ret.append(host) + return ret + return conn.listDevices('fc_host',0) + + +class DeviceModel(object): + def __init__(self, **kargs): + self.conn = kargs['conn'] + + def lookup(self, nodedev_name): + conn = self.conn.get() + try: + dev_xml = conn.nodeDeviceLookupByName(nodedev_name).XMLDesc(0) + except: + raise NotFoundError('Node device "%s" not found' % nodedev_name) + cap_type = xmlutils.xpath_get_text( + dev_xml, '/device/capability/capability/@type') + wwnn = xmlutils.xpath_get_text( + dev_xml, '/device/capability/capability/wwnn') + wwpn = xmlutils.xpath_get_text( + dev_xml, '/device/capability/capability/wwpn') + return { + 'name': nodedev_name, + 'adapter_type': cap_type, + 'wwnn': wwnn, + 'wwpn': wwpn} -- 1.8.5.3

This patch creates functions that allow kimchi users to create an libvirt SCSI storagepool using the rest API. This patch creates the feature test to check fc_host capability in libvirt. This patch implements basic routines to add a disk (scsi) to a new vm template, based on given volumes (LUN name) from UI or API directly. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- docs/API.md | 5 +++- src/kimchi/API.json | 14 +++++++++-- src/kimchi/featuretests.py | 27 ++++++++++++++++++++ src/kimchi/model/config.py | 5 +++- src/kimchi/model/host.py | 4 +-- src/kimchi/model/libvirtstoragepool.py | 45 ++++++++++++++++++++++++++++++++++ src/kimchi/model/storagepools.py | 22 ++++++++++++++--- src/kimchi/model/templates.py | 5 ++++ src/kimchi/model/vms.py | 25 +++++++++++++++++-- src/kimchi/vmtemplate.py | 31 ++++++++++++++++++++++- 10 files changed, 171 insertions(+), 12 deletions(-) diff --git a/docs/API.md b/docs/API.md index 580728c..7f0628d 100644 --- a/docs/API.md +++ b/docs/API.md @@ -55,6 +55,8 @@ the following general conventions: Independent Computing Environments * null: Graphics is disabled or type not supported * listen: The network which the vnc/spice server listens on. + * volumes *(optional)*: List of Fibre channel LUN names to be assigned as + disk to VM. Required if pool is type SCSI. ### Resource: Virtual Machine @@ -269,7 +271,7 @@ A interface represents available network interface on VM. * **POST**: Create a new Storage Pool * name: The name of the Storage Pool. * type: The type of the defined Storage Pool. - Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi' + Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi, scsi' * path: The path of the defined Storage Pool. For 'kimchi-iso' pool refers to targeted deep scan path. Pool types: 'dir', 'kimchi-iso'. @@ -288,6 +290,7 @@ A interface represents available network interface on VM. Pool types: 'iscsi'. * username: Login username of the iSCSI target. 
* password: Login password of the iSCSI target. + * adapter_name: *(optional) Scsi host name. ### Resource: Storage Pool diff --git a/src/kimchi/API.json b/src/kimchi/API.json index 08c77c5..842fb11 100644 --- a/src/kimchi/API.json +++ b/src/kimchi/API.json @@ -37,7 +37,7 @@ "type": { "description": "The type of the defined Storage Pool", "type": "string", - "pattern": "^dir|netfs|logical|kimchi-iso$", + "pattern": "^dir|netfs|logical|kimchi-iso|scsi$", "required": true }, "path": { @@ -76,6 +76,10 @@ "minimum": 1, "maximum": 65535 }, + "adapter_name": { + "description": "SCSI host name", + "type": "string" + }, "auth": { "description": "Storage back-end authentication information", "type": "object", @@ -112,7 +116,13 @@ "type": "string", "pattern": "^/storagepools/[^/]+/?$" }, - "graphics": { "$ref": "#/kimchitype/graphics" } + "graphics": { "$ref": "#/kimchitype/graphics" }, + "volumes": { + "description": "list of scsi volumes to be assigned to the new VM.", + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true + } } }, "vm_update": { diff --git a/src/kimchi/featuretests.py b/src/kimchi/featuretests.py index 1557dd9..5ef3da0 100644 --- a/src/kimchi/featuretests.py +++ b/src/kimchi/featuretests.py @@ -56,6 +56,18 @@ ISO_STREAM_XML = """ </devices> </domain>""" +SCSI_FC_XML = """ +<pool type='scsi'> + <name>TEST_SCSI_FC_POOL</name> + <source> + <adapter type='fc_host' wwnn='1234567890abcdef' wwpn='abcdef1234567890'/> + </source> + <target> + <path>/dev/disk/by-path</path> + </target> +</pool> +""" + class FeatureTests(object): @@ -123,3 +135,18 @@ class FeatureTests(object): return False return True + + @staticmethod + def libvirt_support_fc_host(): + try: + conn = libvirt.open('qemu:///system') + pool = None + pool = conn.storagePoolDefineXML(SCSI_FC_XML, 0) + except libvirt.libvirtError as e: + if e.get_error_code() == 27: + # Libvirt requires adapter name, not needed when supports to FC + return False + finally: + pool is None or 
pool.undefine() + conn is None or conn.close() + return True diff --git a/src/kimchi/model/config.py b/src/kimchi/model/config.py index 0e66e02..6eb0e10 100644 --- a/src/kimchi/model/config.py +++ b/src/kimchi/model/config.py @@ -49,6 +49,7 @@ class CapabilitiesModel(object): self.qemu_stream = False self.qemu_stream_dns = False self.libvirt_stream_protocols = [] + self.fc_host_support = False # Subscribe function to set host capabilities to be run when cherrypy # server is up @@ -60,6 +61,7 @@ class CapabilitiesModel(object): self.qemu_stream = FeatureTests.qemu_supports_iso_stream() self.qemu_stream_dns = FeatureTests.qemu_iso_stream_dns() self.nfs_target_probe = FeatureTests.libvirt_support_nfs_probe() + self.fc_host_support = FeatureTests.libvirt_support_fc_host() self.libvirt_stream_protocols = [] for p in ['http', 'https', 'ftp', 'ftps', 'tftp']: @@ -75,7 +77,8 @@ class CapabilitiesModel(object): return {'libvirt_stream_protocols': self.libvirt_stream_protocols, 'qemu_stream': self.qemu_stream, 'screenshot': VMScreenshot.get_stream_test_result(), - 'system_report_tool': bool(report_tool)} + 'system_report_tool': bool(report_tool), + 'fc_host_support': self.fc_host_support} class DistrosModel(object): diff --git a/src/kimchi/model/host.py b/src/kimchi/model/host.py index 0981aba..d068614 100644 --- a/src/kimchi/model/host.py +++ b/src/kimchi/model/host.py @@ -218,7 +218,7 @@ class DevicesModel(object): return dev_names def _get_devices_fc_host(self): - conn = self.conn.get() + conn = self.conn.get() # Libvirt < 1.0.5 does not support fc_host capability if not self.fc_host_support: ret = [] @@ -226,7 +226,7 @@ class DevicesModel(object): for host in scsi_hosts: xml = conn.nodeDeviceLookupByName(host).XMLDesc(0) path = '/device/capability/capability/@type' - if 'fc_host' in xmlutils.xpath_get_text(xml, path): + if 'fc_host' in xmlutils.xpath_get_text(xml, path): ret.append(host) return ret return conn.listDevices('fc_host',0) diff --git 
a/src/kimchi/model/libvirtstoragepool.py b/src/kimchi/model/libvirtstoragepool.py index f4dbf2e..8374560 100644 --- a/src/kimchi/model/libvirtstoragepool.py +++ b/src/kimchi/model/libvirtstoragepool.py @@ -175,6 +175,51 @@ class LogicalPoolDef(StoragePoolDef): return xml +class ScsiPoolDef(StoragePoolDef): + poolType = 'scsi' + + def prepare(self, conn=None): + tmp_name = self.poolArgs['source']['name'] + self.poolArgs['source']['name'] = tmp_name.replace('scsi_','') + # fc_host adapters type are only available in libvirt >= 1.0.5 + if not self.poolArgs['fc_host_support']: + self.poolArgs['source']['adapter_type'] = 'scsi_host' + msg = "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; "\ + "setting SCSI adapter type as 'scsi_host'; "\ + "ignoring wwnn and wwpn." %tmp_name + kimchi_log.info(msg) + # Path for Fibre Channel scsi hosts + self.poolArgs['path'] = '/dev/disk/by-path' + if not self.poolArgs['source']['adapter_type']: + self.poolArgs['source']['adapter_type'] = 'scsi_host' + + @property + def xml(self): + # Required parameters + # name: + # source[adapter_type]: + # source[name]: + # source[wwnn]: + # source[wwpn]: + # path: + + xml = """ + <pool type='scsi'> + <name>{name}</name> + <source> + <adapter type='{source[adapter_type]}'\ + name='{source[name]}'\ + wwnn='{source[wwnn]}'\ + wwpn='{source[wwpn]}'/> + </source> + <target> + <path>{path}</path> + </target> + </pool> + """.format(**self.poolArgs) + return xml + + class IscsiPoolDef(StoragePoolDef): poolType = 'iscsi' diff --git a/src/kimchi/model/storagepools.py b/src/kimchi/model/storagepools.py index 233a8a7..9be7dad 100644 --- a/src/kimchi/model/storagepools.py +++ b/src/kimchi/model/storagepools.py @@ -26,6 +26,8 @@ from kimchi import xmlutils from kimchi.scan import Scanner from kimchi.exception import InvalidOperation, MissingParameter from kimchi.exception import NotFoundError, OperationFailed +from kimchi.model.config import CapabilitiesModel +from kimchi.model.host import DeviceModel 
from kimchi.model.libvirtstoragepool import StoragePoolDef from kimchi.utils import add_task, kimchi_log @@ -38,7 +40,11 @@ POOL_STATE_MAP = {0: 'inactive', 4: 'inaccessible'} STORAGE_SOURCES = {'netfs': {'addr': '/pool/source/host/@name', - 'path': '/pool/source/dir/@path'}} + 'path': '/pool/source/dir/@path'}, + 'scsi': {'adapter_type': '/pool/source/adapter/@type', + 'adapter_name': '/pool/source/adapter/@name', + 'wwnn': '/pool/source/adapter/@wwnn', + 'wwpn': '/pool/source/adapter/@wwpn'}} class StoragePoolsModel(object): @@ -47,6 +53,8 @@ class StoragePoolsModel(object): self.objstore = kargs['objstore'] self.scanner = Scanner(self._clean_scan) self.scanner.delete() + self.caps = CapabilitiesModel() + self.device = DeviceModel(**kargs) def get_list(self): try: @@ -67,6 +75,13 @@ class StoragePoolsModel(object): if params['type'] == 'kimchi-iso': task_id = self._do_deep_scan(params) + + if params['type'] == 'scsi': + extra_params = self.device.lookup( + params['source']['adapter_name']) + params['source'].update(extra_params) + params['fc_host_support'] = self.caps.fc_host_support + poolDef = StoragePoolDef.create(params) poolDef.prepare(conn) xml = poolDef.xml @@ -84,9 +99,10 @@ class StoragePoolsModel(object): return name pool = conn.storagePoolDefineXML(xml, 0) - if params['type'] in ['logical', 'dir', 'netfs']: + if params['type'] in ['logical', 'dir', 'netfs', 'scsi']: pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) - # autostart dir and logical storage pool created from kimchi + # autostart dir, logical, netfs and scsi storage pools created + # from kimchi pool.setAutostart(1) else: # disable autostart for others diff --git a/src/kimchi/model/templates.py b/src/kimchi/model/templates.py index 03632a6..b004578 100644 --- a/src/kimchi/model/templates.py +++ b/src/kimchi/model/templates.py @@ -161,6 +161,11 @@ class LibvirtVMTemplate(VMTemplate): xml = pool.XMLDesc(0) return xmlutils.xpath_get_text(xml, "/pool/target/path")[0] + def _get_storage_type(self): 
+ pool = self._storage_validate() + xml = pool.XMLDesc(0) + return xmlutils.xpath_get_text(xml, "/pool/@type")[0] + def fork_vm_storage(self, vm_uuid): # Provision storage: # TODO: Rebase on the storage API once upstream diff --git a/src/kimchi/model/vms.py b/src/kimchi/model/vms.py index e9f7753..fb77191 100644 --- a/src/kimchi/model/vms.py +++ b/src/kimchi/model/vms.py @@ -155,6 +155,11 @@ class VMsModel(object): 'diskRdKB': diskRdKB, 'diskWrKB': diskWrKB}) + def _get_volume_path(self, pool, vol): + conn = self.conn.get() + pool = conn.storagePoolLookupByName(pool) + return pool.storageVolLookupByName(vol).path() + def create(self, params): conn = self.conn.get() t_name = template_name_from_uri(params['template']) @@ -169,6 +174,7 @@ class VMsModel(object): pool_uri = params.get('storagepool') if pool_uri: vm_overrides['storagepool'] = pool_uri + vm_overrides['fc_host_support'] = self.caps.fc_host_support t = TemplateModel.get_template(t_name, self.objstore, self.conn, vm_overrides) @@ -177,7 +183,21 @@ class VMsModel(object): raise InvalidOperation(err) t.validate() - vol_list = t.fork_vm_storage(vm_uuid) + + # If storagepool is SCSI, volumes will be LUNs and must be passed by + # the user from UI or manually. 
+ vol_list = [] + if t._get_storage_type() == 'scsi': + if not params.get('volumes'): + raise InvalidOperation("Volume list (LUNs names) not given.") + else: + # Get system path of the LUNs + pool = t.info['storagepool'].split('/')[-1] + for vol in params.get('volumes'): + path = self._get_volume_path(pool, vol) + vol_list.append((vol, path)) + else: + vol_list = t.fork_vm_storage(vm_uuid) # Store the icon for displaying later icon = t.info.get('icon') @@ -193,7 +213,8 @@ class VMsModel(object): xml = t.to_vm_xml(name, vm_uuid, libvirt_stream=libvirt_stream, qemu_stream_dns=self.caps.qemu_stream_dns, - graphics=graphics) + graphics=graphics, + volumes=vol_list) try: conn.defineXML(xml.encode('utf-8')) diff --git a/src/kimchi/vmtemplate.py b/src/kimchi/vmtemplate.py index 58147e3..368d0b4 100644 --- a/src/kimchi/vmtemplate.py +++ b/src/kimchi/vmtemplate.py @@ -49,6 +49,7 @@ class VMTemplate(object): """ self.name = args['name'] self.info = {} + self.fc_host_support = args.get('fc_host_support') # Identify the cdrom if present iso_distro = iso_version = 'unknown' @@ -180,6 +181,25 @@ class VMTemplate(object): graphics_xml = graphics_xml + spicevmc_xml return graphics_xml + def _get_scsi_disks_xml(self, luns): + ret = "" + # Passthrough configuration + disk_xml = """ + <disk type='volume' device='lun'> + <driver name='qemu' type='raw'/> + <source dev='%(src)s'/> + <target dev='%(dev)s' bus='scsi'/> + </disk>""" + if not self.fc_host_support: + disk_xml = disk_xml.replace('volume','block') + + # Creating disk xml for each lun passed + for index,(lun, path) in enumerate(luns): + dev = "sd%s" % string.lowercase[index] + params = {'src': path, 'dev': dev} + ret = ret + disk_xml % params + return ret + def to_volume_list(self, vm_uuid): storage_path = self._get_storage_path() ret = [] @@ -225,7 +245,6 @@ class VMTemplate(object): params = dict(self.info) params['name'] = vm_name params['uuid'] = vm_uuid - params['disks'] = self._get_disks_xml(vm_uuid) params['networks'] = 
self._get_networks_xml() params['qemu-namespace'] = '' params['cdroms'] = '' @@ -233,6 +252,13 @@ class VMTemplate(object): graphics = kwargs.get('graphics') params['graphics'] = self._get_graphics_xml(graphics) + # Current implementation just allows to create disk in one single + # storage pool, so we cannot mix the types (scsi volumes vs img file) + if self._get_storage_type() == 'scsi': + params['disks'] = self._get_scsi_disks_xml(kwargs.get('volumes')) + else: + params['disks'] = self._get_disks_xml(vm_uuid) + qemu_stream_dns = kwargs.get('qemu_stream_dns', False) libvirt_stream = kwargs.get('libvirt_stream', False) cdrom_xml = self._get_cdrom_xml(libvirt_stream, qemu_stream_dns) @@ -292,3 +318,6 @@ class VMTemplate(object): def _get_storage_path(self): return '' + + def _get_storage_type(self): + return '' -- 1.8.5.3

This patch modifies the storagepool add user interface in order to show all Fibre Channel scsi hosts found in the host system and let user to create a pool attached to this host (the LUNs will be the volumes). A second option to use and enable FC storages is when a LUN is assigned as a pool of FS type, hosting guest images. This second option will be implement in the future. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 11 ++++++++ ui/js/src/kimchi.storagepool_add_main.js | 46 +++++++++++++++++++++++++++++++- ui/pages/i18n.html.tmpl | 4 +++ ui/pages/storagepool-add.html.tmpl | 12 +++++++++ 4 files changed, 72 insertions(+), 1 deletion(-) diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 63ddd88..66fc41e 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -731,5 +731,16 @@ var kimchi = { success : suc, error : err }); + }, + + listFCHosts : function(suc, err) { + kimchi.requestJSON({ + url : kimchi.url + 'host/devices?_cap=fc_host', + type : 'GET', + contentType : 'application/json', + dataType : 'json', + success : suc, + error : err + }); } }; diff --git a/ui/js/src/kimchi.storagepool_add_main.js b/ui/js/src/kimchi.storagepool_add_main.js index e5922b3..1f1ec41 100644 --- a/ui/js/src/kimchi.storagepool_add_main.js +++ b/ui/js/src/kimchi.storagepool_add_main.js @@ -40,7 +40,21 @@ kimchi.initStorageAddPage = function() { label : "iSCSI", value : "iscsi" } ]; - kimchi.listHostPartitions(function(data) { + kimchi.listFCHosts(function(data){ + if (data.length > 0) { + options.push( { + label : "SCSI Fibre Channel", + value : "scsi" + }); + } + var scsiFCHtml = $('#scsiFCTmpl').html(); + var scsiFCHostListHtml = ''; + $.each(data, function(index, value) { + scsiFCHostListHtml += kimchi.template(scsiFCHtml, value); + }); + $('.scsifc-hosts').html(scsiFCHostListHtml); + + kimchi.listHostPartitions(function(data) { if (data.length > 0) { options.push({ label : "LOGICAL", @@ -107,21 
+121,31 @@ kimchi.initStorageAddPage = function() { $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'netfs') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').removeClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } else if ($(this).val() === 'iscsi') { $('.path-section').addClass('tmpl-html'); $('.logical-section').addClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').removeClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); + } else if ($(this).val() === 'scsi') { + $('.path-section').addClass('tmpl-html'); + $('.logical-section').addClass('tmpl-html'); + $('.nfs-section').addClass('tmpl-html'); + $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').removeClass('tmpl-html'); } else if ($(this).val() === 'logical') { $('.path-section').addClass('tmpl-html'); $('.logical-section').removeClass('tmpl-html'); $('.nfs-section').addClass('tmpl-html'); $('.iscsi-section').addClass('tmpl-html'); + $('.scsifc-section').addClass('tmpl-html'); } }); $('#authId').click(function() { @@ -134,6 +158,10 @@ kimchi.initStorageAddPage = function() { $('#iscsiportId').keyup(function(event) { $(this).toggleClass("invalid-field",!/^[0-9]+$/.test($(this).val())); }); + }); + }, function() { + // listFCHosts error handler + kimchi.message.error(i18n['msg.kimchi.list.fchosts.fail']); }); }; @@ -154,6 +182,8 @@ kimchi.validateForm = function() { return kimchi.validateNfsForm(); } else if (poolType === "iscsi") { return kimchi.validateIscsiForm(); + } else if (poolType === "scsi") { + return kimchi.validateScsiFCForm(); } else if (poolType === "logical") { return kimchi.validateLogicalForm(); } else { @@ -204,6 +234,15 @@ kimchi.validateIscsiForm = function() { 
return true; }; +kimchi.validateScsiFCForm = function() { + var fcHost = $('input:radio[name=adapter_name]:checked').val(); + if (fcHost === undefined) { + kimchi.message.error(i18n['msg.validate.pool.edit.scsifchost']); + return false; + } + return true; +}; + kimchi.validateServer = function(serverField) { if ('' === serverField) { kimchi.message.error(i18n['msg.pool.edit.server.blank']); @@ -248,6 +287,11 @@ kimchi.addPool = function(event) { source.path = $('#nfspathId').val(); source.host = $('#nfsserverId').val(); formData.source = source; + } else if (poolType === 'scsi'){ + var source = {}; + source.adapter_name = formData.adapter_name; + delete formData.adapter_name; + formData.source = source; } else if (poolType === 'iscsi') { var source = {}; source.target = $('#iscsiTargetId').val(); diff --git a/ui/pages/i18n.html.tmpl b/ui/pages/i18n.html.tmpl index d63d4e9..a4c3ccb 100644 --- a/ui/pages/i18n.html.tmpl +++ b/ui/pages/i18n.html.tmpl @@ -55,6 +55,8 @@ var i18n = { 'msg.fail.template.no.iso': "$_("No iso found")", 'msg.fail.template.scan': "$_("Failed to scan")", 'msg.fail.template.distr': "$_("Failed to list iso distributions")", + 'msg.fail.template.retrieve': "$_("Failed to retrieve template")", + 'msg.kimchi.list.fchosts.fail': "$_("Failed to list Fibre Channel SCSI hosts")", 'msg.confirm.delete.title': "$_("Delete Confirmation")", 'msg.confirm': "$_("OK")", 'msg.cancel': "$_("Cancel")", @@ -100,9 +102,11 @@ var i18n = { 'msg.validate.pool.edit.path':"$_("This is not a real linux path.")", 'msg.validate.pool.edit.nfspath':"$_("Invalid nfs mount path.")", 'msg.validate.pool.edit.logical.device':"$_("No logical device selected.")", + 'msg.validate.pool.edit.scsifchost':"$_("A Fibre Channel SCSI host must be selected.")", 'msg.kimchi.storage.pool.empty':"$_("This storage pool is empty.")", 'msg.kimchi.list.volume.fail':"$_("Failed to list the storage pool.")", 'msg.kimchi.storage.pool.not.active':"$_("The storage pool is not active now.")", + 
'msg.kimchi.retrieve.pool.fail': "$_("Failed to retrieve storage pool.")", 'fail.delete.template': "$_("Failed to delete template.")", 'Guests':"$_("Guests")", 'Host':"$_("Host")", diff --git a/ui/pages/storagepool-add.html.tmpl b/ui/pages/storagepool-add.html.tmpl index dac99fe..4782d15 100644 --- a/ui/pages/storagepool-add.html.tmpl +++ b/ui/pages/storagepool-add.html.tmpl @@ -104,6 +104,12 @@ <div class="host-partition"></div> </section> </div> + <div class="scsifc-section tmpl-html"> + <section class="form-section"> + <h2>3. $_("Select SCSI Fibre Channel Host")</h2> + <div class="scsifc-hosts"></div> + </section> + </div> <div class="iscsi-section tmpl-html"> <section class="form-section"> <h2>3. $_("iSCSI Server")</h2> @@ -154,5 +160,11 @@ <label for="{name}">{path}</label> </div> </script> + <script id="scsiFCTmpl" type="html/text"> + <div class="field"> + <input type="radio" value="{name}" name="adapter_name" id="fc-{name}"> + <label for="fc-{name}">{name}</label> + </div> + </script> </body> </html> -- 1.8.5.3

This patch implements the UI functions and API calls to show to user the list of volumes (LUNs) of a SCSI FC storagepools. The user can then select the LUN when creating a new VM. Signed-off-by: Rodrigo Trujillo <rodrigo.trujillo@linux.vnet.ibm.com> --- ui/js/src/kimchi.api.js | 13 +++++++ ui/js/src/kimchi.guest_add_main.js | 72 +++++++++++++++++++++++++++++++++++--- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/ui/js/src/kimchi.api.js b/ui/js/src/kimchi.api.js index 66fc41e..4597c5d 100644 --- a/ui/js/src/kimchi.api.js +++ b/ui/js/src/kimchi.api.js @@ -155,6 +155,19 @@ var kimchi = { }); }, + /* + * Retrieve the information of a storage pool by the given name. + */ + retrieveStoragePool : function(storagePoolName, suc, err) { + kimchi.requestJSON({ + url : kimchi.url + "storagepools/" + + encodeURIComponent(storagePoolName), + type : 'GET', + contentType : 'application/json', + dataType : 'json' + }).done(suc); + }, + /** * Retrieve the information of a template by the given name. */ diff --git a/ui/js/src/kimchi.guest_add_main.js b/ui/js/src/kimchi.guest_add_main.js index 2085562..66dc212 100644 --- a/ui/js/src/kimchi.guest_add_main.js +++ b/ui/js/src/kimchi.guest_add_main.js @@ -62,9 +62,7 @@ kimchi.guest_add_main = function() { } }); - var addGuest = function(event) { - var formData = $('#form-vm-add').serializeObject(); - + var addGuest = function(formData) { kimchi.createVM(formData, function() { kimchi.listVmsAuto(); kimchi.window.close(); @@ -79,8 +77,74 @@ kimchi.guest_add_main = function() { return false; }; + // This function is used to select a lun for new vm disk if template has + // a SCSI storagepool associated. 
+ function getLun() { + var formData = $('#form-vm-add').serializeObject(); + var templateName = formData.template.substring(11); + kimchi.retrieveTemplate(templateName, function(templateInfo) { + var poolName = templateInfo.storagepool.substring(14); + kimchi.retrieveStoragePool(poolName, function(poolInfo){ + if (poolInfo.type === "scsi") { + kimchi.listStorageVolumes(poolInfo.name, function(lunsList) { + if (lunsList.length == 0) { + kimchi.message.error('There are not volumes for this pool'); + return false; + } + var popUpList = '<div class="section-container">' + + '<div class="section-header">1. Storage Pool: ' + poolInfo.name + '</div>' + + '<div class="text-help">'; + $.each(lunsList, function(index, value) { + popUpList += '<div class="input-container">' + + '<input type="radio" id="lun-' + value.name + '" name="lun" value="' + value.name + '">' + + '<label for="lun-' + value.name + '">' + value.name + '</label></div>'; + }); + popUpList += '</div></div>'; + console.log(popUpList) + var popup = $(popUpList); + popup.dialog({ + autoOpen : true, + modal : true, + width : 400, + draggable : false, + resizable : false, + closeText: "X", + dialogClass : "network-ui-dialog", + title: "Please, select a LUN", + close: function( event, ui ) { $('input[name=lun]').attr('checked',false); }, + buttons : [ { + text : "Select", + click : function() { + var lunName = $('input:radio[name=lun]:checked').val(); + if (lunName === undefined) { + kimchi.message.error('You must select a LUN'); + } else { + formData.volumes = new Array(lunName); + addGuest(formData); + } + $( this ).dialog( "close" ); + } + }] + }); + },function() { + // listStorageVolumes error handler + kimchi.message.error(i18n['msg.kimchi.list.volume.fail']); + }); + } + else { addGuest(formData); } + }, function() { + // retrieveStoragePool error handler + kimchi.message.error(i18n['msg.kimchi.retrieve.pool.fail']); + }); + }, function() { + // retrieveStoragePool error handler + 
kimchi.message.error(i18n['msg.fail.template.retrieve']); + }); + return false; + } + $('#form-vm-add').on('submit', addGuest); - $('#vm-doAdd').on('click', addGuest); + $('#vm-doAdd').on('click', getLun); showTemplates(); }; -- 1.8.5.3
participants (1)
-
Rodrigo Trujillo