
Patch seems OK, just some comments below.

On 11/11/2015 04:41 PM, Aline Manera wrote:
Signed-off-by: Aline Manera <alinefm@linux.vnet.ibm.com>
Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com>
---
 src/wok/plugins/kimchi/API.json                    |   5 +
 src/wok/plugins/kimchi/control/host.py             |  22 ++++
 src/wok/plugins/kimchi/disks.py                    | 123 +++++++++++++++++++++
 src/wok/plugins/kimchi/docs/API.md                 |  25 ++++-
 src/wok/plugins/kimchi/i18n.py                     |   4 +
 src/wok/plugins/kimchi/mockmodel.py                |  27 +++++
 src/wok/plugins/kimchi/model/host.py               |  29 +++++
 src/wok/plugins/kimchi/model/libvirtstoragepool.py |   9 +-
 src/wok/plugins/kimchi/model/storagepools.py       |  30 +++--
 .../plugins/kimchi/tests/test_mock_storagepool.py  |   8 +-
 10 files changed, 265 insertions(+), 17 deletions(-)
diff --git a/src/wok/plugins/kimchi/API.json b/src/wok/plugins/kimchi/API.json
index 961f35f..7daa2a9 100644
--- a/src/wok/plugins/kimchi/API.json
+++ b/src/wok/plugins/kimchi/API.json
@@ -141,6 +141,11 @@
                    "error": "KCHPOOL0025E"
                }
            }
+        },
+        "from_vg": {
+            "description": "Use an existing VG or create a new one",

This description is strange. Please check what is written in API.md.

+            "type": "boolean",
+            "error": "KCHPOOL0026E"
        }
    }
}
diff --git a/src/wok/plugins/kimchi/control/host.py b/src/wok/plugins/kimchi/control/host.py
index 9bc703a..39df0d0 100644
--- a/src/wok/plugins/kimchi/control/host.py
+++ b/src/wok/plugins/kimchi/control/host.py
@@ -35,12 +35,34 @@ class Host(Resource):
        self.devices = Devices(self.model)
        self.cpuinfo = CPUInfo(self.model)
        self.partitions = Partitions(self.model)
+        self.vgs = VolumeGroups(self.model)
    @property
    def data(self):
        return {}
+class VolumeGroups(Collection):
+    def __init__(self, model):
+        super(VolumeGroups, self).__init__(model)
+        self.role_key = 'host'
+        self.uri_fmt = "/host/vgs"
+        self.admin_methods = ['GET']
+        self.resource = VolumeGroup
+
+
+class VolumeGroup(Resource):
+    def __init__(self, model, id=None):
+        super(VolumeGroup, self).__init__(model, id)
+        self.role_key = 'host'
+        self.uri_fmt = "/host/vgs/%s"
+        self.admin_methods = ['GET']
+
+    @property
+    def data(self):
+        return self.info
+
+
 class VMHolders(SimpleCollection):
    def __init__(self, model, device_id):
        super(VMHolders, self).__init__(model)
diff --git a/src/wok/plugins/kimchi/disks.py b/src/wok/plugins/kimchi/disks.py
index eb40e3a..0ac792e 100644
--- a/src/wok/plugins/kimchi/disks.py
+++ b/src/wok/plugins/kimchi/disks.py
@@ -194,3 +194,126 @@ def get_partition_details(name):
    dev['path'] = dev_path
    dev['name'] = name
    return dev
+
+
+def vgs():
+    """
+    lists all volume groups in the system. All size units are in bytes.
+
+    [{'vgname': 'vgtest', 'size': 999653638144L, 'free': 0}]
+    """
+    cmd = ['vgs',
+           '--units',
+           'b',
+           '--nosuffix',
+           '--noheading',
+           '--unbuffered',
+           '--options',
+           'vg_name,vg_size,vg_free']
+
+    proc = subprocess.Popen(cmd,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+
+    out, err = proc.communicate()
+
+    if proc.returncode != 0:
+        raise OperationFailed("KCHDISKS0003E", {'err': err})
+
+    if not out:
+        return []
+
+    # remove blank spaces and create a list of VGs
+    vgs = map(lambda v: v.strip(), out.strip('\n').split('\n'))
+
+    # create a dict based on data retrieved from vgs
+    return map(lambda l: {'vgname': l[0],
+                          'size': long(l[1]),
+                          'free': long(l[2])},
+               [fields.split() for fields in vgs])
+
+
+def lvs(vgname=None):
+    """
+    lists all logical volumes found in the system. It can be filtered by
+    the volume group. All size units are in bytes.
+
+    [{'lvname': 'lva', 'path': '/dev/vgtest/lva', 'size': 12345L},
+     {'lvname': 'lvb', 'path': '/dev/vgtest/lvb', 'size': 12345L}]
+    """
+    cmd = ['lvs',
+           '--units',
+           'b',
+           '--nosuffix',
+           '--noheading',
+           '--unbuffered',
+           '--options',
+           'lv_name,lv_path,lv_size,vg_name']
+
+    proc = subprocess.Popen(cmd,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+
+    out, err = proc.communicate()
+
+    if proc.returncode != 0:
+        raise OperationFailed("KCHDISKS0003E", {'err': err})
+
+    if not out:
+        return []
+
+    # remove blank spaces and create a list of LVs filtered by vgname, if
+    # provided
+    lvs = filter(lambda f: vgname is None or vgname in f,
+                 map(lambda v: v.strip(), out.strip('\n').split('\n')))
+
+    # create a dict based on data retrieved from lvs
+    return map(lambda l: {'lvname': l[0],
+                          'path': l[1],
+                          'size': long(l[2])},
+               [fields.split() for fields in lvs])
+
+
+def pvs(vgname=None):
+    """
+    lists all physical volumes in the system. It can be filtered by the
+    volume group. All size units are in bytes.
+
+    [{'pvname': '/dev/sda3',
+      'size': 469502001152L,
+      'uuid': 'kkon5B-vnFI-eKHn-I5cG-Hj0C-uGx0-xqZrXI'},
+     {'pvname': '/dev/sda2',
+      'size': 21470642176L,
+      'uuid': 'CyBzhK-cQFl-gWqr-fyWC-A50Y-LMxu-iHiJq4'}]
+    """
+    cmd = ['pvs',
+           '--units',
+           'b',
+           '--nosuffix',
+           '--noheading',
+           '--unbuffered',
+           '--options',
+           'pv_name,pv_size,pv_uuid,vg_name']
+
+    proc = subprocess.Popen(cmd,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+
+    out, err = proc.communicate()
+
+    if proc.returncode != 0:
+        raise OperationFailed("KCHDISKS0003E", {'err': err})
+
+    if not out:
+        return []
+
+    # remove blank spaces and create a list of PVs filtered by vgname, if
+    # provided
+    pvs = filter(lambda f: vgname is None or vgname in f,
+                 map(lambda v: v.strip(), out.strip('\n').split('\n')))
+
+    # create a dict based on data retrieved from pvs
+    return map(lambda l: {'pvname': l[0],
+                          'size': long(l[1]),
+                          'uuid': l[2]},
+               [fields.split() for fields in pvs])
diff --git a/src/wok/plugins/kimchi/docs/API.md b/src/wok/plugins/kimchi/docs/API.md
index 52368b7..14f014d 100644
--- a/src/wok/plugins/kimchi/docs/API.md
+++ b/src/wok/plugins/kimchi/docs/API.md
@@ -444,7 +444,8 @@ A interface represents available network interface on VM.
* **GET**: Retrieve a summarized list of all defined Storage Pools
* **POST**: Create a new Storage Pool
-    * name: The name of the Storage Pool.
+    * name: The name of the Storage Pool. It corresponds to the VG name while
+      creating a logical storage pool from an existing VG.
    * type: The type of the defined Storage Pool.
            Supported types: 'dir', 'kimchi-iso', 'netfs', 'logical', 'iscsi', 'scsi'
    * path: The path of the defined Storage Pool.
@@ -466,6 +467,8 @@ A interface represents available network interface on VM.
        * username: Login username of the iSCSI target.
        * password: Login password of the iSCSI target.
        * adapter_name: SCSI host name.
+    * from_vg: Indicate if a logical pool will be created from an
+               existing VG or not.
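
Just as a reference for whoever reads this later, creating a logical pool from an existing VG then boils down to a request like the one added in the test further down (names here are illustrative):

    params = {'name': 'hostVG',             # must match an existing VG name
              'type': 'logical',
              'source': {'from_vg': True}}  # no 'devices' needed in this case
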
### Resource: Storage Pool
@@ -897,6 +900,26 @@ List of available groups. Otherwise blank.
* available: false, if the partition is in use by system; true, otherwise.
+### Collection: Volume Groups
+
+**URI:** /plugins/kimchi/host/vgs
+
+**Methods:**
+
+* **GET**: Retrieve a detailed list of all volume groups in the host.
+
+### Resource: Volume Group
+
+**URI:** /plugins/kimchi/host/vgs/*:name*
+
+**Methods:**
+
+* **GET**: Retrieve the description of a single Volume Group:
+    * name: The volume group name. Used to identify it in this API.
+    * lvs: The logical volumes associated with this volume group.
+    * pvs: The physical volumes associated with this volume group.
+    * free: Amount of free space in the volume group.
+    * size: Total size of the volume group.
+
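For illustration only (values borrowed from the mock model further down), looking up a single volume group returns something like:

    {'name': 'hostVG',
     'size': 5347737600,
     'free': 5347737600,
     'pvs': ['/dev/vdx'],
     'lvs': []}
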
### Collection: Peers
diff --git a/src/wok/plugins/kimchi/i18n.py b/src/wok/plugins/kimchi/i18n.py
index 59b61de..a4092e7 100644
--- a/src/wok/plugins/kimchi/i18n.py
+++ b/src/wok/plugins/kimchi/i18n.py
@@ -29,6 +29,7 @@ messages = {
"KCHDISKS0001E": _("Error while getting block devices. Details: %(err)s"), "KCHDISKS0002E": _("Error while getting block device information for %(device)s."), + "KCHDISKS0003E": _("Unable to retrieve LVM information. Details: %(err)s"),
Not a big issue, but why not KCHLVMXXX?
"KCHPART0001E": _("Partition %(name)s does not exist in the host"),
@@ -199,6 +200,7 @@ messages = {
    "KCHPOOL0035E": _("Unable to delete pool %(name)s as it is associated with some templates"),
    "KCHPOOL0036E": _("A volume group named '%(name)s' already exists. Please, choose another name to create the logical pool."),
    "KCHPOOL0037E": _("Unable to update database with deep scan information due error: %(err)s"),
+    "KCHPOOL0038E": _("No volume group '%(name)s' found. Please, specify an existing volume group to create the logical pool from."),
"KCHVOL0001E": _("Storage volume %(name)s already exists"), "KCHVOL0002E": _("Storage volume %(name)s does not exist in storage pool %(pool)s"), @@ -294,4 +296,6 @@ messages = { "KCHCPUINF0002E": _("Invalid vCPU/topology combination."), "KCHCPUINF0003E": _("This host (or current configuration) does not allow CPU topology."),
+ "KCHLVMS0001E": _("Invalid volume group name parameter: %(name)s."), + } diff --git a/src/wok/plugins/kimchi/mockmodel.py b/src/wok/plugins/kimchi/mockmodel.py index 895401e..9186f78 100644 --- a/src/wok/plugins/kimchi/mockmodel.py +++ b/src/wok/plugins/kimchi/mockmodel.py @@ -39,6 +39,7 @@ from wok.plugins.kimchi.model.libvirtstoragepool import NetfsPoolDef from wok.plugins.kimchi.model.libvirtstoragepool import StoragePoolDef from wok.plugins.kimchi.model.model import Model from wok.plugins.kimchi.model.storagepools import StoragePoolModel +from wok.plugins.kimchi.model.storagepools import StoragePoolsModel from wok.plugins.kimchi.model.storagevolumes import StorageVolumeModel from wok.plugins.kimchi.model.storagevolumes import StorageVolumesModel from wok.plugins.kimchi.model import storagevolumes @@ -73,6 +74,7 @@ class MockModel(Model): defaults.update(mockmodel_defaults) osinfo.defaults = dict(defaults)
+        self._mock_vgs = MockVolumeGroups()
        self._mock_partitions = MockPartitions()
        self._mock_devices = MockDevices()
        self._mock_storagevolumes = MockStorageVolumes()
@@ -113,6 +115,7 @@ class MockModel(Model):
            setattr(self, m, mock_method)
        DeviceModel.lookup = self._mock_device_lookup
+        StoragePoolsModel._check_lvm = self._check_lvm
        StoragePoolModel._update_lvm_disks = self._update_lvm_disks
        StorageVolumesModel.get_list = self._mock_storagevolumes_get_list
        StorageVolumeModel.doUpload = self._mock_storagevolume_doUpload
@@ -252,6 +255,10 @@ class MockModel(Model):
return MockModel._libvirt_get_vol_path(pool, vol)
+    def _check_lvm(self, name, from_vg):
+        # do not do any verification while using MockModel
+        pass
+
    def _update_lvm_disks(self, pool_name, disks):
        conn = self.conn.get()
        pool = conn.storagePoolLookupByName(pool_name.encode('utf-8'))
@@ -334,6 +341,12 @@ class MockModel(Model):
    def _mock_partition_lookup(self, name):
        return self._mock_partitions.partitions[name]
+    def _mock_volumegroups_get_list(self):
+        return self._mock_vgs.data.keys()
+
+    def _mock_volumegroup_lookup(self, name):
+        return self._mock_vgs.data[name]
+
    def _mock_vm_clone(self, name):
        new_name = get_next_clone_name(self.vms_get_list(), name)
        snapshots = MockModel._mock_snapshots.get(name, [])
@@ -418,6 +431,20 @@ class MockStorageVolumes(object):
                                    'isvalid': True}}
+class MockVolumeGroups(object):
+    def __init__(self):
+        self.data = {"hostVG": {"lvs": [],
+                                "name": "hostVG",
+                                "pvs": ["/dev/vdx"],
+                                "free": 5347737600,
+                                "size": 5347737600},
+                     "kimchiVG": {"lvs": [],
+                                  "name": "kimchiVG",
+                                  "pvs": ["/dev/vdz", "/dev/vdw"],
+                                  "free": 10695475200,
+                                  "size": 10695475200}}
+
+
 class MockPartitions(object):
    def __init__(self):
        self.partitions = {"vdx": {"available": True, "name": "vdx",
diff --git a/src/wok/plugins/kimchi/model/host.py b/src/wok/plugins/kimchi/model/host.py
index 96f4bea..4a37dc8 100644
--- a/src/wok/plugins/kimchi/model/host.py
+++ b/src/wok/plugins/kimchi/model/host.py
@@ -241,3 +241,32 @@ class PartitionModel(object):
    def lookup(self, name):
        return disks.get_partition_details(name)
+
+
+class VolumeGroupsModel(object):
+    def __init__(self, **kargs):
+        pass
+
+    def get_list(self):
+        if not disks.vgs():
+            return []
The if above is not necessary
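Something like this should be enough, and it also avoids calling disks.vgs() twice:

    def get_list(self):
        return [vg['vgname'] for vg in disks.vgs()]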
+        return [vg['vgname'] for vg in disks.vgs()]
+
+
+class VolumeGroupModel(object):
+    def __init__(self, **kargs):
+        pass
+
+    def lookup(self, name):
+        def _format(vg):
+            return {'name': vg['vgname'],
+                    'size': vg['size'],
+                    'free': vg['free'],
+                    'pvs': [pv['pvname'] for pv in disks.pvs(vg['vgname'])],
+                    'lvs': [lv['lvname'] for lv in disks.lvs(vg['vgname'])]}
+
+        vgs = [_format(vg) for vg in disks.vgs() if vg['vgname'] == name]
+        if not vgs:
+            raise InvalidParameter("KCHLVMS0001E", {'name': name})
+
+        return vgs[0]
diff --git a/src/wok/plugins/kimchi/model/libvirtstoragepool.py b/src/wok/plugins/kimchi/model/libvirtstoragepool.py
index 5da0b3b..0fa8ce0 100644
--- a/src/wok/plugins/kimchi/model/libvirtstoragepool.py
+++ b/src/wok/plugins/kimchi/model/libvirtstoragepool.py
@@ -144,11 +144,12 @@ class LogicalPoolDef(StoragePoolDef):
        pool = E.pool(type='logical')
        pool.append(E.name(self.poolArgs['name']))
-        source = E.source()
-        for device_path in self.poolArgs['source']['devices']:
-            source.append(E.device(path=device_path))
+        if not self.poolArgs['source'].get('from_vg', False):
+            source = E.source()
+            for device_path in self.poolArgs['source']['devices']:
+                source.append(E.device(path=device_path))
+            pool.append(source)
-        pool.append(source)
        pool.append(E.target(E.path(self.path)))
        return ET.tostring(pool, encoding='unicode', pretty_print=True)
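
Just to illustrate the result here (not part of the patch): with from_vg set, no <source> element is emitted at all, so the definition handed to libvirt is roughly:

    <pool type='logical'>
      <name>myVG</name>            <!-- the existing VG name -->
      <target>
        <path>...</path>           <!-- whatever self.path resolves to -->
      </target>
    </pool>
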
diff --git a/src/wok/plugins/kimchi/model/storagepools.py b/src/wok/plugins/kimchi/model/storagepools.py
index cc0bc7c..ec866c5 100644
--- a/src/wok/plugins/kimchi/model/storagepools.py
+++ b/src/wok/plugins/kimchi/model/storagepools.py
@@ -142,9 +142,27 @@ class StoragePoolsModel(object):
            raise OperationFailed("KCHPOOL0006E", {'err': e.get_error_message()})
+    def _check_lvm(self, name, from_vg):
+        vgdisplay_cmd = ['vgdisplay', name.encode('utf-8')]
+        output, error, returncode = run_command(vgdisplay_cmd)
+        # From vgdisplay error codes:
+        # 1  error reading VGDA
+        # 2  volume group doesn't exist
+        # 3  not all physical volumes of volume group online
+        # 4  volume group not found
+        # 5  no volume groups found at all
+        # 6  error reading VGDA from lvmtab
+        if from_vg and returncode in [2, 4, 5]:
+            raise InvalidOperation("KCHPOOL0038E", {'name': name})
+
+        if not from_vg and returncode not in [2, 4, 5]:
+            raise InvalidOperation("KCHPOOL0036E", {'name': name})
+
    def create(self, params):
        task_id = None
        conn = self.conn.get()
+        from_vg = params.get('source', {}).get('from_vg', False)
+
        try:
            name = params['name']
            if name == ISO_POOL_NAME:
@@ -154,17 +172,7 @@
            # used before but a volume group will already exist with this name
            # So check the volume group does not exist to create the pool
            if params['type'] == 'logical':
-                vgdisplay_cmd = ['vgdisplay', name.encode('utf-8')]
-                output, error, returncode = run_command(vgdisplay_cmd)
-                # From vgdisplay error codes:
-                # 1  error reading VGDA
-                # 2  volume group doesn't exist
-                # 3  not all physical volumes of volume group online
-                # 4  volume group not found
-                # 5  no volume groups found at all
-                # 6  error reading VGDA from lvmtab
-                if returncode not in [2, 4, 5]:
-                    raise InvalidOperation("KCHPOOL0036E", {'name': name})
+                self._check_lvm(name, from_vg)
            if params['type'] == 'kimchi-iso':
                task_id = self._do_deep_scan(params)
diff --git a/src/wok/plugins/kimchi/tests/test_mock_storagepool.py b/src/wok/plugins/kimchi/tests/test_mock_storagepool.py
index ea9843b..a2609eb 100644
--- a/src/wok/plugins/kimchi/tests/test_mock_storagepool.py
+++ b/src/wok/plugins/kimchi/tests/test_mock_storagepool.py
@@ -65,6 +65,10 @@ class MockStoragepoolTests(unittest.TestCase):
        )
    def test_storagepool(self):
+        # MockModel always returns 2 VGs (hostVG, kimchiVG)
+        vgs = json.loads(self.request('/plugins/kimchi/host/vgs').read())
+        vg_names = [vg['name'] for vg in vgs]
+
        # MockModel always returns 2 partitions (vdx, vdz)
        partitions = json.loads(
            self.request('/plugins/kimchi/host/partitions').read()
        )
@@ -89,7 +93,9 @@
                  'source': {'host': '127.0.0.1',
                             'target': 'iqn.2015-01.localhost.kimchiUnitTest'}},
                 {'type': 'logical', 'name': u'kīмсhīUnitTestLogicalPool',
-                  'source': {'devices': [devs[0]]}}]
+                  'source': {'devices': [devs[0]]}},
+                 {'type': 'logical', 'name': vg_names[0],
+                  'source': {'from_vg': True}}]
        def _do_test(params):
            name = params['name']