[PATCHv3 0/5] Add volume reference count

From: Royce Lv <lvroyce@linux.vnet.ibm.com>

Available volumes will be tracked by volume reference count. Add this field to storage volume, so that storage volume/pool action validation can rely on it.

Royce Lv (5):
  Fix vm disk path when it does not have source element
  Add volume ref_cnt: update api.md
  Add volume ref_cnt: Update controller and json schema
  Add volume ref_cnt: Add model and mockmodel implementation
  Add volume ref_cnt: Update test

 docs/API.md                          |  3 +++
 src/kimchi/API.json                  | 24 ++++++++++++++++++++++++
 src/kimchi/control/storagevolumes.py |  1 +
 src/kimchi/i18n.py                   |  3 +++
 src/kimchi/mockmodel.py              |  3 +++
 src/kimchi/model/storagevolumes.py   | 31 +++++++++++++++++++++++++++++++
 src/kimchi/model/vmstorages.py       | 23 +++++++++++++----------
 tests/test_model.py                  |  4 ++++
 tests/test_rest.py                   |  2 ++
 9 files changed, 84 insertions(+), 10 deletions(-)

--
1.8.1.2

From: Royce Lv <lvroyce@linux.vnet.ibm.com>

When vm disk does not have source element, objectify library will error with 'no such element: source'. Trap this exception.

Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com>
---
 src/kimchi/model/vmstorages.py | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/src/kimchi/model/vmstorages.py b/src/kimchi/model/vmstorages.py
index 7796f30..44b49df 100644
--- a/src/kimchi/model/vmstorages.py
+++ b/src/kimchi/model/vmstorages.py
@@ -167,17 +167,20 @@ class VMStorageModel(object):
         if disk is None:
             raise NotFoundError("KCHCDROM0007E", {'dev_name': dev_name,
                                                   'vm_name': vm_name})
-        source = disk.source
         path = ""
-        if source is not None:
-            src_type = disk.attrib['type']
-            if src_type == 'network':
-                host = source.host
-                path = (source.attrib['protocol'] + '://' +
-                        host.attrib['name'] + ':' +
-                        host.attrib['port'] + source.attrib['name'])
-            else:
-                path = source.attrib[DEV_TYPE_SRC_ATTR_MAP[src_type]]
+        try:
+            source = disk.source
+            if source is not None:
+                src_type = disk.attrib['type']
+                if src_type == 'network':
+                    host = source.host
+                    path = (source.attrib['protocol'] + '://' +
+                            host.attrib['name'] + ':' +
+                            host.attrib['port'] + source.attrib['name'])
+                else:
+                    path = source.attrib[DEV_TYPE_SRC_ATTR_MAP[src_type]]
+        except:
+            pass
         dev_type = disk.attrib['device']
         return {'dev': dev_name,
                 'type': dev_type,
--
1.8.1.2

On 02/25/2014 05:35 AM, lvroyce@linux.vnet.ibm.com wrote:
From: Royce Lv <lvroyce@linux.vnet.ibm.com>
When vm disk does not have source element, objectify library will error with 'no such element: source' Trap this exception.
Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com> --- src/kimchi/model/vmstorages.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-)
[...]
Q. When is there no 'source' element?

And independently of that, we still need to return the 'path' in addition to 'dev' and 'type'.

On 2014年02月26日 01:40, Aline Manera wrote:
> On 02/25/2014 05:35 AM, lvroyce@linux.vnet.ibm.com wrote:
> > From: Royce Lv <lvroyce@linux.vnet.ibm.com>
> > When vm disk does not have source element, objectify library will error with 'no such element: source'. Trap this exception.
> > [...]
>
> Q. When is there no 'source' element?

I have a vm in virt-manager whose cdrom is disconnected (media ejected). That disk does not have any source element, and running the previous code raises the error.

> And independently of that, we still need to return the 'path' in addition to 'dev' and 'type'.

ACK
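A side note, not part of this patch: if objectify raises AttributeError for the missing child (as the 'no such element' message suggests), the bare try/except in the hunk above could be narrowed by probing for the element explicitly. A minimal sketch, reusing disk and DEV_TYPE_SRC_ATTR_MAP from that hunk:

    # Probe for the optional <source> element instead of trapping everything.
    source = getattr(disk, 'source', None)   # None when the element is absent
    path = ""
    if source is not None:
        src_type = disk.attrib['type']
        if src_type == 'network':
            host = source.host
            path = (source.attrib['protocol'] + '://' +
                    host.attrib['name'] + ':' +
                    host.attrib['port'] + source.attrib['name'])
        else:
            path = source.attrib[DEV_TYPE_SRC_ATTR_MAP[src_type]]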

From: Royce Lv <lvroyce@linux.vnet.ibm.com>

Update doc to support storage volume reference count.

Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com>
---
 docs/API.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/API.md b/docs/API.md
index e97eace..e3f9ae1 100644
--- a/docs/API.md
+++ b/docs/API.md
@@ -395,6 +395,9 @@ A interface represents available network interface on VM.
 * os_distro *(optional)*: os distribution of the volume, for iso volume only.
 * os_version *(optional)*: os version of the volume, for iso volume only.
 * bootable *(optional)*: True if iso image is bootable and not corrupted.
+* ref_cnt: Number of vms which used this volume,
+           0 for volumes which are available for attachment.
+           >1 indicate number of vms used this volume.
 * **DELETE**: Remove the Storage Volume
 * **POST**: *See Storage Volume Actions*
--
1.8.1.2

On 02/25/2014 05:35 AM, lvroyce@linux.vnet.ibm.com wrote:
From: Royce Lv <lvroyce@linux.vnet.ibm.com>
Update doc to support storage volume reference count.
Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com> --- docs/API.md | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/docs/API.md b/docs/API.md
[...]
+* ref_cnt: Number of vms which used this volume,
+           0 for volumes which are available for attachment.
+           >1 indicate number of vms used this volume.
Is the number of vms important? Why not only true or false? in_use: True|False
* **DELETE**: Remove the Storage Volume * **POST**: *See Storage Volume Actions*

On 2014年02月26日 01:46, Aline Manera wrote:
> On 02/25/2014 05:35 AM, lvroyce@linux.vnet.ibm.com wrote:
> > From: Royce Lv <lvroyce@linux.vnet.ibm.com>
> > Update doc to support storage volume reference count.
> > [...]
> > +* ref_cnt: Number of vms which used this volume,
> > +           0 for volumes which are available for attachment.
> > +           >1 indicate number of vms used this volume.
>
> Is the number of vms important? Why not only true or false?
> in_use: True|False

In the future, if we are going to support shared disks, the ref_cnt can be more than 2; in_use is not able to track this.
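To illustrate what a client would then see, here is a minimal Python 2 sketch. It assumes a kimchi instance listening on http://localhost:8000 with a pool named 'default' and a volume named 'volume-1'; URL, port and auth are assumptions, adjust them to your setup:

    import json
    import urllib2

    # Fetch the volume resource and inspect the new ref_cnt field.
    url = ('http://localhost:8000/storagepools/default/'
           'storagevolumes/volume-1')
    vol = json.loads(urllib2.urlopen(url).read())
    if vol['ref_cnt'] == 0:
        print 'volume is free for attachment'
    else:
        print 'volume is used by %d vm(s)' % vol['ref_cnt']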

From: Royce Lv <lvroyce@linux.vnet.ibm.com>

Add ref_cnt to controller to report reference count information. Update json schema validation.

Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com>
---
 src/kimchi/API.json                  | 24 ++++++++++++++++++++++++
 src/kimchi/control/storagevolumes.py |  1 +
 src/kimchi/i18n.py                   |  3 +++
 3 files changed, 28 insertions(+)

diff --git a/src/kimchi/API.json b/src/kimchi/API.json
index f595bbf..4afdebf 100644
--- a/src/kimchi/API.json
+++ b/src/kimchi/API.json
@@ -143,6 +143,30 @@
             }
         }
     },
+    "storagevolumes_create": {
+        "type": "object",
+        "properties": {
+            "name": {
+                "description": "The name of the Storage Volume",
+                "type": "string",
+                "minLength": 1,
+                "required": true,
+                "error": "KCHVOL0012E"
+            },
+            "allocation": {
+                "description": "The size(MiB) of allocation when create the storage volume",
+                "type": "number",
+                "minimum": 1,
+                "error": "KCHVOL0013E"
+            },
+            "format": {
+                "description": "The format of the volume",
+                "type": "string",
+                "pattern": "^qcow2|raw$",
+                "error": "KCHVOL0014E"
+            }
+        }
+    },
     "vms_create": {
         "type": "object",
         "error": "KCHVM0016E",
diff --git a/src/kimchi/control/storagevolumes.py b/src/kimchi/control/storagevolumes.py
index cd15bcc..b05a5a8 100644
--- a/src/kimchi/control/storagevolumes.py
+++ b/src/kimchi/control/storagevolumes.py
@@ -55,6 +55,7 @@ class StorageVolume(Resource):
                 'capacity': self.info['capacity'],
                 'allocation': self.info['allocation'],
                 'path': self.info['path'],
+                'ref_cnt': self.info['ref_cnt'],
                 'format': self.info['format']}

         for key in ('os_version', 'os_distro', 'bootable'):
diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py
index fea0184..d59a87c 100644
--- a/src/kimchi/i18n.py
+++ b/src/kimchi/i18n.py
@@ -147,6 +147,9 @@ messages = {
     "KCHVOL0009E": _("Unable to wipe storage volumes %(name)s. Details: %(err)s"),
     "KCHVOL0010E": _("Unable to delete storage volume %(name)s. Details: %(err)s"),
     "KCHVOL0011E": _("Unable to resize storage volume %(name)s. Details: %(err)s"),
+    "KCHVOL0012E": _("Storage volume name must be a string"),
+    "KCHVOL0013E": _("Storage volume allocation must be an integer number"),
+    "KCHVOL0014E": _("Storage volume format not supported"),

     "KCHIFACE0001E": _("Interface %(name)s does not exist"),
--
1.8.1.2

On 02/25/2014 05:35 AM, lvroyce@linux.vnet.ibm.com wrote:
From: Royce Lv <lvroyce@linux.vnet.ibm.com>
Add ref_cnt to controller to report reference count information. Update json schema validation.
Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com> --- src/kimchi/API.json | 24 ++++++++++++++++++++++++ src/kimchi/control/storagevolumes.py | 1 + src/kimchi/i18n.py | 3 +++ 3 files changed, 28 insertions(+)
diff --git a/src/kimchi/API.json b/src/kimchi/API.json
[...]
+    "storagevolumes_create": {
+        "type": "object",
Missing 'error' parameter. In the outer 'error' schema we should list the required parameters.
+ "properties": { + "name": { + "description": "The name of the Storage Volume", + "type": "string", + "minLength": 1, + "required": true, + "error": "KCHVOL0012E" + }, + "allocation": { + "description": "The size(MiB) of allocation when create the storage volume", + "type": "number", + "minimum": 1, + "error": "KCHVOL0013E" + }, + "format": { + "description": "The format of the volume", + "type": "string", + "pattern": "^qcow2|raw$", + "error": "KCHVOL0014E" + } + } + }, "vms_create": { "type": "object", "error": "KCHVM0016E", diff --git a/src/kimchi/control/storagevolumes.py b/src/kimchi/control/storagevolumes.py index cd15bcc..b05a5a8 100644 --- a/src/kimchi/control/storagevolumes.py +++ b/src/kimchi/control/storagevolumes.py @@ -55,6 +55,7 @@ class StorageVolume(Resource): 'capacity': self.info['capacity'], 'allocation': self.info['allocation'], 'path': self.info['path'], + 'ref_cnt': self.info['ref_cnt'], 'format': self.info['format']}
for key in ('os_version', 'os_distro', 'bootable'): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index fea0184..d59a87c 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -147,6 +147,9 @@ messages = { "KCHVOL0009E": _("Unable to wipe storage volumes %(name)s. Details: %(err)s"), "KCHVOL0010E": _("Unable to delete storage volume %(name)s. Details: %(err)s"), "KCHVOL0011E": _("Unable to resize storage volume %(name)s. Details: %(err)s"), + "KCHVOL0012E": _("Storage volume name must be a string"), + "KCHVOL0013E": _("Storage volume allocation must be an integer number"), + "KCHVOL0014E": _("Storage volume format not supported"),
"KCHIFACE0001E": _("Interface %(name)s does not exist"),

On 2014年02月26日 01:42, Aline Manera wrote:
> On 02/25/2014 05:35 AM, lvroyce@linux.vnet.ibm.com wrote:
> > From: Royce Lv <lvroyce@linux.vnet.ibm.com>
> > Add ref_cnt to controller to report reference count information. Update json schema validation.
> > [...]
> > +    "storagevolumes_create": {
> > +        "type": "object",
>
> Missing 'error' parameter. In the outer 'error' schema we should list the required parameters.

ACK
+ "properties": { + "name": { + "description": "The name of the Storage Volume", + "type": "string", + "minLength": 1, + "required": true, + "error": "KCHVOL0012E" + }, + "allocation": { + "description": "The size(MiB) of allocation when create the storage volume", + "type": "number", + "minimum": 1, + "error": "KCHVOL0013E" + }, + "format": { + "description": "The format of the volume", + "type": "string", + "pattern": "^qcow2|raw$", + "error": "KCHVOL0014E" + } + } + }, "vms_create": { "type": "object", "error": "KCHVM0016E", diff --git a/src/kimchi/control/storagevolumes.py b/src/kimchi/control/storagevolumes.py index cd15bcc..b05a5a8 100644 --- a/src/kimchi/control/storagevolumes.py +++ b/src/kimchi/control/storagevolumes.py @@ -55,6 +55,7 @@ class StorageVolume(Resource): 'capacity': self.info['capacity'], 'allocation': self.info['allocation'], 'path': self.info['path'], + 'ref_cnt': self.info['ref_cnt'], 'format': self.info['format']}
for key in ('os_version', 'os_distro', 'bootable'): diff --git a/src/kimchi/i18n.py b/src/kimchi/i18n.py index fea0184..d59a87c 100644 --- a/src/kimchi/i18n.py +++ b/src/kimchi/i18n.py @@ -147,6 +147,9 @@ messages = { "KCHVOL0009E": _("Unable to wipe storage volumes %(name)s. Details: %(err)s"), "KCHVOL0010E": _("Unable to delete storage volume %(name)s. Details: %(err)s"), "KCHVOL0011E": _("Unable to resize storage volume %(name)s. Details: %(err)s"), + "KCHVOL0012E": _("Storage volume name must be a string"), + "KCHVOL0013E": _("Storage volume allocation must be an integer number"), + "KCHVOL0014E": _("Storage volume format not supported"),
"KCHIFACE0001E": _("Interface %(name)s does not exist"),

From: Royce Lv <lvroyce@linux.vnet.ibm.com>

Set ref_cnt to 0 when new volume created, if the ref_cnt cannot be found, which means it is created outside of kimchi scope, report it when lookup storage volume, search info about storage volumes used by vms and count the ref_cnt.

Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com>
---
 src/kimchi/mockmodel.py            |  3 +++
 src/kimchi/model/storagevolumes.py | 31 +++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/src/kimchi/mockmodel.py b/src/kimchi/mockmodel.py
index b50bf31..7620d38 100644
--- a/src/kimchi/mockmodel.py
+++ b/src/kimchi/mockmodel.py
@@ -423,6 +423,7 @@ class MockModel(object):
         name = params['name']
         volume = MockStorageVolume(pool, name, params)
         volume.info['type'] = params['type']
+        volume.info['ref_cnt'] = params.get('ref_cnt', 0)
         volume.info['format'] = params['format']
         volume.info['path'] = os.path.join(
             pool.info['path'], name)
@@ -884,6 +885,7 @@ class MockVMTemplate(VMTemplate):
         disk_paths = []
         for vol_info in volumes:
             vol_info['capacity'] = vol_info['capacity'] << 10
+            vol_info['ref_cnt'] = 1
             self.model.storagevolumes_create(pool.name, vol_info)
             disk_paths.append({'pool': pool.name, 'volume': vol_info['name']})
         return disk_paths
@@ -1002,6 +1004,7 @@ class MockStorageVolume(object):
                      'capacity': capacity << 20,
                      'allocation': params.get('allocation', '512'),
                      'path': params.get('path'),
+                     'ref_cnt': params.get('ref_cnt'),
                      'format': fmt}
         if fmt == 'iso':
             self.info['allocation'] = self.info['capacity']
diff --git a/src/kimchi/model/storagevolumes.py b/src/kimchi/model/storagevolumes.py
index 20c65b9..9906f33 100644
--- a/src/kimchi/model/storagevolumes.py
+++ b/src/kimchi/model/storagevolumes.py
@@ -29,6 +29,8 @@ from kimchi.exception import InvalidOperation, IsoFormatError
 from kimchi.exception import MissingParameter, NotFoundError, OperationFailed
 from kimchi.isoinfo import IsoImage
 from kimchi.model.storagepools import StoragePoolModel
+from kimchi.model.vms import VMsModel
+from kimchi.model.vmstorages import VMStoragesModel, VMStorageModel


 VOLUME_TYPE_MAP = {0: 'file',
@@ -40,6 +42,7 @@ VOLUME_TYPE_MAP = {0: 'file',
 class StorageVolumesModel(object):
     def __init__(self, **kargs):
         self.conn = kargs['conn']
+        self.objstore = kargs['objstore']

     def create(self, pool, params):
         vol_xml = """
@@ -58,6 +61,7 @@ class StorageVolumesModel(object):
         params.setdefault('format', 'qcow2')

         name = params['name']
+        vol_id = '%s:%s' % (pool, name)
         try:
             pool = StoragePoolModel.get_storagepool(pool, self.conn)
             xml = vol_xml % params
@@ -71,6 +75,10 @@ class StorageVolumesModel(object):
             raise OperationFailed("KCHVOL0007E",
                                   {'name': name, 'pool': pool,
                                    'err': e.get_error_message()})
+
+        with self.objstore as session:
+            session.store('storagevolume', vol_id, {'ref_cnt': 0})
+
         return name

     def get_list(self, pool_name):
@@ -89,6 +97,7 @@ class StorageVolumesModel(object):
 class StorageVolumeModel(object):
     def __init__(self, **kargs):
         self.conn = kargs['conn']
+        self.objstore = kargs['objstore']

     def _get_storagevolume(self, pool, name):
         pool = StoragePoolModel.get_storagepool(pool, self.conn)
@@ -103,16 +112,38 @@ class StorageVolumeModel(object):
             else:
                 raise

+    def _get_ref_cnt(self, pool, name, path):
+        vol_id = '%s:%s' % (pool, name)
+        with self.objstore as session:
+            try:
+                ref_cnt = session.get('storagevolume', vol_id)['ref_cnt']
+            except NotFoundError:
+                # Fix storage volume created outside kimchi scope
+                ref_cnt = 0
+                args = {'conn': self.conn, 'objstore': self.objstore}
+                # try to find this volume in exsisted vm
+                vms = VMsModel(**args).get_list()
+                for vm in vms:
+                    storages = VMStoragesModel(**args).get_list(vm)
+                    for disk in storages:
+                        if path == VMStorageModel(**args).lookup(vm, disk)['path']:
+                            ref_cnt = ref_cnt + 1
+                session.store('storagevolume', vol_id, {'ref_cnt': ref_cnt})
+
+        return ref_cnt
+
     def lookup(self, pool, name):
         vol = self._get_storagevolume(pool, name)
         path = vol.path()
         info = vol.info()
         xml = vol.XMLDesc(0)
         fmt = xmlutils.xpath_get_text(xml, "/volume/target/format/@type")[0]
+        ref_cnt = self._get_ref_cnt(pool, name, path)
         res = dict(type=VOLUME_TYPE_MAP[info[0]],
                    capacity=info[1],
                    allocation=info[2],
                    path=path,
+                   ref_cnt=ref_cnt,
                    format=fmt)
         if fmt == 'iso':
             if os.path.islink(path):
--
1.8.1.2

volume ref_cnt is useful for storage pool integrity verification. But we should be careful with ref_cnt.

Some questions about this ref_cnt. Only when all volume ref_cnts of a storage pool (all types of pool) are zero:
1. we can delete this storage pool, for all types of pool, right?
2. we can deactivate this storage pool, for all types of pool, right?

When do we decrease ref_cnt?
1. A VM is deleted? The ref_cnt of its attached volumes will decrease?
2. A VM is deleted outside the kimchi scope, should we decrease the ref_cnt? (You increase the storage volume ref_cnt when a vm is created outside the kimchi scope.)
3. A volume is detached from a vm inside or outside the kimchi scope.

And now, can kimchi avoid a ref_cnt race, when one request gets the ref_cnt and another request decreases or increases it at the same time?

From your code:

    + vms = VMsModel(**args).get_list()

which is better? VMsModel(**args).get_list() or self.vms_get_list()

On 02/25/2014 04:35 PM, lvroyce@linux.vnet.ibm.com wrote:
From: Royce Lv <lvroyce@linux.vnet.ibm.com>
Set ref_cnt to 0 when new volume created, if the ref_cnt cannot be found, which means it is created outside of kimchi scope, report it when lookup storage volume, search info about storage volumes used by vms and count the ref_cnt.
Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com> --- src/kimchi/mockmodel.py | 3 +++ src/kimchi/model/storagevolumes.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+)
[...]
--
Thanks and best regards!

Sheldon Feng(冯少合)<shaohef@linux.vnet.ibm.com>
IBM Linux Technology Center

On 2014年02月25日 18:29, Sheldon wrote:
> volume ref_cnt is useful for storage pool integrity verification.
> But we should be careful with ref_cnt.
> Some questions about this ref_cnt. Only when all volume ref_cnts of a storage pool (all types of pool) are zero:
> 1. we can delete this storage pool, for all types of pool, right?
> 2. we can deactivate this storage pool, for all types of pool, right?

Right for me. I can think of examples such as an nfs pool: the nfs mount takes effect on activate and umount on deactivate. So if some vm volume is in such a pool, deactivating or deleting the pool would make that volume inaccessible.
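A hypothetical sketch of such a pool guard (not part of this series; the method name and the error code are invented for illustration, while StorageVolumesModel, StorageVolumeModel and InvalidOperation come from the patches in this thread):

    # Refuse to deactivate/delete a pool while any of its volumes is in use.
    def _ensure_pool_not_in_use(self, pool_name):
        args = {'conn': self.conn, 'objstore': self.objstore}
        for vol_name in StorageVolumesModel(**args).get_list(pool_name):
            vol = StorageVolumeModel(**args).lookup(pool_name, vol_name)
            if vol['ref_cnt'] > 0:
                raise InvalidOperation("KCHPOOL0XXXE", {'name': pool_name})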
> When do we decrease ref_cnt?
> 1. A VM is deleted? The ref_cnt of its attached volumes will decrease?

ACK.

> 2. A VM is deleted outside the kimchi scope, should we decrease the ref_cnt?
> (You increase the storage volume ref_cnt when a vm is created outside the kimchi scope.)

Since we count the ref_cnt when kimchi starts, I assume we can regard that once kimchi starts to take control, there is no other "brain" controlling the vms. What do you think? So in this way we do not care about deletion outside the kimchi scope, as it will not happen after kimchi has started. And I haven't increased anything; I just count the ref_cnt when I encounter a volume created outside the kimchi scope on its first lookup.

> 3. A volume is detached from a vm inside or outside the kimchi scope.

Same as the above.
> And now, can kimchi avoid a ref_cnt race, when one request gets the ref_cnt and another request decreases or increases it at the same time?

I think objstore makes sure there will be no race.
> From your code:
>     + vms = VMsModel(**args).get_list()
> which is better? VMsModel(**args).get_list() or self.vms_get_list()

This is a good point. I think self.vms_get_list is a better way to refer to it; I will have a try.

On 02/27/2014 12:21 AM, Royce Lv wrote:
> On 2014年02月25日 18:29, Sheldon wrote:
> > [...]
> > And now, can kimchi avoid a ref_cnt race, when one request gets the ref_cnt and another request decreases or increases it at the same time?
>
> I think objstore makes sure there will be no race.
Unless the objstore has a lock, the race condition will still exist using it

On 2014年02月28日 09:53, Aline Manera wrote:
> On 02/27/2014 12:21 AM, Royce Lv wrote:
> > On 2014年02月25日 18:29, Sheldon wrote:
> > > [...]
> > > And now, can kimchi avoid a ref_cnt race, when one request gets the ref_cnt and another request decreases or increases it at the same time?
> > I think objstore makes sure there will be no race.
>
> Unless the objstore has a lock, the race condition will still exist using it

See objectstore.py:
    class ObjectStore(object):
        def __init__(self, location=None):
            self._lock = threading.Semaphore()   # ---> here is semaphore
            self._connections = OrderedDict()
            self.location = location or config.get_object_store()
            with self._lock:
                self._init_db()

        def __enter__(self):
            self._lock.acquire()
            return ObjectStoreSession(self._get_conn())

        def __exit__(self, type, value, tb):
            self._lock.release()

Every time we write:

    with self.objstore as session:

we get the lock when entering and release the lock when exiting. So according to me, there will be no race when using objectstore.
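For illustration, a read-modify-write of ref_cnt stays atomic for the same reason, because the whole block runs while the semaphore is held. A sketch of a hypothetical helper (not part of this series):

    def _incr_ref_cnt(self, vol_id, delta=1):
        # The semaphore is taken on __enter__ and released on __exit__, so the
        # get/store pair below cannot interleave with another request.
        with self.objstore as session:
            info = session.get('storagevolume', vol_id)
            info['ref_cnt'] = info.get('ref_cnt', 0) + delta
            session.store('storagevolume', vol_id, info)
            return info['ref_cnt']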

From: Royce Lv <lvroyce@linux.vnet.ibm.com>

Add mockmodel test and model test to validate:
1. ref_cnt of storage volume forked internal vm creation is 1.
2. ref_cnt of storage volume created from REST API is 0.

Signed-off-by: Royce Lv <lvroyce@linux.vnet.ibm.com>
---
 tests/test_model.py | 4 ++++
 tests/test_rest.py  | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/tests/test_model.py b/tests/test_model.py
index 74e2424..3e39aa5 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -356,6 +356,7 @@ class ModelTests(unittest.TestCase):
         inst.storagevolume_wipe(pool, vol)
         volinfo = inst.storagevolume_lookup(pool, vol)
         self.assertEquals(0, volinfo['allocation'])
+        self.assertEquals(0, volinfo['ref_cnt'])
         volinfo = inst.storagevolume_lookup(pool, vol)

         # Define the size = capacity + 16M
@@ -405,6 +406,9 @@ class ModelTests(unittest.TestCase):
         vm_info = inst.vm_lookup(params['name'])
         disk_path = '/tmp/kimchi-images/%s-0.img' % vm_info['uuid']
         self.assertTrue(os.access(disk_path, os.F_OK))
+        vol = '%s-0.img' % vm_info['uuid']
+        volinfo = inst.storagevolume_lookup(pool, vol)
+        self.assertEquals(1, volinfo['ref_cnt'])

     @unittest.skipUnless(utils.running_as_root(), 'Must be run as root')
     def test_template_create(self):
diff --git a/tests/test_rest.py b/tests/test_rest.py
index ca96dc0..69bb569 100644
--- a/tests/test_rest.py
+++ b/tests/test_rest.py
@@ -246,6 +246,7 @@ class RestTests(unittest.TestCase):
         resp = self.request(vol_uri)
         vol = json.loads(resp.read())
         self.assertEquals(1 << 30, vol['capacity'])
+        self.assertEquals(1, vol['ref_cnt'])

         # Start the VM
         resp = self.request('/vms/test-vm/start', '{}', 'POST')
@@ -846,6 +847,7 @@ class RestTests(unittest.TestCase):
         storagevolume = json.loads(resp.read())
         self.assertEquals('volume-1', storagevolume['name'])
         self.assertEquals('raw', storagevolume['format'])
+        self.assertEquals(0, storagevolume['ref_cnt'])
         self.assertEquals('/var/lib/libvirt/images/volume-1',
                           storagevolume['path'])
--
1.8.1.2
Participants (4): Aline Manera, lvroyce@linux.vnet.ibm.com, Royce Lv, Sheldon