Re: Ovirt OVN help needed
by Strahil
Hi Dominik,
Thanks for your reply.
On ovirt1 I got the following:
[root@ovirt1 openvswitch]# less ovn-controller.log-20191216.gz
2019-12-15T01:49:02.988Z|00032|vlog|INFO|opened log file /var/log/openvswitch/ovn-controller.log
2019-12-16T01:18:02.114Z|00033|vlog|INFO|closing log file
ovn-controller.log-20191216.gz (END)
Same on the other node:
[root@ovirt2 openvswitch]# less ovn-controller.log-20191216.gz
2019-12-15T01:26:03.477Z|00028|vlog|INFO|opened log file /var/log/openvswitch/ovn-controller.log
2019-12-16T01:30:01.718Z|00029|vlog|INFO|closing log file
ovn-controller.log-20191216.gz (END)
The strange thing is that the geneve tunnels are there:
[root@ovirt1 ~]# ovs-vsctl show
c0e938f1-b5b5-4d5a-9cda-29dae2986f29
Bridge br-int
fail_mode: secure
Port "ovn-25cc77-0"
Interface "ovn-25cc77-0"
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.1.64"}
Port "ovn-566849-0"
Interface "ovn-566849-0"
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.1.41"}
Port br-int
Interface br-int
type: internal
Port "vnet2"
Interface "vnet2"
ovs_version: "2.11.0"
[root@ovirt1 ~]# ovs-vsctl list ports
ovs-vsctl: unknown table "ports"
[root@ovirt1 ~]# ovs-vsctl list port
_uuid : fbf40569-925e-4430-a7c5-c78d58979bbc
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {}
fake_bridge : false
interfaces : [3207c0cb-3000-40f2-a850-83548f76f090]
lacp : []
mac : []
name : "vnet2"
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
_uuid : 8947f82d-a089-429b-8843-71371314cb52
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {}
fake_bridge : false
interfaces : [ec6a6688-e5d6-4346-ac47-ece1b8379440]
lacp : []
mac : []
name : br-int
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
_uuid : 72d612be-853e-43e9-8f5c-ce66cef0bebe
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {ovn-chassis-id="5668499c-7dd0-41ee-bc5d-2e6ee9cd61c3(a)192.168.1.41"}
fake_bridge : false
interfaces : [a31574fe-515b-420b-859d-7f2ac729638f]
lacp : []
mac : []
name : "ovn-566849-0"
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
_uuid : 2043a15f-ec39-4cc3-b875-7be00423dd7a
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {ovn-chassis-id="25cc77b3-046f-45c5-af0c-ffb2f77d73f1(a)192.168.1.64"}
fake_bridge : false
interfaces : [f9a9e3ff-070e-4044-b601-7f7394dc295f]
lacp : []
mac : []
name : "ovn-25cc77-0"
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
[root@ovirt1 ~]#
[root@ovirt2 ~]# ovs-vsctl show
3dbab138-6b90-44c5-af05-b8a944c9bf20
Bridge br-int
fail_mode: secure
Port "ovn-baa019-0"
Interface "ovn-baa019-0"
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.1.90"}
Port br-int
Interface br-int
type: internal
Port "vnet5"
Interface "vnet5"
Port "ovn-566849-0"
Interface "ovn-566849-0"
type: geneve
options: {csum="true", key=flow, remote_ip="192.168.1.41"}
ovs_version: "2.11.0"
[root@ovirt2 ~]# ovs-vsctl list port
_uuid : 151e1188-f07a-4750-a620-392a08e7e7fe
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {ovn-chassis-id="baa0199e-d1a4-484c-af13-a41bcad19dbc(a)192.168.1.90"}
fake_bridge : false
interfaces : [4d4bc12a-609a-4917-b839-d4f652acdc33]
lacp : []
mac : []
name : "ovn-baa019-0"
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
_uuid : 3a862f96-b3ec-46a9-bcf6-f385e5def410
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {}
fake_bridge : false
interfaces : [777f2819-ca27-4890-8d2f-11349ca0d398]
lacp : []
mac : []
name : br-int
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
_uuid : a65109fa-f8b4-4670-8ae8-a2bd0bf6aba3
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {ovn-chassis-id="5668499c-7dd0-41ee-bc5d-2e6ee9cd61c3(a)192.168.1.41"}
fake_bridge : false
interfaces : [ed442077-f897-4e0b-97a1-a8051e9c3d56]
lacp : []
mac : []
name : "ovn-566849-0"
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
_uuid : a1622e6f-fcd0-4a8a-b259-ca4d0ccf1cd2
bond_active_slave : []
bond_downdelay : 0
bond_fake_iface : false
bond_mode : []
bond_updelay : 0
cvlans : []
external_ids : {}
fake_bridge : false
interfaces : [ca368654-54f3-49d0-a71c-8894426df6bf]
lacp : []
mac : []
name : "vnet5"
other_config : {}
protected : false
qos : []
rstp_statistics : {}
rstp_status : {}
statistics : {}
status : {}
tag : []
trunks : []
vlan_mode : []
[root@ovirt2 ~]#
Best Regards,
Strahil Nikolov
On Dec 16, 2019 23:28, Dominik Holler <dholler(a)redhat.com> wrote:
>
>
>
> On Sat, Dec 14, 2019 at 11:36 AM Strahil Nikolov <hunter86_bg(a)yahoo.com> wrote:
>>
>> Hi Dominik,
>>
>> yes I was looking for those settings.
>>
>> I have added the external provider again, but I guess the mess is even bigger, as I made some stupid decisions (like removing 2 port groups :) without knowing what I'm doing).
>> Sadly I can't remove all packages on the engine and hosts and reinstall them from scratch.
>>
>> Pip fails to install the openstacksdk (CentOS 7 is not great for such tasks) on the engine, and my lack of knowledge of OVN makes it even more difficult.
>>
>> So the symptoms are that 2 machines can communicate with each other only if they are on the same host, while on separate hosts no communication is happening.
>>
>
> This indicates that the tunnels between the hosts are not created.
> Can you please check the /var/log/openvswitch/ovn-controller.log on both hosts for errors and warnings, or share parts of the files here?
> If this does not point us to a problem, OVN has to be reconfigured. If possible, the easiest way to do this would be to ensure that
> ovirt-provider-ovn is the default network provider of the hosts' cluster, then put one host after another into maintenance mode and reinstall.
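As a rough illustration of that check/reconfigure step (the commands below come from the Open vSwitch and ovirt-provider-ovn-driver tooling; the IPs are placeholders, so verify them against your own setup before running anything):

# on each host: which OVN central is ovn-controller pointed at?
ovs-vsctl get Open_vSwitch . external_ids:ovn-remote
# scan the controller log for tunnel/connection problems
grep -iE 'error|warn' /var/log/openvswitch/ovn-controller.log
# re-point the host at the engine's OVN southbound DB and set the local tunnel IP
vdsm-tool ovn-config <engine-ip> <host-tunnel-ip>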
>
>
>>
>> How I created the network via UI:
>>
>> 1. Networks - new
>> 2. Fill in the name
>> 3. Create on external provider
>> 4. Network Port security -> disabled (even undefined does not work)
>> 5.Connect to physical network -> ovirtmgmt
>>
>>
>> I would be happy to learn more about OVN and thus I would like to make it work.
>>
>> Here is some info from the engine:
>>
>> [root@engine ~]# ovn-nbctl show
>> switch 1288ed26-471c-4bc2-8a7d-4531f306f44c (ovirt-pxelan-2a88b2e0-d04b-4196-ad50-074501e4ed08)
>> port c1eba112-5eed-4c04-b25c-d3dcfb934546
>> addresses: ["56:6f:5a:65:00:06"]
>> port 8b52ab60-f474-4d51-b258-cb2e0a53c34a
>> type: localnet
>> addresses: ["unknown"]
>> port b2753040-881b-487a-92a1-9721da749be4
>> addresses: ["56:6f:5a:65:00:09"]
>> [root@engine ~]# ovn-sbctl show
>> Chassis "5668499c-7dd0-41ee-bc5d-2e6ee9cd61c3"
>> hostname: "ovirt3.localdomain"
>> Encap geneve
>> ip: "192.168.1.41"
>> options: {csum="true"}
>> Chassis "baa0199e-d1a4-484c-af13-a41bcad19dbc"
>> hostname: "ovirt1.localdomain"
>> Encap geneve
>> ip: "192.168.1.90"
>> options: {csum="true"}
>> Chassis "25cc77b3-046f-45c5-af0c-ffb2f77d73f1"
>> hostname: "ovirt2.localdomain"
>> Encap geneve
>> ip: "192.168.1.64"
>> options: {csum="true"}
>> Port_Binding "b2753040-881b-487a-92a1-9721da749be4"
>> Port_Binding &quo
is there any feature of load balancing for engine??
by yam yam
Hello,
Given a massive oVirt environment, I think a single engine looks too small to deal with all the workloads.
So, I want to make an active-active engine cluster for distributing the workload.
Is it possible for an oVirt environment to be made up of multiple engines & DBs for load balancing?
Re: is there any feature of load balancing for engine??
by Strahil
It is not a good idea.
If you think that the engine can't cope with the load, add a separate engine for the specific cluster(s).
What is the count of Datacenters, Clusters, Hosts, VMs & Storage domains?
Best Regards,
Strahil Nikolov
On Jan 8, 2020 03:51, yam yam <hyunooudy(a)gmail.com> wrote:
>
> Hello,
>
> given massive oVirt environment, I think single engine looks too small to deal with all workloads.
> so, I want to make active-active engine cluster for distributing workloads.
> is it possible for an oVirt environment to be made up of multiple engines & DBs for load balancing?
> _______________________________________________
> Users mailing list -- users(a)ovirt.org
> To unsubscribe send an email to users-leave(a)ovirt.org
> Privacy Statement: https://www.ovirt.org/site/privacy-policy/
> oVirt Code of Conduct: https://www.ovirt.org/community/about/community-guidelines/
> List Archives: https://lists.ovirt.org/archives/list/users@ovirt.org/message/XCO5DJS63YK...
Re: Setting up cockpit?
by Strahil
On Jan 9, 2020 18:58, m.skrzetuski(a)gmail.com wrote:
>
> Hello everyone,
>
> I'd like to get cockpit to work because currently when I click "Host Console" on a host I just get "connection refused". I checked and after the engine installation the cockpit service was not running. When I start it, it runs and answers on port 9090, however the SSL certificate is broken.
>
> - How do I auto enable cockpit on installation?
systemctl enable --now cockpit.socket -H <destination server>
> - How do I supply my own SSL certification to cockpit?
Put them in /etc/cockpit/ws-certs.d/
Source: https://www.redhat.com/en/blog/linux-system-administration-managem...
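A minimal sketch of that (file names are illustrative; depending on the cockpit version the key either goes into a matching .key file or gets appended to the .cert file):

# combine certificate and key into one .cert file under ws-certs.d
cat my-host.crt my-host.key > /etc/cockpit/ws-certs.d/10-my-host.cert
# fix the SELinux context, then pick up the new certificate
restorecon -v /etc/cockpit/ws-certs.d/10-my-host.cert
systemctl restart cockpit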
>
> Kind regards
> Skrzetuski
Best Regards,
Strahil Nikolov
Re: After NAS crash, one VM will not start up, Cannot prepare illegal volume
by David Johnson
Thank you again.
After updating the legality to LEGAL, the volume info now shows:
[root@mx-ovirt-host2 ~]# vdsm-client Volume getInfo \
    storagepoolID=25cd9bfc-bab6-11e8-90f3-78acc0b47b4d \
    storagedomainID=6e627364-5e0c-4250-ac95-7cd914d0175f \
    imageID=4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6 \
    volumeID=f8066c56-6db1-4605-8d7c-0739335d30b8
{
"status": "OK",
"lease": {
"path": "/rhev/data-center/mnt/192.168.2.220:
_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease",
"owners": [],
"version": null,
"offset": 0
},
"domain": "6e627364-5e0c-4250-ac95-7cd914d0175f",
"capacity": "1503238553600",
"voltype": "LEAF",
"description": "",
"parent": "a912e388-d80d-4f56-805b-ea5e2f35d741",
"format": "COW",
"generation": 0,
"image": "4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6",
"uuid": "f8066c56-6db1-4605-8d7c-0739335d30b8",
"disktype": "DATA",
"legality": "LEGAL",
"mtime": "0",
"apparentsize": "36440899584",
"truesize": "16916186624",
"type": "SPARSE",
"children": [],
"pool": "",
"ctime": "1571669201"
}
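For reference, the legality flip itself can also be done at the storage level; a rough sketch, assuming a file-based domain where each volume carries a <volume-id>.meta file with a LEGALITY= line (verify the path and back the file up first):

META=/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.meta
grep LEGALITY "$META"
# flip ILLEGAL -> LEGAL only after confirming the data is actually intact
sed -i.bak 's/^LEGALITY=ILLEGAL$/LEGALITY=LEGAL/' "$META"

Running vdsm-client Volume getInfo again, as above, is a quick way to confirm the change took effect.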
Results of the attempt to start the VM:
Log excerpt:
2020-01-09 06:47:46,575-0600 INFO (vm/c5d0a42f) [storage.StorageDomain]
Creating symlink from
/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6
to
/var/run/vdsm/storage/6e627364-5e0c-4250-ac95-7cd914d0175f/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6
(fileSD:580)
2020-01-09 06:47:46,581-0600 INFO (vm/c5d0a42f) [vdsm.api] FINISH
prepareImage return={'info': {'path':
u'/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8',
'type': 'file'}, 'path':
u'/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8',
'imgVolumesInfo': [{'domainID': '6e627364-5e0c-4250-ac95-7cd914d0175f',
'leaseOffset': 0, 'path':
u'/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/a912e388-d80d-4f56-805b-ea5e2f35d741',
'volumeID': u'a912e388-d80d-4f56-805b-ea5e2f35d741', 'leasePath':
u'/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/a912e388-d80d-4f56-805b-ea5e2f35d741.lease',
'imageID': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6'}, {'domainID':
'6e627364-5e0c-4250-ac95-7cd914d0175f', 'leaseOffset': 0, 'path':
u'/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8',
'volumeID': u'f8066c56-6db1-4605-8d7c-0739335d30b8', 'leasePath':
u'/rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
'imageID': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6'}]} from=internal,
task_id=865d2ff4-4e63-44dc-b8f8-9d93cad9892f (api:52)
2020-01-09 06:47:46,582-0600 INFO (vm/c5d0a42f) [vds] prepared volume
path: /rhev/data-center/mnt/192.168.2.220:_mnt_ovirt-freenas/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8
(clientIF:497)
2020-01-09 06:47:46,583-0600 INFO (vm/c5d0a42f) [vdsm.api] START
prepareImage(sdUUID='ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7',
spUUID='25cd9bfc-bab6-11e8-90f3-78acc0b47b4d',
imgUUID='60077050-6f99-41db-b280-446f018b104b',
leafUUID='a67eb40c-e0a1-49cc-9179-bebb263d6e9c', allowIllegal=False)
from=internal, task_id=08830292-0f75-4c5b-a411-695894c66475 (api:46)
2020-01-09 06:47:46,632-0600 INFO (vm/c5d0a42f) [vdsm.api] FINISH
prepareImage error=Cannot prepare illegal volume:
(u'a67eb40c-e0a1-49cc-9179-bebb263d6e9c',) from=internal,
task_id=08830292-0f75-4c5b-a411-695894c66475 (api:50)
2020-01-09 06:47:46,632-0600 ERROR (vm/c5d0a42f) [storage.TaskManager.Task]
(Task='08830292-0f75-4c5b-a411-695894c66475') Unexpected error (task:875)
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/vdsm/storage/task.py", line 882,
in _run
return fn(*args, **kargs)
File "<string>", line 2, in prepareImage
File "/usr/lib/python2.7/site-packages/vdsm/common/api.py", line 48, in
method
ret = func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/vdsm/storage/hsm.py", line 3187,
in prepareImage
raise se.prepareIllegalVolumeError(volUUID)
prepareIllegalVolumeError: Cannot prepare illegal volume:
(u'a67eb40c-e0a1-49cc-9179-bebb263d6e9c',)
2020-01-09 06:47:46,633-0600 INFO (vm/c5d0a42f) [storage.TaskManager.Task]
(Task='08830292-0f75-4c5b-a411-695894c66475') aborting: Task is aborted:
"Cannot prepare illegal volume: (u'a67eb40c-e0a1-49cc-9179-bebb263d6e9c',)"
- code 227 (task:1181)
2020-01-09 06:47:46,634-0600 ERROR (vm/c5d0a42f) [storage.Dispatcher]
FINISH prepareImage error=Cannot prepare illegal volume:
(u'a67eb40c-e0a1-49cc-9179-bebb263d6e9c',) (dispatcher:82)
2020-01-09 06:47:46,634-0600 ERROR (vm/c5d0a42f) [virt.vm]
(vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') The vm start process failed
(vm:949)
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 878, in
_startUnderlyingVm
self._run()
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2798, in
_run
self._devices = self._make_devices()
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2639, in
_make_devices
disk_objs = self._perform_host_local_adjustment()
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2712, in
_perform_host_local_adjustment
self._preparePathsForDrives(disk_params)
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 1023, in
_preparePathsForDrives
drive['path'] = self.cif.prepareVolumePath(drive, self.id)
File "/usr/lib/python2.7/site-packages/vdsm/clientIF.py", line 417, in
prepareVolumePath
raise vm.VolumeError(drive)
VolumeError: Bad volume specification {'address': {'bus': '0',
'controller': '0', 'type': 'drive', 'target': '0', 'unit': '3'}, 'serial':
'60077050-6f99-41db-b280-446f018b104b', 'index': 1, 'iface': 'scsi',
'apparentsize': '16842752', 'specParams': {}, 'cache': 'writeback',
'imageID': '60077050-6f99-41db-b280-446f018b104b', 'truesize': '2840576',
'type': 'disk', 'domainID': 'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7',
'reqsize': '0', 'format': 'cow', 'poolID':
'25cd9bfc-bab6-11e8-90f3-78acc0b47b4d', 'device': 'disk', 'path':
'/rhev/data-center/25cd9bfc-bab6-11e8-90f3-78acc0b47b4d/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/60077050-6f99-41db-b280-446f018b104b/a67eb40c-e0a1-49cc-9179-bebb263d6e9c',
'propagateErrors': 'off', 'name': 'sdb', 'volumeID':
'a67eb40c-e0a1-49cc-9179-bebb263d6e9c', 'diskType': 'file', 'alias':
'ua-60077050-6f99-41db-b280-446f018b104b', 'discard': False}
2020-01-09 06:47:46,634-0600 INFO (vm/c5d0a42f) [virt.vm]
(vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') Changed state to Down: Bad
volume specification {'address': {'bus': '0', 'controller': '0', 'type':
'drive', 'target': '0', 'unit': '3'}, 'serial':
'60077050-6f99-41db-b280-446f018b104b', 'index': 1, 'iface': 'scsi',
'apparentsize': '16842752', 'specParams': {}, 'cache': 'writeback',
'imageID': '60077050-6f99-41db-b280-446f018b104b', 'truesize': '2840576',
'type': 'disk', 'domainID': 'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7',
'reqsize': '0', 'format': 'cow', 'poolID':
'25cd9bfc-bab6-11e8-90f3-78acc0b47b4d', 'device': 'disk', 'path':
'/rhev/data-center/25cd9bfc-bab6-11e8-90f3-78acc0b47b4d/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/60077050-6f99-41db-b280-446f018b104b/a67eb40c-e0a1-49cc-9179-bebb263d6e9c',
'propagateErrors': 'off', 'name': 'sdb', 'volumeID':
'a67eb40c-e0a1-49cc-9179-bebb263d6e9c', 'diskType': 'file', 'alias':
'ua-60077050-6f99-41db-b280-446f018b104b', 'discard': False} (code=1)
(vm:1689)
2
Regards,
David Johnson
Director of Development, Maxis Technology
844.696.2947 ext 702 (o) | 479.531.3590 (c)
djohnson(a)maxistechnology.com
www.maxistechnology.com
*stay connected <http://www.linkedin.com/in/pojoguy>*
On Thu, Jan 9, 2020 at 6:39 AM Benny Zlotnik <bzlotnik(a)redhat.com> wrote:
> this `/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.meta`
>
>
> You can see here[1] an example for updating volume metadata using
> vdsm-client
>
> [1] https://bugzilla.redhat.com/show_bug.cgi?id=1409380#c3
>
> On Thu, Jan 9, 2020 at 2:26 PM David Johnson <djohnson(a)maxistechnology.com>
> wrote:
>
>> Thank you for the quick response.
>>
>> Where do I find that?
>>
>> Regards,
>> David Johnson
>> Director of Development, Maxis Technology
>> 844.696.2947 ext 702 (o) | 479.531.3590 (c)
>> djohnson(a)maxistechnology.com
>>
>>
>> www.maxistechnology.com
>>
>>
>> *stay connected <http://www.linkedin.com/in/pojoguy>*
>>
>>
>> On Thu, Jan 9, 2020 at 6:24 AM Benny Zlotnik <bzlotnik(a)redhat.com> wrote:
>>
>>> Did you change the volume metadata to LEGAL on the storage as well?
>>>
>>>
>>> On Thu, Jan 9, 2020 at 2:19 PM David Johnson <
>>> djohnson(a)maxistechnology.com> wrote:
>>>
>>>> We had a drive in our NAS fail, but afterwards one of our VM's will not
>>>> start.
>>>>
>>>> The boot drive on the VM is (so near as I can tell) the only drive
>>>> affected.
>>>>
>>>> I confirmed that the disk images (active and snapshot) are both valid
>>>> with qemu.
>>>>
>>>> I followed the instructions at
>>>> https://www.canarytek.com/2017/07/02/Recover_oVirt_Illegal_Snapshots.html to
>>>> identify the snapshot images that were marked "invalid" and marked them as
>>>> valid.
>>>>
>>>> update images set imagestatus=1 where imagestatus=4;
>>>>
>>>>
>>>>
>>>> Log excerpt from attempt to start VM:
>>>> 2020-01-09 02:18:44,908-0600 INFO (vm/c5d0a42f) [vdsm.api] START
>>>> prepareImage(sdUUID='6e627364-5e0c-4250-ac95-7cd914d0175f',
>>>> spUUID='25cd9bfc-bab6-11e8-90f3-78acc0b47b4d',
>>>> imgUUID='4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6',
>>>> leafUUID='f8066c56-6db1-4605-8d7c-0739335d30b8', allowIllegal=False)
>>>> from=internal, task_id=26053225-6569-4b73-abdd-7d6c7e15d1e9 (api:46)
>>>> 2020-01-09 02:18:44,931-0600 INFO (vm/c5d0a42f) [vdsm.api] FINISH
>>>> prepareImage error=Cannot prepare illegal volume:
>>>> (u'f8066c56-6db1-4605-8d7c-0739335d30b8',) from=internal,
>>>> task_id=26053225-6569-4b73-abdd-7d6c7e15d1e9 (api:50)
>>>> 2020-01-09 02:18:44,932-0600 ERROR (vm/c5d0a42f)
>>>> [storage.TaskManager.Task] (Task='26053225-6569-4b73-abdd-7d6c7e15d1e9')
>>>> Unexpected error (task:875)
>>>> Traceback (most recent call last):
>>>> File "/usr/lib/python2.7/site-packages/vdsm/storage/task.py", line
>>>> 882, in _run
>>>> return fn(*args, **kargs)
>>>> File "<string>", line 2, in prepareImage
>>>> File "/usr/lib/python2.7/site-packages/vdsm/common/api.py", line 48,
>>>> in method
>>>> ret = func(*args, **kwargs)
>>>> File "/usr/lib/python2.7/site-packages/vdsm/storage/hsm.py", line
>>>> 3187, in prepareImage
>>>> raise se.prepareIllegalVolumeError(volUUID)
>>>> prepareIllegalVolumeError: Cannot prepare illegal volume:
>>>> (u'f8066c56-6db1-4605-8d7c-0739335d30b8',)
>>>> 2020-01-09 02:18:44,932-0600 INFO (vm/c5d0a42f)
>>>> [storage.TaskManager.Task] (Task='26053225-6569-4b73-abdd-7d6c7e15d1e9')
>>>> aborting: Task is aborted: "Cannot prepare illegal volume:
>>>> (u'f8066c56-6db1-4605-8d7c-0739335d30b8',)" - code 227 (task:1181)
>>>> 2020-01-09 02:18:44,933-0600 ERROR (vm/c5d0a42f) [storage.Dispatcher]
>>>> FINISH prepareImage error=Cannot prepare illegal volume:
>>>> (u'f8066c56-6db1-4605-8d7c-0739335d30b8',) (dispatcher:82)
>>>> 2020-01-09 02:18:44,933-0600 ERROR (vm/c5d0a42f) [virt.vm]
>>>> (vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') The vm start process failed
>>>> (vm:949)
>>>> Traceback (most recent call last):
>>>> File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 878, in
>>>> _startUnderlyingVm
>>>> self._run()
>>>> File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2798,
>>>> in _run
>>>> self._devices = self._make_devices()
>>>> File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2639,
>>>> in _make_devices
>>>> disk_objs = self._perform_host_local_adjustment()
>>>> File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2712,
>>>> in _perform_host_local_adjustment
>>>> self._preparePathsForDrives(disk_params)
>>>> File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 1023,
>>>> in _preparePathsForDrives
>>>> drive['path'] = self.cif.prepareVolumePath(drive, self.id)
>>>> File "/usr/lib/python2.7/site-packages/vdsm/clientIF.py", line 417,
>>>> in prepareVolumePath
>>>> raise vm.VolumeError(drive)
>>>> VolumeError: Bad volume specification {'address': {'bus': '0',
>>>> 'controller': '0', 'type': 'drive', 'target': '0', 'unit': '0'}, 'serial':
>>>> '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'index': 0, 'iface': 'scsi',
>>>> 'apparentsize': '36440899584', 'specParams': {}, 'cache': 'writeback',
>>>> 'imageID': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'truesize':
>>>> '16916186624', 'type': 'disk', 'domainID':
>>>> '6e627364-5e0c-4250-ac95-7cd914d0175f', 'reqsize': '0', 'format': 'cow',
>>>> 'poolID': '25cd9bfc-bab6-11e8-90f3-78acc0b47b4d', 'device': 'disk', 'path':
>>>> '/rhev/data-center/25cd9bfc-bab6-11e8-90f3-78acc0b47b4d/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8',
>>>> 'propagateErrors': 'off', 'name': 'sda', 'bootOrder': '1', 'volumeID':
>>>> 'f8066c56-6db1-4605-8d7c-0739335d30b8', 'diskType': 'file', 'alias':
>>>> 'ua-4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'discard': False}
>>>> 2020-01-09 02:18:44,934-0600 INFO (vm/c5d0a42f) [virt.vm]
>>>> (vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') Changed state to Down: Bad
>>>> volume specification {'address': {'bus': '0', 'controller': '0', 'type':
>>>> 'drive', 'target': '0', 'unit': '0'}, 'serial':
>>>> '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'index': 0, 'iface': 'scsi',
>>>> 'apparentsize': '36440899584', 'specParams': {}, 'cache': 'writeback',
>>>> 'imageID': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'truesize':
>>>> '16916186624', 'type': 'disk', 'domainID':
>>>> '6e627364-5e0c-4250-ac95-7cd914d0175f', 'reqsize': '0', 'format': 'cow',
>>>> 'poolID': '25cd9bfc-bab6-11e8-90f3-78acc0b47b4d', 'device': 'disk', 'path':
>>>> '/rhev/data-center/25cd9bfc-bab6-11e8-90f3-78acc0b47b4d/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8',
>>>> 'propagateErrors': 'off', 'name': 'sda', 'bootOrder': '1', 'volumeID':
>>>> 'f8066c56-6db1-4605-8d7c-0739335d30b8', 'diskType': 'file', 'alias':
>>>> 'ua-4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'discard': False} (code=1)
>>>> (vm:1689)
>>>> 2020-01-09 02:18:44,963-0600 INFO (vm/c5d0a42f) [virt.vm]
>>>> (vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') Stopping connection
>>>> (guestagent:442)
>>>> 2020-01-09 02:18:44,978-0600 INFO (jsonrpc/4) [api.virt] START
>>>> destroy(gracefulAttempts=1) from=::ffff:192.168.2.15,57858,
>>>> flow_id=4b57f1a6, vmId=c5d0a42f-3b1e-43ee-a567-7844654011f5 (api:46)
>>>> 2
>>>>
>>>>
>>>> Log excerpt from attempt to migrate the affected drive to another
>>>> storage device:
>>>> 2020-01-09 05:27:14,362-0600 INFO (jsonrpc/2) [storage.VolumeManifest]
>>>> ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8
>>>> info is {'status': 'OK', 'domain': 'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7',
>>>> 'voltype': 'LEAF', 'description': 'None', 'parent':
>>>> 'a912e388-d80d-4f56-805b-ea5e2f35d741', 'format': 'COW', 'generation': 0,
>>>> 'image': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'ctime': '1578559013',
>>>> 'disktype': 'DATA', 'legality': 'LEGAL', 'mtime': '0', 'apparentsize':
>>>> '219008', 'children': [], 'pool': '', 'capacity': '1503238553600', 'uuid':
>>>> u'f8066c56-6db1-4605-8d7c-0739335d30b8', 'truesize': '26624', 'type':
>>>> 'SPARSE', 'lease': {'path': u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
>>>> 'owners': [], 'version': None, 'offset': 0}} (volume:282)
>>>> 2020-01-09 05:27:14,362-0600 INFO (jsonrpc/2) [vdsm.api] FINISH
>>>> getVolumeInfo return={'info': {'status': 'OK', 'domain':
>>>> 'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7', 'voltype': 'LEAF', 'description':
>>>> 'None', 'parent': 'a912e388-d80d-4f56-805b-ea5e2f35d741', 'format': 'COW',
>>>> 'generation': 0, 'image': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'ctime':
>>>> '1578559013', 'disktype': 'DATA', 'legality': 'LEGAL', 'mtime': '0',
>>>> 'apparentsize': '219008', 'children': [], 'pool': '', 'capacity':
>>>> '1503238553600', 'uuid': u'f8066c56-6db1-4605-8d7c-0739335d30b8',
>>>> 'truesize': '26624', 'type': 'SPARSE', 'lease': {'path':
>>>> u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
>>>> 'owners': [], 'version': None, 'offset': 0}}}
>>>> from=::ffff:192.168.2.15,57858,
>>>> flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731,
>>>> task_id=28e885f7-df18-4ea5-bb6e-eaee7385b519 (api:52)
>>>> 2020-01-09 05:27:14,363-0600 INFO (jsonrpc/2) [jsonrpc.JsonRpcServer]
>>>> RPC call Volume.getInfo succeeded in 0.04 seconds (__init__:573)
>>>> 2020-01-09 05:27:14,394-0600 INFO (jsonrpc/6) [vdsm.api] START
>>>> sdm_copy_data(job_id=u'9a47a14f-d398-4fe8-81fa-59cba7122aa0',
>>>> source={u'img_id': u'4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', u'sd_id':
>>>> u'6e627364-5e0c-4250-ac95-7cd914d0175f', u'endpoint_type': u'div',
>>>> u'vol_id': u'f8066c56-6db1-4605-8d7c-0739335d30b8'},
>>>> destination={u'generation': 0, u'img_id':
>>>> u'4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', u'sd_id':
>>>> u'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7', u'endpoint_type': u'div',
>>>> u'vol_id': u'f8066c56-6db1-4605-8d7c-0739335d30b8'})
>>>> from=::ffff:192.168.2.15,57858,
>>>> flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731,
>>>> task_id=f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (api:46)
>>>> 2020-01-09 05:27:14,395-0600 INFO (jsonrpc/6) [vdsm.api] FINISH
>>>> sdm_copy_data return=None from=::ffff:192.168.2.15,57858,
>>>> flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731,
>>>> task_id=f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (api:52)
>>>> 2020-01-09 05:27:14,395-0600 INFO (jsonrpc/6) [jsonrpc.JsonRpcServer]
>>>> RPC call SDM.copy_data succeeded in 0.00 seconds (__init__:573)
>>>> 2020-01-09 05:27:14,396-0600 INFO (tasks/4)
>>>> [storage.ThreadPool.WorkerThread] START task
>>>> f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (cmd=<bound method Task.commit of
>>>> <vdsm.storage.task.Task instance at 0x7ff3a42613b0>>, args=None)
>>>> (threadPool:208)
>>>> 2020-01-09 05:27:14,396-0600 INFO (tasks/4) [root] Running job
>>>> u'9a47a14f-d398-4fe8-81fa-59cba7122aa0'... (jobs:183)
>>>> 2020-01-09 05:27:14,429-0600 INFO (jsonrpc/1) [api.host] START
>>>> getJobs(job_type=u'storage',
>>>> job_ids=[u'9a47a14f-d398-4fe8-81fa-59cba7122aa0'])
>>>> from=::ffff:192.168.2.15,57858,
>>>> flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:46)
>>>> 2020-01-09 05:27:14,430-0600 INFO (jsonrpc/1) [api.host] FINISH
>>>> getJobs return={'status': {'message': 'Done', 'code': 0}, 'jobs':
>>>> {u'9a47a14f-d398-4fe8-81fa-59cba7122aa0': {'status': 'running', 'job_type':
>>>> 'storage', 'id': u'9a47a14f-d398-4fe8-81fa-59cba7122aa0', 'description':
>>>> 'copy_data'}}} from=::ffff:192.168.2.15,57858,
>>>> flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:52)
>>>> 2020-01-09 05:27:14,430-0600 INFO (jsonrpc/1) [jsonrpc.JsonRpcServer]
>>>> RPC call Host.getJobs succeeded in 0.00 seconds (__init__:573)
>>>> 2020-01-09 05:27:14,459-0600 INFO (tasks/4) [storage.SANLock]
>>>> Acquiring Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
>>>> path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
>>>> offset=0) for host id 2 (clusterlock:386)
>>>> 2020-01-09 05:27:14,496-0600 INFO (tasks/4) [storage.SANLock]
>>>> Successfully acquired Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
>>>> path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
>>>> offset=0) for host id 2 (clusterlock:424)
>>>> 2020-01-09 05:27:14,498-0600 INFO (tasks/4) [storage.VolumeManifest]
>>>> Volume: preparing volume
>>>> 6e627364-5e0c-4250-ac95-7cd914d0175f/f8066c56-6db1-4605-8d7c-0739335d30b8
>>>> (volume:569)
>>>> 2020-01-09 05:27:14,502-0600 INFO (tasks/4) [storage.SANLock]
>>>> Releasing Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
>>>> path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
>>>> offset=0) (clusterlock:487)
>>>> 2020-01-09 05:27:14,504-0600 INFO (tasks/4) [storage.SANLock]
>>>> Successfully released Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
>>>> path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
>>>> offset=0) (clusterlock:496)
>>>> 2020-01-09 05:27:14,504-0600 ERROR (tasks/4) [root] Job
>>>> u'9a47a14f-d398-4fe8-81fa-59cba7122aa0' failed (jobs:221)
>>>> Traceback (most recent call last):
>>>> File "/usr/lib/python2.7/site-packages/vdsm/jobs.py", line 157, in run
>>>> self._run()
>>>> File
>>>> "/usr/lib/python2.7/site-packages/vdsm/storage/sdm/api/copy_data.py", line
>>>> 61, in _run
>>>> with self._source.prepare(), self._dest.prepare():
>>>> File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
>>>> return self.gen.next()
>>>> File
>>>> "/usr/lib/python2.7/site-packages/vdsm/storage/sdm/api/copy_data.py", line
>>>> 172, in prepare
>>>> self.volume.prepare(rw=self._writable, justme=False)
>>>> File "/usr/lib/python2.7/site-packages/vdsm/storage/volume.py", line
>>>> 574, in prepare
>>>> raise se.prepareIllegalVolumeError(self.volUUID)
>>>> prepareIllegalVolumeError: Cannot prepare illegal volume:
>>>> ('f8066c56-6db1-4605-8d7c-0739335d30b8',)
>>>> 2020-01-09 05:27:14,505-0600 INFO (tasks/4) [root] Job
>>>> u'9a47a14f-d398-4fe8-81fa-59cba7122aa0' will be deleted in 3600 seconds
>>>> (jobs:249)
>>>> 2020-01-09 05:27:14,506-0600 INFO (tasks/4)
>>>> [storage.ThreadPool.WorkerThread] FINISH task
>>>> f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (threadPool:210)
>>>> 2020-01-09 05:27:16,459-0600 INFO (jsonrpc/4) [api.host] START
>>>> getJobs(job_type=u'storage',
>>>> job_ids=[u'9a47a14f-d398-4fe8-81fa-59cba7122aa0'])
>>>> from=::ffff:192.168.2.15,57858,
>>>> flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:46)
>>>> 2020-01-09 05:27:16,460-0600 INFO (jsonrpc/4) [api.host] FINISH
>>>> getJobs return={'status': {'message': 'Done', 'code': 0}, 'jobs':
>>>> {u'9a47a14f-d398-4fe8-81fa-59cba7122aa0': {'status': 'failed', 'error':
>>>> {'message': "Cannot prepare illegal volume:
>>>> ('f8066c56-6db1-4605-8d7c-0739335d30b8',)", 'code': 227}, 'job_type':
>>>> 'storage', 'id': u'9a47a14f-d398-4fe8-81fa-59cba7122aa0', 'description':
>>>> 'copy_data'}}} from=::ffff:192.168.2.15,57858,
>>>> flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:52)
>>>> 2
>>>>
>>>> Regards,
>>>> David Johnson
>>>> Director of Development, Maxis Technology
>>>> 844.696.2947 ext 702 (o) | 479.531.3590 (c)
>>>> djohnson(a)maxistechnology.com
>>>>
>>>>
>>>> www.maxistechnology.com
>>>>
>>>>
>>>> *stay connected <http://www.linkedin.com/in/pojoguy>*
>>>> _______________________________________________
>>>> Users mailing list -- users(a)ovirt.org
>>>> To unsubscribe send an email to users-leave(a)ovirt.org
>>>> Privacy Statement: https://www.ovirt.org/site/privacy-policy/
>>>> oVirt Code of Conduct:
>>>> https://www.ovirt.org/community/about/community-guidelines/
>>>> List Archives:
>>>> https://lists.ovirt.org/archives/list/users@ovirt.org/message/KKLZKSPCESY...
>>>>
>>>
AWX and error using ovirt as an inventory source
by Gianluca Cecchi
Hello,
I have awx 9.0.1 and ansible 2.8.5 in a container on a CentOS 7.7 server.
I'm trying to use oVirt 4.3.6.7-1.el7 as the source of an inventory in awx,
but I get an error when syncing.
Find the error messages at the bottom below.
I see that in the recent past (around June this year) there were some problems,
but they should be solved now, correct?
There was also a problem with syncing when some powered-off VMs were present
in the oVirt env, but I think this is solved too, correct?
Is there any way to replicate / test this from the command line of the awx container?
I tried some things, but on the command line I always get an error regarding
oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0
which I think depends on not using the correct command line and/or not setting
the needed env.
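For reference, one way to exercise the inventory script by hand from inside the awx task container might look like this (a sketch; OVIRT_INI_PATH and the [ovirt] keys are what the classic ovirt4.py script expects, and engine.example.com / the credentials are placeholders):

source /var/lib/awx/venv/ansible/bin/activate
pip install "ovirt-engine-sdk-python>=4.0.0"   # only if the venv is missing it
cat > /tmp/ovirt.ini <<'EOF'
[ovirt]
ovirt_url = https://engine.example.com/ovirt-engine/api
ovirt_username = admin@internal
ovirt_password = secret
EOF
export OVIRT_INI_PATH=/tmp/ovirt.ini
python /var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/plugins/inventory/ovirt4.py --list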
Thanks in advance,
Gianluca
2.536 INFO Updating inventory 4: MYDC_OVIRT
3.011 INFO Reading Ansible inventory source:
/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/plugins/inventory/ovirt4.py
3.013 INFO Using VIRTUAL_ENV: /var/lib/awx/venv/ansible
3.013 INFO Using PATH:
/var/lib/awx/venv/ansible/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
3.013 INFO Using PYTHONPATH:
/var/lib/awx/venv/ansible/lib/python3.6/site-packages:
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/bin/awx-manage", line 11, in <module>
load_entry_point('awx==9.0.1.0', 'console_scripts', 'awx-manage')()
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/__init__.py", line
158, in manage
execute_from_command_line(sys.argv)
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py",
line 381, in execute_from_command_line
utility.execute()
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py",
line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py",
line 323, in run_from_argv
self.execute(*args, **cmd_options)
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py",
line 364, in execute
output = self.handle(*args, **options)
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py",
line 1153, in handle
raise exc
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py",
line 1043, in handle
venv_path=venv_path, verbosity=self.verbosity).load()
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py",
line 214, in load
return self.command_to_json(base_args + ['--list'])
File
"/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py",
line 197, in command_to_json
self.method, proc.returncode, stdout, stderr))
RuntimeError: ansible-inventory failed (rc=1) with stdout:
stderr:
ansible-inventory 2.8.5
config file = /etc/ansible/ansible.cfg
configured module search path = ['/var/lib/awx/.ansible/plugins/modules',
'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.6/site-packages/ansible
executable location = /usr/bin/ansible-inventory
python version = 3.6.8 (default, Oct 7 2019, 17:58:22) [GCC 8.2.1
20180905 (Red Hat 8.2.1-3)]
Using /etc/ansible/ansible.cfg as config file
[WARNING]: * Failed to parse /var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/ovirt4.py with script plugin: Inventory
script
(/var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/ovirt4.py) had an execution error:
File "/usr/lib/python3.6/site-packages/ansible/inventory/manager.py",
line 268, in parse_source
plugin.parse(self._inventory, self._loader, source, cache=cache)
File
"/usr/lib/python3.6/site-packages/ansible/plugins/inventory/script.py",
line 161, in parse
raise AnsibleParserError(to_native(e))
[WARNING]: Unable to parse /var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/ovirt4.py as an inventory source
ERROR! No inventory was parsed, please check your configuration and options.
After NAS crash, one VM will not start up, Cannot prepare illegal volume
by David Johnson
We had a drive in our NAS fail, but afterwards one of our VMs will not
start.
The boot drive on the VM is (as near as I can tell) the only drive affected.
I confirmed that the disk images (active and snapshot) are both valid with
qemu.
I followed the instructions at
https://www.canarytek.com/2017/07/02/Recover_oVirt_Illegal_Snapshots.html to
identify the snapshot images that were marked "invalid" and marked them as
valid.
update images set imagestatus=1 where imagestatus=4;
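For anyone repeating this, a narrower variant that touches only the affected disk might look like the following (a sketch; it assumes the engine DB's images table has an image_group_id column and that the default database name and credentials from engine-setup are in use):

sudo -u postgres psql engine -c \
  "update images set imagestatus=1 \
   where image_group_id='4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6' and imagestatus=4;"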
Log excerpt from attempt to start VM:
2020-01-09 02:18:44,908-0600 INFO (vm/c5d0a42f) [vdsm.api] START
prepareImage(sdUUID='6e627364-5e0c-4250-ac95-7cd914d0175f',
spUUID='25cd9bfc-bab6-11e8-90f3-78acc0b47b4d',
imgUUID='4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6',
leafUUID='f8066c56-6db1-4605-8d7c-0739335d30b8', allowIllegal=False)
from=internal, task_id=26053225-6569-4b73-abdd-7d6c7e15d1e9 (api:46)
2020-01-09 02:18:44,931-0600 INFO (vm/c5d0a42f) [vdsm.api] FINISH
prepareImage error=Cannot prepare illegal volume:
(u'f8066c56-6db1-4605-8d7c-0739335d30b8',) from=internal,
task_id=26053225-6569-4b73-abdd-7d6c7e15d1e9 (api:50)
2020-01-09 02:18:44,932-0600 ERROR (vm/c5d0a42f) [storage.TaskManager.Task]
(Task='26053225-6569-4b73-abdd-7d6c7e15d1e9') Unexpected error (task:875)
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/vdsm/storage/task.py", line 882,
in _run
return fn(*args, **kargs)
File "<string>", line 2, in prepareImage
File "/usr/lib/python2.7/site-packages/vdsm/common/api.py", line 48, in
method
ret = func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/vdsm/storage/hsm.py", line 3187,
in prepareImage
raise se.prepareIllegalVolumeError(volUUID)
prepareIllegalVolumeError: Cannot prepare illegal volume:
(u'f8066c56-6db1-4605-8d7c-0739335d30b8',)
2020-01-09 02:18:44,932-0600 INFO (vm/c5d0a42f) [storage.TaskManager.Task]
(Task='26053225-6569-4b73-abdd-7d6c7e15d1e9') aborting: Task is aborted:
"Cannot prepare illegal volume: (u'f8066c56-6db1-4605-8d7c-0739335d30b8',)"
- code 227 (task:1181)
2020-01-09 02:18:44,933-0600 ERROR (vm/c5d0a42f) [storage.Dispatcher]
FINISH prepareImage error=Cannot prepare illegal volume:
(u'f8066c56-6db1-4605-8d7c-0739335d30b8',) (dispatcher:82)
2020-01-09 02:18:44,933-0600 ERROR (vm/c5d0a42f) [virt.vm]
(vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') The vm start process failed
(vm:949)
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 878, in
_startUnderlyingVm
self._run()
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2798, in
_run
self._devices = self._make_devices()
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2639, in
_make_devices
disk_objs = self._perform_host_local_adjustment()
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 2712, in
_perform_host_local_adjustment
self._preparePathsForDrives(disk_params)
File "/usr/lib/python2.7/site-packages/vdsm/virt/vm.py", line 1023, in
_preparePathsForDrives
drive['path'] = self.cif.prepareVolumePath(drive, self.id)
File "/usr/lib/python2.7/site-packages/vdsm/clientIF.py", line 417, in
prepareVolumePath
raise vm.VolumeError(drive)
VolumeError: Bad volume specification {'address': {'bus': '0',
'controller': '0', 'type': 'drive', 'target': '0', 'unit': '0'}, 'serial':
'4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'index': 0, 'iface': 'scsi',
'apparentsize': '36440899584', 'specParams': {}, 'cache': 'writeback',
'imageID': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'truesize':
'16916186624', 'type': 'disk', 'domainID':
'6e627364-5e0c-4250-ac95-7cd914d0175f', 'reqsize': '0', 'format': 'cow',
'poolID': '25cd9bfc-bab6-11e8-90f3-78acc0b47b4d', 'device': 'disk', 'path':
'/rhev/data-center/25cd9bfc-bab6-11e8-90f3-78acc0b47b4d/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8',
'propagateErrors': 'off', 'name': 'sda', 'bootOrder': '1', 'volumeID':
'f8066c56-6db1-4605-8d7c-0739335d30b8', 'diskType': 'file', 'alias':
'ua-4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'discard': False}
2020-01-09 02:18:44,934-0600 INFO (vm/c5d0a42f) [virt.vm]
(vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') Changed state to Down: Bad
volume specification {'address': {'bus': '0', 'controller': '0', 'type':
'drive', 'target': '0', 'unit': '0'}, 'serial':
'4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'index': 0, 'iface': 'scsi',
'apparentsize': '36440899584', 'specParams': {}, 'cache': 'writeback',
'imageID': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'truesize':
'16916186624', 'type': 'disk', 'domainID':
'6e627364-5e0c-4250-ac95-7cd914d0175f', 'reqsize': '0', 'format': 'cow',
'poolID': '25cd9bfc-bab6-11e8-90f3-78acc0b47b4d', 'device': 'disk', 'path':
'/rhev/data-center/25cd9bfc-bab6-11e8-90f3-78acc0b47b4d/6e627364-5e0c-4250-ac95-7cd914d0175f/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8',
'propagateErrors': 'off', 'name': 'sda', 'bootOrder': '1', 'volumeID':
'f8066c56-6db1-4605-8d7c-0739335d30b8', 'diskType': 'file', 'alias':
'ua-4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'discard': False} (code=1)
(vm:1689)
2020-01-09 02:18:44,963-0600 INFO (vm/c5d0a42f) [virt.vm]
(vmId='c5d0a42f-3b1e-43ee-a567-7844654011f5') Stopping connection
(guestagent:442)
2020-01-09 02:18:44,978-0600 INFO (jsonrpc/4) [api.virt] START
destroy(gracefulAttempts=1) from=::ffff:192.168.2.15,57858,
flow_id=4b57f1a6, vmId=c5d0a42f-3b1e-43ee-a567-7844654011f5 (api:46)
2
Log excerpt from attempt to migrate the affected drive to another storage
device:
2020-01-09 05:27:14,362-0600 INFO (jsonrpc/2) [storage.VolumeManifest]
ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8
info is {'status': 'OK', 'domain': 'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7',
'voltype': 'LEAF', 'description': 'None', 'parent':
'a912e388-d80d-4f56-805b-ea5e2f35d741', 'format': 'COW', 'generation': 0,
'image': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'ctime': '1578559013',
'disktype': 'DATA', 'legality': 'LEGAL', 'mtime': '0', 'apparentsize':
'219008', 'children': [], 'pool': '', 'capacity': '1503238553600', 'uuid':
u'f8066c56-6db1-4605-8d7c-0739335d30b8', 'truesize': '26624', 'type':
'SPARSE', 'lease': {'path':
u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
'owners': [], 'version': None, 'offset': 0}} (volume:282)
2020-01-09 05:27:14,362-0600 INFO (jsonrpc/2) [vdsm.api] FINISH
getVolumeInfo return={'info': {'status': 'OK', 'domain':
'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7', 'voltype': 'LEAF', 'description':
'None', 'parent': 'a912e388-d80d-4f56-805b-ea5e2f35d741', 'format': 'COW',
'generation': 0, 'image': '4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', 'ctime':
'1578559013', 'disktype': 'DATA', 'legality': 'LEGAL', 'mtime': '0',
'apparentsize': '219008', 'children': [], 'pool': '', 'capacity':
'1503238553600', 'uuid': u'f8066c56-6db1-4605-8d7c-0739335d30b8',
'truesize': '26624', 'type': 'SPARSE', 'lease': {'path':
u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
'owners': [], 'version': None, 'offset': 0}}}
from=::ffff:192.168.2.15,57858,
flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731,
task_id=28e885f7-df18-4ea5-bb6e-eaee7385b519 (api:52)
2020-01-09 05:27:14,363-0600 INFO (jsonrpc/2) [jsonrpc.JsonRpcServer] RPC
call Volume.getInfo succeeded in 0.04 seconds (__init__:573)
2020-01-09 05:27:14,394-0600 INFO (jsonrpc/6) [vdsm.api] START
sdm_copy_data(job_id=u'9a47a14f-d398-4fe8-81fa-59cba7122aa0',
source={u'img_id': u'4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', u'sd_id':
u'6e627364-5e0c-4250-ac95-7cd914d0175f', u'endpoint_type': u'div',
u'vol_id': u'f8066c56-6db1-4605-8d7c-0739335d30b8'},
destination={u'generation': 0, u'img_id':
u'4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6', u'sd_id':
u'ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7', u'endpoint_type': u'div',
u'vol_id': u'f8066c56-6db1-4605-8d7c-0739335d30b8'})
from=::ffff:192.168.2.15,57858,
flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731,
task_id=f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (api:46)
2020-01-09 05:27:14,395-0600 INFO (jsonrpc/6) [vdsm.api] FINISH
sdm_copy_data return=None from=::ffff:192.168.2.15,57858,
flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731,
task_id=f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (api:52)
2020-01-09 05:27:14,395-0600 INFO (jsonrpc/6) [jsonrpc.JsonRpcServer] RPC
call SDM.copy_data succeeded in 0.00 seconds (__init__:573)
2020-01-09 05:27:14,396-0600 INFO (tasks/4)
[storage.ThreadPool.WorkerThread] START task
f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (cmd=<bound method Task.commit of
<vdsm.storage.task.Task instance at 0x7ff3a42613b0>>, args=None)
(threadPool:208)
2020-01-09 05:27:14,396-0600 INFO (tasks/4) [root] Running job
u'9a47a14f-d398-4fe8-81fa-59cba7122aa0'... (jobs:183)
2020-01-09 05:27:14,429-0600 INFO (jsonrpc/1) [api.host] START
getJobs(job_type=u'storage',
job_ids=[u'9a47a14f-d398-4fe8-81fa-59cba7122aa0'])
from=::ffff:192.168.2.15,57858,
flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:46)
2020-01-09 05:27:14,430-0600 INFO (jsonrpc/1) [api.host] FINISH getJobs
return={'status': {'message': 'Done', 'code': 0}, 'jobs':
{u'9a47a14f-d398-4fe8-81fa-59cba7122aa0': {'status': 'running', 'job_type':
'storage', 'id': u'9a47a14f-d398-4fe8-81fa-59cba7122aa0', 'description':
'copy_data'}}} from=::ffff:192.168.2.15,57858,
flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:52)
2020-01-09 05:27:14,430-0600 INFO (jsonrpc/1) [jsonrpc.JsonRpcServer] RPC
call Host.getJobs succeeded in 0.00 seconds (__init__:573)
2020-01-09 05:27:14,459-0600 INFO (tasks/4) [storage.SANLock] Acquiring
Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
offset=0) for host id 2 (clusterlock:386)
2020-01-09 05:27:14,496-0600 INFO (tasks/4) [storage.SANLock] Successfully
acquired Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
offset=0) for host id 2 (clusterlock:424)
2020-01-09 05:27:14,498-0600 INFO (tasks/4) [storage.VolumeManifest]
Volume: preparing volume
6e627364-5e0c-4250-ac95-7cd914d0175f/f8066c56-6db1-4605-8d7c-0739335d30b8
(volume:569)
2020-01-09 05:27:14,502-0600 INFO (tasks/4) [storage.SANLock] Releasing
Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
offset=0) (clusterlock:487)
2020-01-09 05:27:14,504-0600 INFO (tasks/4) [storage.SANLock] Successfully
released Lease(name='f8066c56-6db1-4605-8d7c-0739335d30b8',
path=u'/rhev/data-center/mnt/192.168.2.223:_mnt_ovirt-main-pool/ec6ccb14-03c2-49cc-9cc0-b1a87d582ed7/images/4081ce8f-1ce1-4ee1-aa43-69af2dfc5ab6/f8066c56-6db1-4605-8d7c-0739335d30b8.lease',
offset=0) (clusterlock:496)
2020-01-09 05:27:14,504-0600 ERROR (tasks/4) [root] Job
u'9a47a14f-d398-4fe8-81fa-59cba7122aa0' failed (jobs:221)
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/vdsm/jobs.py", line 157, in run
self._run()
File
"/usr/lib/python2.7/site-packages/vdsm/storage/sdm/api/copy_data.py", line
61, in _run
with self._source.prepare(), self._dest.prepare():
File "/usr/lib64/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File
"/usr/lib/python2.7/site-packages/vdsm/storage/sdm/api/copy_data.py", line
172, in prepare
self.volume.prepare(rw=self._writable, justme=False)
File "/usr/lib/python2.7/site-packages/vdsm/storage/volume.py", line 574,
in prepare
raise se.prepareIllegalVolumeError(self.volUUID)
prepareIllegalVolumeError: Cannot prepare illegal volume:
('f8066c56-6db1-4605-8d7c-0739335d30b8',)
2020-01-09 05:27:14,505-0600 INFO (tasks/4) [root] Job
u'9a47a14f-d398-4fe8-81fa-59cba7122aa0' will be deleted in 3600 seconds
(jobs:249)
2020-01-09 05:27:14,506-0600 INFO (tasks/4)
[storage.ThreadPool.WorkerThread] FINISH task
f1504b2d-c6c2-4866-97ce-e0476b0ba9a2 (threadPool:210)
2020-01-09 05:27:16,459-0600 INFO (jsonrpc/4) [api.host] START
getJobs(job_type=u'storage',
job_ids=[u'9a47a14f-d398-4fe8-81fa-59cba7122aa0'])
from=::ffff:192.168.2.15,57858,
flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:46)
2020-01-09 05:27:16,460-0600 INFO (jsonrpc/4) [api.host] FINISH getJobs
return={'status': {'message': 'Done', 'code': 0}, 'jobs':
{u'9a47a14f-d398-4fe8-81fa-59cba7122aa0': {'status': 'failed', 'error':
{'message': "Cannot prepare illegal volume:
('f8066c56-6db1-4605-8d7c-0739335d30b8',)", 'code': 227}, 'job_type':
'storage', 'id': u'9a47a14f-d398-4fe8-81fa-59cba7122aa0', 'description':
'copy_data'}}} from=::ffff:192.168.2.15,57858,
flow_id=94ad3bf0-c5f5-4d45-af2a-49aa4eaa6731 (api:52)
2
Regards,
David Johnson
Director of Development, Maxis Technology
844.696.2947 ext 702 (o) | 479.531.3590 (c)
djohnson(a)maxistechnology.com
www.maxistechnology.com
*stay connected <http://www.linkedin.com/in/pojoguy>*
Storage I/O problem.
by Christian Reiss
Ugh,
After rebooting the 3-way HCI cluster, everything came back online:
all the gluster volumes are online, no split-brain info is detected, and
they are mounted on all nodes.
The Volumes, Domains and Disks are all marked green in the Engine.
Launching a VM fails with
"VM test01 has been paused due to storage I/O problem."
An md5 sum of an uploaded ISO image from all three mounted cluster
members yields the same md5, as does the source file on my PC. Creating a
new VM and attaching that ISO fails: the ISO is not readable (says the
VM console).
The disk image from the test01 VM seems sound (the file has the correct size,
the file tool shows the correct magic header); other files (configs) are readable.
I did a complete cluster reboot, etc.
gluster> volume heal vms info split-brain
Brick node01:/gluster_bricks/vms/vms
Status: Connected
Number of entries in split-brain: 0
Brick node02:/gluster_bricks/vms/vms
Status: Connected
Number of entries in split-brain: 0
Brick node03:/gluster_bricks/vms/vms
Status: Connected
Number of entries in split-brain: 0
Brick node01:/gluster_bricks/vms/vms
Status: Connected
Total Number of entries: 0
Number of entries in heal pending: 0
Number of entries in split-brain: 0
Number of entries possibly healing: 0
Brick node02:/gluster_bricks/vms/vms
Status: Connected
Total Number of entries: 0
Number of entries in heal pending: 0
Number of entries in split-brain: 0
Number of entries possibly healing: 0
Brick node03:/gluster_bricks/vms/vms
Status: Connected
Total Number of entries: 0
Number of entries in heal pending: 0
Number of entries in split-brain: 0
Number of entries possibly healing: 0
How would I fix this issue?
Anyone got a clue on how to proceed?
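A few more data points that usually help narrow this kind of pause down (standard gluster/oVirt commands and log locations; the exact log file names may differ on your build):

gluster volume heal vms info              # pending heals, not only split-brain
gluster volume status vms                 # brick / self-heal daemon status and ports
gluster volume get vms cluster.quorum-type
# client-side FUSE mount log on the host running the paused VM
less /var/log/glusterfs/rhev-data-center-mnt-glusterSD-*.log
# qemu/vdsm view of the I/O error
grep -i 'abnormal vm stop\|i/o error' /var/log/vdsm/vdsm.log | tail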
-Chris.
--
with kind regards,
mit freundlichen Gruessen,
Christian Reiss
Re: Question about HCI gluster_inventory.yml
by John Call
Simone, ovirt-users,
I'm running into an issue with the ovirt.hosted_engine_setup role. I can't
decide if my problem is 1) that the SSH connection to the HostedEngineLocal
VM repeatedly fails due to that new VM not having an entry in
~/.ssh/known_hosts or 2) that I don't have some variable defined
correctly. I can force my way past #1 by setting host_key_checking=false
in ansible.cfg, but if I do that, I immediately get an error about an
undefined hostvar variable... Can you point me in the right direction to
debug this? I would really like to be able to rebuild my RHHI/RHV cluster
using these gluster+ovirt roles.
Here is the undefined variable error, and the variable files I'm including
via the "ansible-playbook -e..." syntax:
TASK [ovirt.hosted_engine_setup : Wait for the local VM]
*****************************************************************************************
ok: [localhost -> rhvm.home.lab]
TASK [ovirt.hosted_engine_setup : Add an entry for this host on /etc/hosts
on the local VM] ******************************************************
fatal: [localhost]: FAILED! => {"msg": "The task includes an option with an
undefined variable. *The error was: u\"hostvars['rhhi1.home.lab']\" is
undefined*\n\nThe error appears to be in
'/usr/share/ansible/roles/ovirt.hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml':
line 8, column 5, but may\nbe elsewhere in the file depending on the exact
syntax problem.\n\nThe offending line appears to be:\n\n timeout:
180\n - name: Add an entry for this host on /etc/hosts on the local VM\n
^ here\n"}
Thank you,
John Call
Red Hat - Storage Domain Architect
jcall(a)redhat.com
(714) 267-8802
On Tue, Jan 7, 2020 at 12:27 AM Gobinda Das <godas(a)redhat.com> wrote:
> Hi John,
> Sorry misunderstood, I thought you are using cockpit based deployment.
> Your gluster_inventory is correct for end to end deployment.
>
> On Tue, Jan 7, 2020 at 12:43 PM Gobinda Das <godas(a)redhat.com> wrote:
>
>> Hi John,
>> I am seeing error:
>> TASK [ovirt.hosted_engine_setup : Get local VM dir path]
>> *****************************************************************************************
>> task path:
>> /usr/share/ansible/roles/ovirt.hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml:57
>> fatal: [localhost]: FAILED! => {
>> "msg": "The task includes an option with an undefined variable. The
>> error was: 'ansible.vars.hostvars.HostVarsVars object' has no attribute
>> 'he_local_vm_dir'\n\nThe error appears to be in
>> '/usr/share/ansible/roles/ovirt.hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml':
>> line 57, column 7, but may\nbe elsewhere in the file depending on the exact
>> syntax problem.\n\nThe offending line appears to be:\n\n delegate_to:
>> \"{{ he_ansible_host_name }}\"\n - name: Get local VM dir path\n ^
>> here\n"
>> }
>> Adding @Simone Tiraboschi <stirabos(a)redhat.com>
>> Simone do we need to explicitly specify he_local_vm_dir ?
>> In gluster_inventory you can't mention other hosts and SD informations to
>> auto add. For that you need to create separate file under:
>> /usr/share/ovirt-hosted-engine-setup/gdeploy-inventory.yml
>> and add entries there like:
>>
>> gluster:
>> hosts:
>> host2:
>> host3:
>> vars:
>> storage_domains:
>> [{"name":"data","host":"host1","address":"host1","path":"/data","mount_options":"backup-volfile-servers=host2:host3"},{"name":"vmstore","host":"host1","address":"host1","path":"/vmstore","mount_options":"backup-volfile-servers=host2:host3"}]
>>
>> Then hook will automatically read this file and perform action
>> accordingly.
>>
>> On Tue, Jan 7, 2020 at 11:53 AM John Call <jcall(a)redhat.com> wrote:
>>
>>> Hi Gobinda, ovirt-users,
>>>
>>> I think I'm close, but I'm running into a timeout/error when the
>>> ovirt.hosted-engine-setup tasks try to connect to the new
>>> "HostedEngineLocal" VM. It seems to complain about host_key_checking. I
>>> disabled that via ansible.cfg, but it fails on the next task (see short
>>> logs.) Do you see anything obviously wrong in my inventory and json
>>> files? I've attached the verbose playbook logs when I run this from the
>>> first HCI host...
>>>
>>> [root@rhhi1 hc-ansible-deployment]# pwd
>>> /etc/ansible/roles/gluster.ansible/playbooks/hc-ansible-deployment
>>>
>>> [root@rhhi1 hc-ansible-deployment]# ansible-playbook -b -vvv -i
>>> gluster_inventory.yml hc_deployment.yml
>>> --extra-vars='@he_gluster_vars.json' | tee playbook.logs
>>>
>>>
>>> Thank you,
>>> John Call
>>> Red Hat - Storage Domain Architect
>>> jcall(a)redhat.com
>>> (714) 267-8802
>>>
>>>
>>> On Wed, Dec 11, 2019 at 11:09 PM Gobinda Das <godas(a)redhat.com> wrote:
>>>
>>>> Hi John,
>>>> You need to specify storage-fqdn(Which should mapped to storage
>>>> network) and ovirtmgmt-fqdn (Which should mapped to frontend network) like
>>>> this:
>>>> hc_nodes:
>>>> hosts:
>>>> host1-STORAGE-fqdn:
>>>> host2-STORAGE-fqdn:
>>>> host3-STORAGE-fqdn:
>>>> vars:
>>>> cluster_nodes:
>>>> - host1-STORAGE-fqdn
>>>> - host2-STORAGE-fqdn
>>>> - host3-STORAGE-fqdn
>>>> gluster_features_hci_cluster: "{{ cluster_nodes }}"
>>>>
>>>> gluster:
>>>> host2-ovirtmgmt-fqdn:
>>>> host3-ovirtmgmt-fqdn:
>>>> storage_domains:
>>>> [{"name":"data","host":"host1-STORAGE-fqdn","address":"host1-STORAGE-fqdn","path":"/data","mount_options":"backup-volfile-servers=host2-STORAGE-fqdn:host3-STORAGE-fqdn"},{"name":"vmstore","host":"host1-STORAGE-fqdn","address":"host1-STORAGE-fqdn","path":"/vmstore","mount_options":"backup-volfile-servers=host2-STORAGE-fqdn:host3-STORAGE-fqdn"}]
>>>>
>>>>
>>>> On Thu, Dec 12, 2019 at 2:47 AM John Call <jcall(a)redhat.com> wrote:
>>>>
>>>>> Hi ovirt-users,
>>>>>
>>>>> I'm trying to automate my HCI deployment, but can't figure out how to
>>>>> specify multiple network interfaces in gluster_inventory.yml. My servers
>>>>> have two NICs, one for ovirtmgmt (and everything else), and the other is
>>>>> just for Gluster. How should I populate the inventory/vars file? Is this
>>>>> correct?
>>>>>
>>>>> [root@rhhi1 hc-ansible-deployment]# pwd
>>>>> /etc/ansible/roles/gluster.ansible/playbooks/hc-ansible-deployment
>>>>>
>>>>> [root@rhhi1 hc-ansible-deployment]# cat gluster_inventory.yml
>>>>> --lots of stuff omitted--
>>>>> hc_nodes:
>>>>> hosts:
>>>>> host1-STORAGE-fqdn:
>>>>> host2-STORAGE-fqdn:
>>>>> host3-STORAGE-fqdn:
>>>>> vars:
>>>>> cluster_nodes:
>>>>> - host1-ovirtmgmt-fqdn
>>>>> - host2-ovirtmgmt-fqdn
>>>>> - host3-ovirtmgmt-fqdn
>>>>> gluster_features_hci_cluster: "{{ cluster_nodes }}"
>>>>>
>>>>> gluster:
>>>>> host2-STORAGE-fqdn:
>>>>> host3-STORAGE-fqdn:
>>>>> storage_domains:
>>>>> [{"name":"data","host":"host1-STORAGE-fqdn","address":"host1-STORAGE-fqdn","path":"/data","mount_options":"backup-volfile-servers=host2-STORAGE-fqdn:host3-STORAGE-fqdn"},{"name":"vmstore","host":"host1-STORAGE-fqdn","address":"host1-STORAGE-fqdn","path":"/vmstore","mount_options":"backup-volfile-servers=host2-STORAGE-fqdn:host3-STORAGE-fqdn"}]
>>>>> _______________________________________________
>>>>> Users mailing list -- users(a)ovirt.org
>>>>> To unsubscribe send an email to users-leave(a)ovirt.org
>>>>> Privacy Statement: https://www.ovirt.org/site/privacy-policy/
>>>>> oVirt Code of Conduct:
>>>>> https://www.ovirt.org/community/about/community-guidelines/
>>>>> List Archives:
>>>>> https://lists.ovirt.org/archives/list/users@ovirt.org/message/TEAHUX6LSPM...
>>>>>
>>>>
>>>>
>>>> --
>>>>
>>>>
>>>> Thanks,
>>>> Gobinda
>>>>
>>>
>>
>> --
>>
>>
>> Thanks,
>> Gobinda
>>
>
>
> --
>
>
> Thanks,
> Gobinda
>
upgrade the host with ovirt node ng update
by jingjie.jiang@oracle.com
Hi,
I have a 4.2.8 host which was not installed with the oVirt Node NG installer.
Can I use the 4.3 oVirt Node NG update to upgrade it?
If so, are there instructions to follow?
Thanks,
Jingjie