[ovirt-users] Confused / Hosted-Engine won't start
Christopher Young
mexigabacho at gmail.com
Thu Jul 2 00:36:49 UTC 2015
I'm sure I have worked through this before, but I've been banging my head
against this one for a while now, and I think I'm too close to the issue.
Basically, my hosted-engine won't start anymore. I do recall attempting to
migrate it to a new gluster-based NFS share recently, but I could have
sworn that was successful and working. I _think_ I have some sort of
mismatch in the storage/volume/image ids, but I need some help digging
through it if someone would be so kind (I've put a rough cross-check of
those ids right after the vm.conf dump below).
I have the following error in /var/log/libvirt/qemu/HostedEngine.log:
----------
2015-07-02T00:01:13.080952Z qemu-kvm: -drive
file=/var/run/vdsm/storage/4e3017eb-d062-4ad1-9df8-7057fcee412c/5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836/eeb2d821-a432-4df6-8856-fdb14df260e3,if=none,id=drive-virtio-disk0,format=raw,serial=5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836,cache=none,werror=stop,rerror=stop,aio=threads:
could not open disk image
/var/run/vdsm/storage/4e3017eb-d062-4ad1-9df8-7057fcee412c/5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836/eeb2d821-a432-4df6-8856-fdb14df260e3:
Could not refresh total sector count: Input/output error
----------
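In case it helps, here is a rough sketch of the sanity check I have in mind
against the exact path from that error (nothing definitive, just confirming
whether the volume file is there and readable; the path is copied straight
from the qemu-kvm line above):
----------
# Volume path taken verbatim from the HostedEngine.log error above
DISK=/var/run/vdsm/storage/4e3017eb-d062-4ad1-9df8-7057fcee412c/5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836/eeb2d821-a432-4df6-8856-fdb14df260e3

# Follow the symlink chain and check ownership/permissions (should be vdsm:kvm)
ls -lL "$DISK"

# Ask qemu directly; an Input/output error here should mirror the failure in the log
qemu-img info "$DISK"
----------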
I'm also including a few command outputs that I hope will help:
----------
[root@orldc-dev-vnode02 4e3017eb-d062-4ad1-9df8-7057fcee412c]# ls -la /var/run/vdsm/storage/4e3017eb-d062-4ad1-9df8-7057fcee412c/
total 8
drwxr-xr-x. 2 vdsm kvm 80 Jul 1 20:04 .
drwxr-xr-x. 3 vdsm kvm 60 Jul 1 20:04 ..
lrwxrwxrwx. 1 vdsm kvm 128 Jul 1 20:04
1d80a60c-8f26-4448-9460-2c7b00ff75bf ->
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/4e3017eb-d062-4ad1-9df8-7057fcee412c/images/1d80a60c-8f26-4448-9460-2c7b00ff75bf
lrwxrwxrwx. 1 vdsm kvm 128 Jul 1 20:04
23ac8897-b0c7-41d6-a7de-19f46ed78400 ->
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/4e3017eb-d062-4ad1-9df8-7057fcee412c/images/23ac8897-b0c7-41d6-a7de-19f46ed78400
----------
[root@orldc-dev-vnode02 4e3017eb-d062-4ad1-9df8-7057fcee412c]# cat /etc/ovirt-hosted-engine/vm.conf
vmId=6b7329f9-518a-4488-b1c4-2cd809f2f580
memSize=5120
display=vnc
devices={index:2,iface:ide,address:{ controller:0, target:0,unit:0, bus:1,
type:drive},specParams:{},readonly:true,deviceId:77924fc2-c5c9-408b-97d3-cd0b0d11a62c,path:/home/tmp/CentOS-6.6-x86_64-minimal.iso,device:cdrom,shared:false,type:disk}
devices={index:0,iface:virtio,format:raw,poolID:00000000-0000-0000-0000-000000000000,volumeID:eeb2d821-a432-4df6-8856-fdb14df260e3,imageID:5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836,specParams:{},readonly:false,domainID:4e3017eb-d062-4ad1-9df8-7057fcee412c,optional:false,deviceId:5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836,address:{bus:0x00,
slot:0x06, domain:0x0000, type:pci,
function:0x0},device:disk,shared:exclusive,propagateErrors:off,type:disk,bootOrder:1}
devices={device:scsi,model:virtio-scsi,type:controller}
devices={nicModel:pv,macAddr:00:16:3e:0e:d0:68,linkActive:true,network:ovirtmgmt,filter:vdsm-no-mac-spoofing,specParams:{},deviceId:f70ba622-6ac8-4c06-a005-0ebd940a15b2,address:{bus:0x00,
slot:0x03, domain:0x0000, type:pci,
function:0x0},device:bridge,type:interface}
devices={device:console,specParams:{},type:console,deviceId:98386e6c-ae56-4b6d-9bfb-c72bbd299ad1,alias:console0}
vmName=HostedEngine
spiceSecureChannels=smain,sdisplay,sinputs,scursor,splayback,srecord,ssmartcard,susbredir
smp=2
cpuType=Westmere
emulatedMachine=pc
----------
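To test my id-mismatch theory, here is the kind of cross-check I've been
sketching out for the domainID/imageID/volumeID triplet from vm.conf against
what is actually on the storage domain (rough sketch only; the UUIDs are
copied from the devices= line above):
----------
# From the virtio disk entry in vm.conf
SD=4e3017eb-d062-4ad1-9df8-7057fcee412c     # domainID
IMG=5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836    # imageID
VOL=eeb2d821-a432-4df6-8856-fdb14df260e3    # volumeID

# The raw volume file should sit directly under the image directory on the share
ls -l /rhev/data-center/mnt/ovirt-gluster-nfs:_engine/$SD/images/$IMG/

# Anything on the domain that mentions the volume UUID (data, .lease, .meta)
find /rhev/data-center/mnt/ovirt-gluster-nfs:_engine/$SD/images -name "$VOL*"
----------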
[root@orldc-dev-vnode02 4e3017eb-d062-4ad1-9df8-7057fcee412c]# cat /etc/ovirt-hosted-engine/hosted-engine.conf
fqdn=orldc-dev-vengine01.*******
vm_disk_id=5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836
vmid=6b7329f9-518a-4488-b1c4-2cd809f2f580
storage=ovirt-gluster-nfs:/engine
conf=/etc/ovirt-hosted-engine/vm.conf
service_start_time=0
host_id=1
console=vnc
domainType=nfs3
spUUID=379cf161-d5b1-4c20-bb71-e3ca5d2ccd6b
sdUUID=4e3017eb-d062-4ad1-9df8-7057fcee412c
connectionUUID=0d1b50ac-cf3f-4cd7-90df-3c3a6d11a984
ca_cert=/etc/pki/vdsm/libvirt-spice/ca-cert.pem
ca_subject="C=EN, L=Test, O=Test, CN=Test"
vdsm_use_ssl=true
gateway=10.16.3.1
bridge=ovirtmgmt
metadata_volume_UUID=dd9f373c-d161-4fa0-aab1-3cb52305dba7
metadata_image_UUID=23ac8897-b0c7-41d6-a7de-19f46ed78400
lockspace_volume_UUID=d9bacbf6-c2f4-4f74-a91f-3a3a52f255bf
lockspace_image_UUID=1d80a60c-8f26-4448-9460-2c7b00ff75bf
# The following are used only for iSCSI storage
iqn=
portal=
user=
password=
port=
----------
(mount output for the gluster-backed NFS share this should be running on):
ovirt-gluster-nfs:/engine on
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine type nfs
(rw,relatime,vers=3,rsize=1048576,wsize=1048576,namlen=255,soft,nolock,nosharecache,proto=tcp,timeo=600,retrans=6,sec=sys,mountaddr=10.16.3.30,mountvers=3,mountport=38465,mountproto=tcp,local_lock=all,addr=10.16.3.30)
----------
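Since qemu is reporting an Input/output error rather than a plain missing
file, I also want to poke at the NFS mount directly to see whether reads fail
at the mount level. A basic sketch (read-only, against the small dom_md files):
----------
MNT=/rhev/data-center/mnt/ovirt-gluster-nfs:_engine
SD=4e3017eb-d062-4ad1-9df8-7057fcee412c

# A small read through the same mount the VM uses; an I/O error here would point at gluster/NFS itself
dd if=$MNT/$SD/dom_md/metadata of=/dev/null bs=1M

# Confirm the share still answers stat calls without hanging
stat $MNT/$SD/dom_md/ids
----------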
[root@orldc-dev-vnode02 4e3017eb-d062-4ad1-9df8-7057fcee412c]# tree /rhev/data-center/mnt/ovirt-gluster-nfs\:_engine/
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/
├── 4e3017eb-d062-4ad1-9df8-7057fcee412c
│ ├── dom_md
│ │ ├── ids
│ │ ├── inbox
│ │ ├── leases
│ │ ├── metadata
│ │ └── outbox
│ ├── ha_agent
│ │ ├── hosted-engine.lockspace ->
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/4e3017eb-d062-4ad1-9df8-7057fcee412c/images/1d80a60c-8f26-4448-9460-2c7b00ff75bf/d9bacbf6-c2f4-4f74-a91f-3a3a52f255bf
│ │ └── hosted-engine.metadata ->
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/4e3017eb-d062-4ad1-9df8-7057fcee412c/images/23ac8897-b0c7-41d6-a7de-19f46ed78400/dd9f373c-d161-4fa0-aab1-3cb52305dba7
│ ├── images
│ │ ├── 1d80a60c-8f26-4448-9460-2c7b00ff75bf
│ │ │ ├── d9bacbf6-c2f4-4f74-a91f-3a3a52f255bf
│ │ │ ├── d9bacbf6-c2f4-4f74-a91f-3a3a52f255bf.lease
│ │ │ └── d9bacbf6-c2f4-4f74-a91f-3a3a52f255bf.meta
│ │ ├── 23ac8897-b0c7-41d6-a7de-19f46ed78400
│ │ │ ├── dd9f373c-d161-4fa0-aab1-3cb52305dba7 ->
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/4e3017eb-d062-4ad1-9df8-7057fcee412c/images/23ac8897-b0c7-41d6-a7de-19f46ed78400
│ │ │ ├── dd9f373c-d161-4fa0-aab1-3cb52305dba7.lease
│ │ │ └── dd9f373c-d161-4fa0-aab1-3cb52305dba7.meta
│ │ ├── 3278c444-d92a-4cb9-87d6-9669c6e4993e
│ │ │ ├── 1a4b6a5d-9c8f-4d54-91a7-3dd87377c362
│ │ │ ├── 1a4b6a5d-9c8f-4d54-91a7-3dd87377c362.lease
│ │ │ └── 1a4b6a5d-9c8f-4d54-91a7-3dd87377c362.meta
│ │ ├── 5ead7b5d-50e8-4d6c-a0e5-bbe6d93dd836
│ │ │ ├── eeb2d821-a432-4df6-8856-fdb14df260e3.lease
│ │ │ └── eeb2d821-a432-4df6-8856-fdb14df260e3.meta
│ │ ├── 6064179f-2720-4db9-a7c4-a97e044c2238
│ │ │ ├── 05afaa26-95af-4226-9a00-5383d8937a83
│ │ │ ├── 05afaa26-95af-4226-9a00-5383d8937a83.lease
│ │ │ └── 05afaa26-95af-4226-9a00-5383d8937a83.meta
│ │ └── bb9d9a37-4f91-4973-ba9e-72ee81aed0b6
│ │ ├── 5acb27b3-62c5-46ac-8978-576a8a4a0399
│ │ ├── 5acb27b3-62c5-46ac-8978-576a8a4a0399.lease
│ │ └── 5acb27b3-62c5-46ac-8978-576a8a4a0399.meta
│ └── master
│ ├── tasks
│ │ └── fef13299-0e7f-4c7a-a399-092a1235faab
│ │ ├── fef13299-0e7f-4c7a-a399-092a1235faab.job.0
│ │ ├── fef13299-0e7f-4c7a-a399-092a1235faab.recover.0
│ │ ├── fef13299-0e7f-4c7a-a399-092a1235faab.recover.1
│ │ ├── fef13299-0e7f-4c7a-a399-092a1235faab.result
│ │ └── fef13299-0e7f-4c7a-a399-092a1235faab.task
│ └── vms
└── __DIRECT_IO_TEST__
16 directories, 28 files
----------
[root@orldc-dev-vnode02 4e3017eb-d062-4ad1-9df8-7057fcee412c]# tree /var/run/vdsm/storage/4e3017eb-d062-4ad1-9df8-7057fcee412c
/var/run/vdsm/storage/4e3017eb-d062-4ad1-9df8-7057fcee412c
├── 1d80a60c-8f26-4448-9460-2c7b00ff75bf ->
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/4e3017eb-d062-4ad1-9df8-7057fcee412c/images/1d80a60c-8f26-4448-9460-2c7b00ff75bf
└── 23ac8897-b0c7-41d6-a7de-19f46ed78400 ->
/rhev/data-center/mnt/ovirt-gluster-nfs:_engine/4e3017eb-d062-4ad1-9df8-7057fcee412c/images/23ac8897-b0c7-41d6-a7de-19f46ed78400
2 directories, 0 files
----------
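For completeness, this is how I've been thinking of comparing the run-time
links under /var/run/vdsm/storage with the image directories on the share
itself (sketch only; the storage-domain UUID is the sdUUID from
hosted-engine.conf):
----------
SD=4e3017eb-d062-4ad1-9df8-7057fcee412c

# Image directories vdsm has linked at run time...
ls -1 /var/run/vdsm/storage/$SD/

# ...versus every image directory present on the storage domain
ls -1 /rhev/data-center/mnt/ovirt-gluster-nfs:_engine/$SD/images/
----------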
Can anyone point me in the right direction here? Eventually, I'd like
to get this hosted-engine running in HA mode (I had started down that path
with ctdb/gluster/etc. via the well-known guide out there), but I'm
thinking that at this point I should get it up and running again, export
everything, and perhaps just build a new hosted-engine and import everything
into it (if that is even possible). Right now, though, I just need to get this
hosted-engine running.
Many many thanks!
Chris