[Users] Unable to start VM in oVirt 3.3 and Fedora 19 with Gluster

Gianluca Cecchi gianluca.cecchi at gmail.com
Wed Sep 25 02:06:43 EDT 2013


The oVirt hosts are VMs inside an ESX 5.1 infrastructure.
I think everything is fine in terms of nested virtualization, though:
the CPU of the ESX host is an E7-4870 and the cluster is defined as "Intel Nehalem Family".
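For what it's worth, this is roughly how I sanity-checked nested virtualization
inside the oVirt host VMs (just a sketch, nothing oVirt-specific):

# the vmx flag must be exposed to the nested host for KVM to work
grep -c vmx /proc/cpuinfo

# kvm_intel should be loaded and /dev/kvm present
lsmod | grep kvm
ls -l /dev/kvm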

SELinux is in permissive mode.
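For reference, confirmed on the host with:

# getenforce
Permissive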

[root@ovnode01 libvirt]# vdsClient -s localhost getVdsCapabilities
        HBAInventory = {'FC': [], 'iSCSI': [{'InitiatorName':
'iqn.1994-05.com.redhat:6344c23973df'}]}
        ISCSIInitiatorName = 'iqn.1994-05.com.redhat:6344c23973df'
        bondings = {'bond0': {'addr': '',
                              'cfg': {},
                              'hwaddr': '8e:a1:3b:0c:83:47',
                              'ipv6addrs': [],
                              'mtu': '1500',
                              'netmask': '',
                              'slaves': []}}
        bridges = {'ovirtmgmt': {'addr': '192.168.33.41',
                                 'cfg': {'BOOTPROTO': 'none',
                                         'DEFROUTE': 'yes',
                                         'DELAY': '0',
                                         'DEVICE': 'ovirtmgmt',
                                         'GATEWAY': '192.168.33.15',
                                         'IPADDR': '192.168.33.41',
                                         'NETMASK': '255.255.255.0',
                                         'NM_CONTROLLED': 'no',
                                         'ONBOOT': 'yes',
                                         'STP': 'no',
                                         'TYPE': 'Bridge'},
                                 'gateway': '192.168.33.15',
                                 'ipv6addrs': ['fe80::250:56ff:fe9f:686b/64'],
                                 'ipv6gateway': '::',
                                 'mtu': '1500',
                                 'netmask': '255.255.255.0',
                                 'ports': ['eth0'],
                                 'stp': 'off'}}
        clusterLevels = ['3.0', '3.1', '3.2', '3.3']
        cpuCores = '4'
        cpuFlags =
'fpu,vme,de,pse,tsc,msr,pae,mce,cx8,apic,sep,mtrr,pge,mca,cmov,pat,pse36,clflush,dts,mmx,fxsr,sse,sse2,ss,syscall,nx,rdtscp,lm,constant_tsc,arch_perfmon,pebs,bts,nopl,xtopology,tsc_reliable,nonstop_tsc,aperfmperf,pni,monitor,vmx,ssse3,cx16,sse4_1,sse4_2,x2apic,popcnt,lahf_lm,ida,arat,epb,dtherm,tpr_shadow,vnmi,ept,vpid,model_Nehalem,model_Conroe,model_coreduo,model_core2duo,model_Penryn,model_n270'
        cpuModel = 'Intel(R) Xeon(R) CPU E7- 4870  @ 2.40GHz'
        cpuSockets = '4'
        cpuSpeed = '2394.000'
        cpuThreads = '4'
        emulatedMachines = ['pc',
                            'q35',
                            'isapc',
                            'pc-0.10',
                            'pc-0.11',
                            'pc-0.12',
                            'pc-0.13',
                            'pc-0.14',
                            'pc-0.15',
                            'pc-1.0',
                            'pc-1.1',
                            'pc-1.2',
                            'pc-1.3',
                            'none']
        guestOverhead = '65'
        hooks = {}
        kvmEnabled = 'true'
        lastClient = '192.168.33.40'
        lastClientIface = 'ovirtmgmt'
        management_ip = '0.0.0.0'
        memSize = '16050'
        netConfigDirty = 'False'
        networks = {'ovirtmgmt': {'addr': '192.168.33.41',
                                  'bridged': True,
                                  'cfg': {'BOOTPROTO': 'none',
                                          'DEFROUTE': 'yes',
                                          'DELAY': '0',
                                          'DEVICE': 'ovirtmgmt',
                                          'GATEWAY': '192.168.33.15',
                                          'IPADDR': '192.168.33.41',
                                          'NETMASK': '255.255.255.0',
                                          'NM_CONTROLLED': 'no',
                                          'ONBOOT': 'yes',
                                          'STP': 'no',
                                          'TYPE': 'Bridge'},
                                  'gateway': '192.168.33.15',
                                  'iface': 'ovirtmgmt',
                                  'ipv6addrs': ['fe80::250:56ff:fe9f:686b/64'],
                                  'ipv6gateway': '::',
                                  'mtu': '1500',
                                  'netmask': '255.255.255.0',
                                  'ports': ['eth0'],
                                  'stp': 'off'}}
        nics = {'ens224': {'addr': '192.168.230.31',
                           'cfg': {'BOOTPROTO': 'static',
                                   'DEVICE': 'ens224',
                                   'HWADDR': '00:50:56:9F:3C:B0',
                                   'IPADDR': '192.168.230.31',
                                   'NETMASK': '255.255.255.0',
                                   'NM_CONTROLLED': 'no',
                                   'ONBOOT': 'yes',
                                   'TYPE': 'Ethernet'},
                           'hwaddr': '00:50:56:9f:3c:b0',
                           'ipv6addrs': ['fe80::250:56ff:fe9f:3cb0/64'],
                           'mtu': '1500',
                           'netmask': '255.255.255.0',
                           'speed': 10000},
                'eth0': {'addr': '',
                         'cfg': {'BRIDGE': 'ovirtmgmt',
                                 'DEVICE': 'eth0',
                                 'HWADDR': '00:50:56:9f:68:6b',
                                 'MTU': '1500',
                                 'NM_CONTROLLED': 'no',
                                 'ONBOOT': 'yes',
                                 'STP': 'no'},
                         'hwaddr': '00:50:56:9f:68:6b',
                         'ipv6addrs': ['fe80::250:56ff:fe9f:686b/64'],
                         'mtu': '1500',
                         'netmask': '',
                         'speed': 10000},
                'p256p1': {'addr': '',
                           'cfg': {},
                           'hwaddr': '00:50:56:9f:3b:86',
                           'ipv6addrs': [],
                           'mtu': '1500',
                           'netmask': '',
                           'speed': 0}}
        operatingSystem = {'name': 'Fedora', 'release': '3', 'version': '19'}
        packages2 = {'glusterfs': {'buildtime': 1375786850,
                                   'release': '8.fc19',
                                   'version': '3.4.0'},
                     'glusterfs-fuse': {'buildtime': 1375786850,
                                        'release': '8.fc19',
                                        'version': '3.4.0'},
                     'glusterfs-rdma': {'buildtime': 1375786850,
                                        'release': '8.fc19',
                                        'version': '3.4.0'},
                     'glusterfs-server': {'buildtime': 1375786850,
                                          'release': '8.fc19',
                                          'version': '3.4.0'},
                     'kernel': {'buildtime': 1379163891.0,
                                'release': '200.fc19.x86_64',
                                'version': '3.11.1'},
                     'libvirt': {'buildtime': 1375400611,
                                 'release': '1.fc19',
                                 'version': '1.0.5.5'},
                     'mom': {'buildtime': 1375215820,
                             'release': '3.fc19',
                             'version': '0.3.2'},
                     'qemu-img': {'buildtime': 1378374475,
                                  'release': '9.fc19',
                                  'version': '1.4.2'},
                     'qemu-kvm': {'buildtime': 1378374475,
                                  'release': '9.fc19',
                                  'version': '1.4.2'},
                     'spice-server': {'buildtime': 1375454091,
                                      'release': '1.fc19',
                                      'version': '0.12.4'},
                     'vdsm': {'buildtime': 1378818342,
                              'release': '2.fc19',
                              'version': '4.12.1'}}
        reservedMem = '321'
        software_revision = '2'
        software_version = '4.12'
        supportedENGINEs = ['3.0', '3.1', '3.2', '3.3']
        supportedProtocols = ['2.2', '2.3']
        uuid = '421F7170-C703-34E3-9628-4588D841F8B1'
        version_name = 'Snow Man'
        vlans = {}
        vmTypes = ['kvm']
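
Since qemu-kvm 1.4.2 and glusterfs 3.4.0 show up above, I also wanted to be
sure the Fedora qemu build actually has the gluster block driver compiled in.
A rough check (the exact help output may differ between builds):

# "gluster" should appear among the supported formats/protocols
qemu-img --help | grep -i gluster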

QEMU log for the VM:
2013-09-25 05:42:29.130+0000: starting up
LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin
QEMU_AUDIO_DRV=spice /usr/bin/qemu-kvm -name C6 -S -machine
pc-1.0,accel=kvm,usb=off -cpu Nehalem -m 2048 -smp
1,sockets=1,cores=1,threads=1 -uuid
409c5dbe-5e70-40de-bf73-46ef484ea2d7 -smbios
type=1,manufacturer=oVirt,product=oVirt
Node,version=19-3,serial=421F7170-C703-34E3-9628-4588D841F8B1,uuid=409c5dbe-5e70-40de-bf73-46ef484ea2d7
-no-user-config -nodefaults -chardev
socket,id=charmonitor,path=/var/lib/libvirt/qemu/C6.monitor,server,nowait
-mon chardev=charmonitor,id=monitor,mode=control -rtc
base=2013-09-25T05:42:28,driftfix=slew -no-shutdown -device
piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -device
virtio-scsi-pci,id=scsi0,bus=pci.0,addr=0x4 -device
virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x5 -drive
if=none,id=drive-ide0-1-0,readonly=on,format=raw,serial= -device
ide-cd,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0 -drive
file=gluster://ovnode01/gv01/20042e7b-0929-48ca-ad40-2a2aa22f0689/images/d004045e-620b-4d90-8a7f-6c6d26393a08/dff09892-bc60-4de5-85c0-2a1fa215a161,if=none,id=drive-virtio-disk0,format=raw,serial=d004045e-620b-4d90-8a7f-6c6d26393a08,cache=none,werror=stop,rerror=stop,aio=threads
-device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x6,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1
-netdev tap,fd=27,id=hostnet0,vhost=on,vhostfd=28 -device
virtio-net-pci,netdev=hostnet0,id=net0,mac=00:1a:4a:bb:9f:10,bus=pci.0,addr=0x3
-chardev socket,id=charchannel0,path=/var/lib/libvirt/qemu/channels/409c5dbe-5e70-40de-bf73-46ef484ea2d7.com.redhat.rhevm.vdsm,server,nowait
-device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=com.redhat.rhevm.vdsm
-chardev socket,id=charchannel1,path=/var/lib/libvirt/qemu/channels/409c5dbe-5e70-40de-bf73-46ef484ea2d7.org.qemu.guest_agent.0,server,nowait
-device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,id=channel1,name=org.qemu.guest_agent.0
-chardev spicevmc,id=charchannel2,name=vdagent -device
virtserialport,bus=virtio-serial0.0,nr=3,chardev=charchannel2,id=channel2,name=com.redhat.spice.0
-spice tls-port=5900,addr=0,x509-dir=/etc/pki/vdsm/libvirt-spice,tls-channel=main,tls-channel=display,tls-channel=inputs,tls-channel=cursor,tls-channel=playback,tls-channel=record,tls-channel=smartcard,tls-channel=usbredir,seamless-migration=on
-k en-us -vga qxl -global qxl-vga.ram_size=67108864 -global
qxl-vga.vram_size=67108864 -device
virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x7
qemu-system-x86_64: -drive
file=gluster://ovnode01/gv01/20042e7b-0929-48ca-ad40-2a2aa22f0689/images/d004045e-620b-4d90-8a7f-6c6d26393a08/dff09892-bc60-4de5-85c0-2a1fa215a161,if=none,id=drive-virtio-disk0,format=raw,serial=d004045e-620b-4d90-8a7f-6c6d26393a08,cache=none,werror=stop,rerror=stop,aio=threads:
Gluster connection failed for server=ovnode01 port=0 volume=gv01
image=20042e7b-0929-48ca-ad40-2a2aa22f0689/images/d004045e-620b-4d90-8a7f-6c6d26393a08/dff09892-bc60-4de5-85c0-2a1fa215a161
transport=tcp
qemu-system-x86_64: -drive
file=gluster://ovnode01/gv01/20042e7b-0929-48ca-ad40-2a2aa22f0689/images/d004045e-620b-4d90-8a7f-6c6d26393a08/dff09892-bc60-4de5-85c0-2a1fa215a161,if=none,id=drive-virtio-disk0,format=raw,serial=d004045e-620b-4d90-8a7f-6c6d26393a08,cache=none,werror=stop,rerror=stop,aio=threads:
could not open disk image
gluster://ovnode01/gv01/20042e7b-0929-48ca-ad40-2a2aa22f0689/images/d004045e-620b-4d90-8a7f-6c6d26393a08/dff09892-bc60-4de5-85c0-2a1fa215a161:
No data available
2013-09-25 05:42:32.291+0000: shutting down
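
To narrow down the "Gluster connection failed ... No data available" error I
intend to test the volume and libgfapi access directly on the host. A rough
sketch, with <image-path> standing in for the full
20042e7b-0929-48ca-ad40-2a2aa22f0689/images/... path from the log above:

# is the volume started and are all bricks online?
gluster volume info gv01
gluster volume status gv01

# does a plain FUSE mount of the same volume work?
mount -t glusterfs ovnode01:/gv01 /mnt && ls /mnt

# can qemu-img open the image over libgfapi, outside of libvirt/vdsm?
qemu-img info gluster://ovnode01/gv01/<image-path>

# anything useful in the gluster client/brick logs?
less /var/log/glusterfs/*.log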

