
Hello,
I am reviving this thread with a subject more in line with the real problem. The previous thread subject was "unable to start vm in 3.3 and f19 with gluster" and it began here on the oVirt users mailing list:
http://lists.ovirt.org/pipermail/users/2013-September/016628.html

I have now updated everything to the final 3.3.3 and I see that the problem is still there.

So at the moment I have updated Fedora 19 hosts that are themselves VMs (virtual hw version 9) inside a vSphere 5.1 infrastructure. The CPU of the ESX host is an E7-4870 and the cluster in oVirt is defined as "Intel Nehalem Family".

On the oVirt host VM:

[root@ovnode01 qemu]# rpm -q libvirt qemu-kvm
libvirt-1.0.5.9-1.fc19.x86_64
qemu-kvm-1.4.2-15.fc19.x86_64

[root@ovnode01 qemu]# uname -r
3.12.9-201.fc19.x86_64

flags in /proc/cpuinfo:
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts mmx fxsr sse sse2 ss syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts nopl xtopology tsc_reliable nonstop_tsc aperfmperf pni monitor vmx ssse3 cx16 sse4_1 sse4_2 x2apic popcnt lahf_lm ida arat epb dtherm tpr_shadow vnmi ept vpid

[root@ovnode01 ~]# vdsClient -s localhost getVdsCapabilities
HBAInventory = {'FC': [], 'iSCSI': [{'InitiatorName': 'iqn.1994-05.com.redhat:6344c23973df'}]}
ISCSIInitiatorName = 'iqn.1994-05.com.redhat:6344c23973df'
bondings = {'bond0': {'addr': '', 'cfg': {}, 'hwaddr': '32:5c:6a:20:cd:21', 'ipv6addrs': [], 'mtu': '1500', 'netmask': '', 'slaves': []}}
bridges = {'ovirtmgmt': {'addr': '192.168.33.41', 'cfg': {'BOOTPROTO': 'none', 'DEFROUTE': 'yes', 'DELAY': '0', 'DEVICE': 'ovirtmgmt', 'GATEWAY': '192.168.33.15', 'IPADDR': '192.168.33.41', 'NETMASK': '255.255.255.0', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'STP': 'no', 'TYPE': 'Bridge'}, 'gateway': '192.168.33.15', 'ipv6addrs': ['fe80::250:56ff:fe9f:686b/64'], 'ipv6gateway': '::', 'mtu': '1500', 'netmask': '255.255.255.0', 'ports': ['eth0', 'vnet1'], 'stp': 'off'}, 'vlan172': {'addr': '', 'cfg': {'DEFROUTE': 'no', 'DELAY': '0', 'DEVICE': 'vlan172', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'STP': 'no', 'TYPE': 'Bridge'}, 'gateway': '0.0.0.0', 'ipv6addrs': ['fe80::250:56ff:fe9f:3b86/64'], 'ipv6gateway': '::', 'mtu': '1500', 'netmask': '', 'ports': ['ens256.172', 'vnet0'], 'stp': 'off'}}
clusterLevels = ['3.0', '3.1', '3.2', '3.3']
cpuCores = '4'
cpuFlags = 'fpu,vme,de,pse,tsc,msr,pae,mce,cx8,apic,sep,mtrr,pge,mca,cmov,pat,pse36,clflush,dts,mmx,fxsr,sse,sse2,ss,syscall,nx,rdtscp,lm,constant_tsc,arch_perfmon,pebs,bts,nopl,xtopology,tsc_reliable,nonstop_tsc,aperfmperf,pni,monitor,vmx,ssse3,cx16,sse4_1,sse4_2,x2apic,popcnt,lahf_lm,ida,arat,epb,dtherm,tpr_shadow,vnmi,ept,vpid,model_Nehalem,model_Conroe,model_coreduo,model_core2duo,model_Penryn,model_n270'
cpuModel = 'Intel(R) Xeon(R) CPU E7- 4870 @ 2.40GHz'
cpuSockets = '4'
cpuSpeed = '2394.000'
cpuThreads = '4'
emulatedMachines = ['pc', 'q35', 'isapc', 'pc-0.10', 'pc-0.11', 'pc-0.12', 'pc-0.13', 'pc-0.14', 'pc-0.15', 'pc-1.0', 'pc-1.1', 'pc-1.2', 'pc-1.3', 'none']
guestOverhead = '65'
hooks = {}
kvmEnabled = 'true'
lastClient = '127.0.0.1'
lastClientIface = 'lo'
management_ip = '0.0.0.0'
memSize = '16050'
netConfigDirty = 'False'
networks = {'ovirtmgmt': {'addr': '192.168.33.41', 'bridged': True, 'cfg': {'BOOTPROTO': 'none', 'DEFROUTE': 'yes', 'DELAY': '0', 'DEVICE': 'ovirtmgmt', 'GATEWAY': '192.168.33.15', 'IPADDR': '192.168.33.41', 'NETMASK': '255.255.255.0', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'STP': 'no', 'TYPE': 'Bridge'}, 'gateway': '192.168.33.15', 'iface': 'ovirtmgmt', 'ipv6addrs': ['fe80::250:56ff:fe9f:686b/64'], 'ipv6gateway': '::', 'mtu': '1500', 'netmask': '255.255.255.0', 'ports': ['eth0', 'vnet1'], 'qosInbound': '', 'qosOutbound': '', 'stp': 'off'}, 'vlan172': {'addr': '', 'bridged': True, 'cfg': {'DEFROUTE': 'no', 'DELAY': '0', 'DEVICE': 'vlan172', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'STP': 'no', 'TYPE': 'Bridge'}, 'gateway': '0.0.0.0', 'iface': 'vlan172', 'ipv6addrs': ['fe80::250:56ff:fe9f:3b86/64'], 'ipv6gateway': '::', 'mtu': '1500', 'netmask': '', 'ports': ['ens256.172', 'vnet0'], 'qosInbound': '', 'qosOutbound': '', 'stp': 'off'}}
nics = {'ens224': {'addr': '192.168.230.31', 'cfg': {'BOOTPROTO': 'static', 'DEVICE': 'ens224', 'HWADDR': '00:50:56:9F:3C:B0', 'IPADDR': '192.168.230.31', 'NETMASK': '255.255.255.0', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'TYPE': 'Ethernet'}, 'hwaddr': '00:50:56:9f:3c:b0', 'ipv6addrs': ['fe80::250:56ff:fe9f:3cb0/64'], 'mtu': '1500', 'netmask': '255.255.255.0', 'speed': 10000}, 'ens256': {'addr': '', 'cfg': {'DEVICE': 'ens256', 'HWADDR': '00:50:56:9f:3b:86', 'MTU': '1500', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'STP': 'no'}, 'hwaddr': '00:50:56:9f:3b:86', 'ipv6addrs': ['fe80::250:56ff:fe9f:3b86/64'], 'mtu': '1500', 'netmask': '', 'speed': 10000}, 'eth0': {'addr': '', 'cfg': {'BRIDGE': 'ovirtmgmt', 'DEVICE': 'eth0', 'HWADDR': '00:50:56:9f:68:6b', 'MTU': '1500', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'STP': 'no'}, 'hwaddr': '00:50:56:9f:68:6b', 'ipv6addrs': ['fe80::250:56ff:fe9f:686b/64'], 'mtu': '1500', 'netmask': '', 'speed': 10000}}
operatingSystem = {'name': 'Fedora', 'release': '6', 'version': '19'}
packages2 = {'kernel': {'buildtime': 1391006675.0, 'release': '201.fc19.x86_64', 'version': '3.12.9'}, 'libvirt': {'buildtime': 1389924902, 'release': '1.fc19', 'version': '1.0.5.9'}, 'mom': {'buildtime': 1385055339, 'release': '6.fc19', 'version': '0.3.2'}, 'qemu-img': {'buildtime': 1387388596, 'release': '15.fc19', 'version': '1.4.2'}, 'qemu-kvm': {'buildtime': 1387388596, 'release': '15.fc19', 'version': '1.4.2'}, 'spice-server': {'buildtime': 1383130020, 'release': '3.fc19', 'version': '0.12.4'}, 'vdsm': {'buildtime': 1391430691, 'release': '3.fc19', 'version': '4.13.3'}}
reservedMem = '321'
software_revision = '3'
software_version = '4.13'
supportedENGINEs = ['3.0', '3.1', '3.2', '3.3']
supportedProtocols = ['2.2', '2.3']
uuid = '421F7170-C703-34E3-9628-4588D841F8B1'
version_name = 'Snow Man'
vlans = {'ens256.172': {'addr': '', 'cfg': {'BRIDGE': 'vlan172', 'DEVICE': 'ens256.172', 'NM_CONTROLLED': 'no', 'ONBOOT': 'yes', 'STP': 'no', 'VLAN': 'yes'}, 'iface': 'ens256', 'ipv6addrs': ['fe80::250:56ff:fe9f:3b86/64'], 'mtu': '1500', 'netmask': '', 'vlanid': 172}}
vmTypes = ['kvm']
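Since the oVirt hosts are themselves VMs, as a sanity check I also verify that hardware virtualization is really passed through to them and that KVM is usable. This is just a sketch of the commands I use; on the vSphere side this should correspond to the "Expose hardware assisted virtualization to the guest OS" option (vhv.enable = "TRUE" in the vmx file, if I remember correctly, so please double check that name):

grep -c vmx /proc/cpuinfo                             # should match the number of vCPUs of the host VM
lsmod | grep kvm                                      # kvm_intel and kvm must both be loaded
virsh -r capabilities | grep -i "domain type='kvm'"   # libvirt must offer the kvm domain type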
I have a pre-booted VM that is configured with a VNC console. As soon as I start another VM (CentOS 6.4) defined with a spice console, both of them go into paused mode.

In the qemu log of the spice VM I have:

2014-02-05 08:05:45.965+0000: starting up
LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin QEMU_AUDIO_DRV=spice /usr/bin/qemu-kvm -name C2prealloc -S -machine pc-1.0,accel=kvm,usb=off -cpu Nehalem -m 1024 -smp 1,sockets=1,cores=1,threads=1 -uuid 1107ce34-46e6-4989-a5cf-de601ea71cae -smbios type=1,manufacturer=oVirt,product=oVirt Node,version=19-6,serial=421F7170-C703-34E3-9628-4588D841F8B1,uuid=1107ce34-46e6-4989-a5cf-de601ea71cae -no-user-config -nodefaults -chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/C2prealloc.monitor,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=2014-02-05T08:05:45,driftfix=slew -no-shutdown -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -device virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x4 -drive if=none,id=drive-ide0-1-0,readonly=on,format=raw,serial= -device ide-cd,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0 -drive file=/rhev/data-center/mnt/glusterSD/ovnode01:gv01/20042e7b-0929-48ca-ad40-2a2aa22f0689/images/e8a52eea-5531-4d12-8747-061c2136b6fd/14707e58-aedf-4059-a815-605a0df4b396,if=none,id=drive-virtio-disk0,format=raw,serial=e8a52eea-5531-4d12-8747-061c2136b6fd,cache=none,werror=stop,rerror=stop,aio=threads -device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1 -netdev tap,fd=29,id=hostnet0,vhost=on,vhostfd=30 -device virtio-net-pci,netdev=hostnet0,id=net0,mac=00:1a:4a:bb:9f:19,bus=pci.0,addr=0x3 -chardev socket,id=charchannel0,path=/var/lib/libvirt/qemu/channels/1107ce34-46e6-4989-a5cf-de601ea71cae.com.redhat.rhevm.vdsm,server,nowait -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=com.redhat.rhevm.vdsm -chardev socket,id=charchannel1,path=/var/lib/libvirt/qemu/channels/1107ce34-46e6-4989-a5cf-de601ea71cae.org.qemu.guest_agent.0,server,nowait -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,id=channel1,name=org.qemu.guest_agent.0 -chardev spicevmc,id=charchannel2,name=vdagent -device virtserialport,bus=virtio-serial0.0,nr=3,chardev=charchannel2,id=channel2,name=com.redhat.spice.0 -spice tls-port=5901,addr=0,x509-dir=/etc/pki/vdsm/libvirt-spice,tls-channel=main,tls-channel=display,tls-channel=inputs,tls-channel=cursor,tls-channel=playback,tls-channel=record,tls-channel=smartcard,tls-channel=usbredir,seamless-migration=on -k en-us -vga qxl -global qxl-vga.ram_size=67108864 -global qxl-vga.vram_size=67108864 -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x6

KVM: unknown exit, hardware reason 3
EAX=00000037 EBX=00006e44 ECX=0000001a EDX=00000511
ESI=00000000 EDI=00006df8 EBP=00006e08 ESP=00006dd4
EIP=3ffe1464 EFL=00000017 [----APC] CPL=0 II=0 A20=1 SMM=0 HLT=0
ES =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
CS =0008 00000000 ffffffff 00c09b00 DPL=0 CS32 [-RA]
SS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
DS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
FS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
GS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
LDT=0000 00000000 0000ffff 00008200 DPL=0 LDT
TR =0000 00000000 0000ffff 00008b00 DPL=0 TSS32-busy
GDT= 000fd3a8 00000037
IDT= 000fd3e6 00000000
CR0=00000011 CR2=00000000 CR3=00000000 CR4=00000000
DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000
DR6=00000000ffff0ff0 DR7=0000000000000400
EFER=0000000000000000
Code=eb be 83 c4 08 5b 5e 5f 5d c3 89 c1 ba 11 05 00 00 eb 01 ec <49> 83 f9 ff 75 f9 c3 57 56 53 89 c3 8b b0 84 00 00 00 39 ce 77 1e 89 d7 0f b7 80 8c 00 00

In /var/log/messages, when I start the spice VM, I get:

Feb 5 09:05:46 ovnode01 vdsm vm.Vm WARNING vmId=`1107ce34-46e6-4989-a5cf-de601ea71cae`::_readPauseCode unsupported by libvirt vm

In the VNC VM's qemu.log, from when I started it yesterday:

2014-02-04 23:56:48.635+0000: starting up
LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin QEMU_AUDIO_DRV=none /usr/bin/qemu-kvm -name C6 -S -machine pc-1.0,accel=kvm,usb=off -cpu Nehalem -m 2048 -smp 1,sockets=1,cores=1,threads=1 -uuid 409c5dbe-5e70-40de-bf73-46ef484ea2d7 -smbios type=1,manufacturer=oVirt,product=oVirt Node,version=19-6,serial=421F7170-C703-34E3-9628-4588D841F8B1,uuid=409c5dbe-5e70-40de-bf73-46ef484ea2d7 -no-user-config -nodefaults -chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/C6.monitor,server,nowait -mon chardev=charmonitor,id=monitor,mode=control -rtc base=2014-02-04T23:56:48,driftfix=slew -no-shutdown -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -device virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x4 -drive if=none,id=drive-ide0-1-0,readonly=on,format=raw,serial= -device ide-cd,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0 -drive file=/rhev/data-center/mnt/glusterSD/ovnode01:gv01/20042e7b-0929-48ca-ad40-2a2aa22f0689/images/d004045e-620b-4d90-8a7f-6c6d26393a08/dff09892-bc60-4de5-85c0-2a1fa215a161,if=none,id=drive-virtio-disk0,format=raw,serial=d004045e-620b-4d90-8a7f-6c6d26393a08,cache=none,werror=stop,rerror=stop,aio=threads -device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1 -netdev tap,fd=27,id=hostnet0,vhost=on,vhostfd=28 -device virtio-net-pci,netdev=hostnet0,id=net0,mac=00:1a:4a:bb:9f:10,bus=pci.0,addr=0x3 -chardev socket,id=charchannel0,path=/var/lib/libvirt/qemu/channels/409c5dbe-5e70-40de-bf73-46ef484ea2d7.com.redhat.rhevm.vdsm,server,nowait -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=com.redhat.rhevm.vdsm -chardev socket,id=charchannel1,path=/var/lib/libvirt/qemu/channels/409c5dbe-5e70-40de-bf73-46ef484ea2d7.org.qemu.guest_agent.0,server,nowait -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,id=channel1,name=org.qemu.guest_agent.0 -device usb-tablet,id=input0 -vnc 0:0,password -k en-us -vga cirrus -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x6

and then, when I start the spice one:

KVM: unknown exit, hardware reason 3
EAX=00000011 EBX=0000ffea ECX=00000000 EDX=000fc5b9
ESI=000d7c2a EDI=00000000 EBP=00000000 ESP=00006f80
EIP=0000c489 EFL=00000006 [-----P-] CPL=0 II=0 A20=1 SMM=0 HLT=0
ES =0000 00000000 ffffffff 00809300 DPL=0 DS16 [-WA]
CS =f000 000f0000 ffffffff 00809b00 DPL=0 CS16 [-RA]
SS =0000 00000000 ffffffff 00809300 DPL=0 DS16 [-WA]
DS =0000 00000000 ffffffff 00809300 DPL=0 DS16 [-WA]
FS =0000 00000000 ffffffff 00809300 DPL=0 DS16 [-WA]
GS =0000 00000000 ffffffff 00809300 DPL=0 DS16 [-WA]
LDT=0000 00000000 0000ffff 00008200 DPL=0 LDT
TR =0000 00000000 0000ffff 00008b00 DPL=0 TSS32-busy
GDT= 000fd3a8 00000037
IDT= 000fd3e6 00000000
CR0=00000011 CR2=00000000 CR3=00000000 CR4=00000000
DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000
DR6=00000000ffff0ff0 DR7=0000000000000400
EFER=0000000000000000
Code=01 1e e0 d3 2e 0f 01 16 a0 d3 0f 20 c0 66 83 c8 01 0f 22 c0 <66> ea 91 c4 0f 00 08 00 b8 10 00 00 00 8e d8 8e c0 8e d0 8e e0 8e e8 89 c8 ff e2 89 c1 b8
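For completeness, this is roughly how I look at the two guests from the host while they are stuck (read-only virsh, so no vdsm SASL credentials are needed; the domain names are the ones used above):

virsh -r list --all                                 # both C2prealloc and C6 are reported as paused
virsh -r domstate --reason C2prealloc               # prints the paused state plus the reason libvirt recorded
virsh -r domstate --reason C6
tail -n 50 /var/log/libvirt/qemu/C2prealloc.log     # the "KVM: unknown exit" dump above comes from this file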
Thanks in advance,
Gianluca

On Thu, Oct 3, 2013 at 2:54 PM, Itamar Heim <iheim@redhat.com> wrote:
On 10/03/2013 01:21 AM, Gianluca Cecchi wrote:
On Wed, Oct 2, 2013 at 9:16 PM, Itamar Heim wrote:
On 10/02/2013 12:57 AM, Gianluca Cecchi wrote:
Today I was able to work again on this matter and it seems related to spice. Every time I start the VM (that is defined with spice) it goes into paused mode
and this doesn't happen if the VM is defined with vnc?
No, reproduced both from oVirt and through virsh. With spice defined in the boot options or in the xml (for virsh), the VM remains in paused state and after a few minutes it seems the node hangs... With vnc the VM goes into running state. I'm going to put the same config on 2 physical nodes with only local storage and see what happens and report...
Gianluca
Adding the spice-devel mailing list, as the VM only hangs if started with spice and not with vnc, from virsh as well.
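If it helps to narrow this down further, the next test I have in mind (only a sketch; the guest name, VNC display and ISO path below are placeholders) is to boot a throwaway guest by hand with the qxl adapter but a plain VNC display, to see whether qxl alone triggers the pause on this nested setup or whether the spice channels are needed too:

/usr/bin/qemu-kvm -name qxltest -machine pc-1.0,accel=kvm,usb=off -cpu Nehalem -m 512 \
    -vga qxl -vnc 0.0.0.0:9 \
    -cdrom /path/to/any-bootable.iso    # placeholder, any live image is enough for this test

The reverse combination (a spice display with -vga cirrus) should then tell whether the spice channels themselves, rather than qxl, are the trigger.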