Hi all,

Nested KVM is enabled on the hypervisor host:
[xxx@bigvirt ~]$ cat /sys/module/kvm_amd/parameters/nested
1
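
For reference, the AMD equivalent of the kvm-intel snippet quoted further down in this thread would be roughly the following (the modprobe.d file name is arbitrary; reload with all VMs stopped):

# echo "options kvm_amd nested=1" > /etc/modprobe.d/kvm-amd.conf   (persist the option)
# modprobe -r kvm_amd && modprobe kvm_amd                          (reload the module)
# cat /sys/module/kvm_amd/parameters/nested                        (verify: should print 1)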


[root@bigvirt ~]# virsh -r dumpxml RHEV
<domain type='kvm' id='2'>
  <name>RHEV</name>
  <uuid>98a86cc8-efbe-43b5-a8e2-4c636b6556a8</uuid>
  <metadata xmlns:ovirt="http://ovirt.org/vm/tune/1.0">
    <ovirt:qos/>
  </metadata>
  <maxMemory slots='16' unit='KiB'>4294967296</maxMemory>
  <memory unit='KiB'>2097152</memory>
  <currentMemory unit='KiB'>2097152</currentMemory>
  <vcpu placement='static' current='2'>16</vcpu>
  <cputune>
    <shares>1020</shares>
  </cputune>
  <numatune>
    <memory mode='interleave' nodeset='0'/>
  </numatune>
  <resource>
    <partition>/machine</partition>
  </resource>
  <sysinfo type='smbios'>
    <system>
      <entry name='manufacturer'>oVirt</entry>
      <entry name='product'>oVirt Node</entry>
      <entry name='version'>7-2.1511.el7.centos.2.10</entry>
      <entry name='serial'>60E59E92-3256-BD11-B8BD-14DAE9ED31D2</entry>
      <entry name='uuid'>98a86cc8-efbe-43b5-a8e2-4c636b6556a8</entry>
    </system>
  </sysinfo>
  <os>
    <type arch='x86_64' machine='pc-i440fx-rhel7.2.0'>hvm</type>
    <smbios mode='sysinfo'/>
  </os>
  <features>
    <acpi/>
  </features>
  <cpu mode='custom' match='exact'>
    <model fallback='allow'>Opteron_G3</model>
    <topology sockets='16' cores='1' threads='1'/>
    <feature policy='require' name='svm'/>
    <numa>
      <cell id='0' cpus='0-1' memory='2097152' unit='KiB'/>
    </numa>
  </cpu>
  <clock offset='variable' adjustment='0' basis='utc'>
    <timer name='rtc' tickpolicy='catchup'/>
    <timer name='pit' tickpolicy='delay'/>
    <timer name='hpet' present='no'/>
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source startupPolicy='optional'/>
      <backingStore/>
      <target dev='hdc' bus='ide'/>
      <readonly/>
      <serial></serial>
      <alias name='ide0-1-0'/>
      <address type='drive' controller='0' bus='1' target='0' unit='0'/>
    </disk>
    <disk type='file' device='disk' snapshot='no'>
      <driver name='qemu' type='raw' cache='none' error_policy='stop' io='threads'/>
      <source file='/rhev/data-center/21ecac04-6bc8-4e20-88da-48bcb5b9a5a4/cb05b616-71bc-457f-9401-d18748f68678/images/bd8f017e-a33e-49c7-a487-34b1d110b678/8e72df98-7dba-47e7-baec-2b034d208496'/>
      <backingStore/>
      <target dev='vda' bus='virtio'/>
      <serial>bd8f017e-a33e-49c7-a487-34b1d110b678</serial>
      <boot order='1'/>
      <alias name='virtio-disk0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
    </disk>
    <controller type='scsi' index='0' model='virtio-scsi'>
      <alias name='scsi0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </controller>
    <controller type='virtio-serial' index='0' ports='16'>
      <alias name='virtio-serial0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
    </controller>
    <controller type='usb' index='0'>
      <alias name='usb'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='pci' index='0' model='pci-root'>
      <alias name='pci.0'/>
    </controller>
    <controller type='ide' index='0'>
      <alias name='ide'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
    </controller>
    <interface type='bridge'>
      <mac address='00:1a:4a:16:01:57'/>
      <source bridge='ovirtmgmt'/>
      <target dev='vnet0'/>
      <model type='virtio'/>
      <filterref filter='vdsm-no-mac-spoofing'/>
      <link state='up'/>
      <alias name='net0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
    </interface>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/channels/98a86cc8-efbe-43b5-a8e2-4c636b6556a8.com.redhat.rhevm.vdsm'/>
      <target type='virtio' name='com.redhat.rhevm.vdsm' state='disconnected'/>
      <alias name='channel0'/>
      <address type='virtio-serial' controller='0' bus='0' port='1'/>
    </channel>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/channels/98a86cc8-efbe-43b5-a8e2-4c636b6556a8.org.qemu.guest_agent.0'/>
      <target type='virtio' name='org.qemu.guest_agent.0' state='disconnected'/>
      <alias name='channel1'/>
      <address type='virtio-serial' controller='0' bus='0' port='2'/>
    </channel>
    <channel type='spicevmc'>
      <target type='virtio' name='com.redhat.spice.0' state='disconnected'/>
      <alias name='channel2'/>
      <address type='virtio-serial' controller='0' bus='0' port='3'/>
    </channel>
    <input type='mouse' bus='ps2'/>
    <input type='keyboard' bus='ps2'/>
    <graphics type='spice' port='5900' tlsPort='5901' autoport='yes' listen='192.168.1.7' passwdValidTo='1970-01-01T00:00:01'>
      <listen type='network' address='192.168.1.7' network='vdsm-ovirtmgmt'/>
    </graphics>
    <video>
      <model type='qxl' ram='65536' vram='32768' vgamem='16384' heads='1'/>
      <alias name='video0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </video>
    <memballoon model='none'>
      <alias name='balloon0'/>
    </memballoon>
  </devices>
  <seclabel type='dynamic' model='selinux' relabel='yes'>
    <label>system_u:system_r:svirt_t:s0:c147,c512</label>
    <imagelabel>system_u:object_r:svirt_image_t:s0:c147,c512</imagelabel>
  </seclabel>
</domain>

I removed the machine and recreated it, as I was not sure whether I had created it after adding "nested=1" to kvm_amd:

[root@bigvirt ~]# virsh -r dumpxml RHEV
<domain type='kvm' id='5'>
  <name>RHEV</name>
  <uuid>a583ec5a-50ba-447a-bc4f-f88b2704847f</uuid>
  <metadata xmlns:ovirt="http://ovirt.org/vm/tune/1.0">
    <ovirt:qos/>
  </metadata>
  <maxMemory slots='16' unit='KiB'>4294967296</maxMemory>
  <memory unit='KiB'>2097152</memory>
  <currentMemory unit='KiB'>2097152</currentMemory>
  <vcpu placement='static' current='2'>16</vcpu>
  <cputune>
    <shares>1020</shares>
  </cputune>
  <numatune>
    <memory mode='interleave' nodeset='0'/>
  </numatune>
  <resource>
    <partition>/machine</partition>
  </resource>
  <sysinfo type='smbios'>
    <system>
      <entry name='manufacturer'>oVirt</entry>
      <entry name='product'>oVirt Node</entry>
      <entry name='version'>7-2.1511.el7.centos.2.10</entry>
      <entry name='serial'>60E59E92-3256-BD11-B8BD-14DAE9ED31D2</entry>
      <entry name='uuid'>a583ec5a-50ba-447a-bc4f-f88b2704847f</entry>
    </system>
  </sysinfo>
  <os>
    <type arch='x86_64' machine='pc-i440fx-rhel7.2.0'>hvm</type>
    <smbios mode='sysinfo'/>
  </os>
  <features>
    <acpi/>
  </features>
  <cpu mode='custom' match='exact'>
    <model fallback='allow'>Opteron_G3</model>
    <topology sockets='16' cores='1' threads='1'/>
    <feature policy='require' name='svm'/>
    <numa>
      <cell id='0' cpus='0-1' memory='2097152' unit='KiB'/>
    </numa>
  </cpu>
  <clock offset='variable' adjustment='0' basis='utc'>
    <timer name='rtc' tickpolicy='catchup'/>
    <timer name='pit' tickpolicy='delay'/>
    <timer name='hpet' present='no'/>
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='/rhev/data-center/mnt/10.0.0.3:_nfs_iso/8cdabe73-5676-4820-b876-7af86854ea39/images/11111111-1111-1111-1111-111111111111/rhev-hypervisor6-6.7-20151123.0.iso' startupPolicy='optional'>
        <seclabel model='selinux' labelskip='yes'/>
      </source>
      <backingStore/>
      <target dev='hdc' bus='ide'/>
      <readonly/>
      <serial></serial>
      <boot order='1'/>
      <alias name='ide0-1-0'/>
      <address type='drive' controller='0' bus='1' target='0' unit='0'/>
    </disk>
    <disk type='file' device='disk' snapshot='no'>
      <driver name='qemu' type='raw' cache='none' error_policy='stop' io='threads'/>
      <source file='/rhev/data-center/21ecac04-6bc8-4e20-88da-48bcb5b9a5a4/cb05b616-71bc-457f-9401-d18748f68678/images/bd8f017e-a33e-49c7-a487-34b1d110b678/8e72df98-7dba-47e7-baec-2b034d208496'/>
      <backingStore/>
      <target dev='vda' bus='virtio'/>
      <serial>bd8f017e-a33e-49c7-a487-34b1d110b678</serial>
      <boot order='2'/>
      <alias name='virtio-disk0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
    </disk>
    <controller type='scsi' index='0' model='virtio-scsi'>
      <alias name='scsi0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
    </controller>
    <controller type='virtio-serial' index='0' ports='16'>
      <alias name='virtio-serial0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
    </controller>
    <controller type='usb' index='0'>
      <alias name='usb'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='pci' index='0' model='pci-root'>
      <alias name='pci.0'/>
    </controller>
    <controller type='ide' index='0'>
      <alias name='ide'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
    </controller>
    <interface type='bridge'>
      <mac address='00:1a:4a:16:01:57'/>
      <source bridge='ovirtmgmt'/>
      <target dev='vnet0'/>
      <model type='virtio'/>
      <filterref filter='vdsm-no-mac-spoofing'/>
      <link state='up'/>
      <boot order='3'/>
      <alias name='net0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
    </interface>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/channels/a583ec5a-50ba-447a-bc4f-f88b2704847f.com.redhat.rhevm.vdsm'/>
      <target type='virtio' name='com.redhat.rhevm.vdsm' state='disconnected'/>
      <alias name='channel0'/>
      <address type='virtio-serial' controller='0' bus='0' port='1'/>
    </channel>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/channels/a583ec5a-50ba-447a-bc4f-f88b2704847f.org.qemu.guest_agent.0'/>
      <target type='virtio' name='org.qemu.guest_agent.0' state='disconnected'/>
      <alias name='channel1'/>
      <address type='virtio-serial' controller='0' bus='0' port='2'/>
    </channel>
    <channel type='spicevmc'>
      <target type='virtio' name='com.redhat.spice.0' state='disconnected'/>
      <alias name='channel2'/>
      <address type='virtio-serial' controller='0' bus='0' port='3'/>
    </channel>
    <input type='mouse' bus='ps2'/>
    <input type='keyboard' bus='ps2'/>
    <graphics type='spice' port='5900' tlsPort='5901' autoport='yes' listen='192.168.1.7' passwdValidTo='2016-01-05T16:58:47' connected='disconnect'>
      <listen type='network' address='192.168.1.7' network='vdsm-ovirtmgmt'/>
    </graphics>
    <video>
      <model type='qxl' ram='65536' vram='32768' vgamem='16384' heads='1'/>
      <alias name='video0'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </video>
    <memballoon model='none'>
      <alias name='balloon0'/>
    </memballoon>
  </devices>
  <seclabel type='dynamic' model='selinux' relabel='yes'>
    <label>system_u:system_r:svirt_t:s0:c19,c668</label>
    <imagelabel>system_u:object_r:svirt_image_t:s0:c19,c668</imagelabel>
  </seclabel>
</domain>

Still the same: at the prompt of the (nested) RHEV hypervisor VM, "modprobe -v kvm_amd" reports "Operation not supported".
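
For what it's worth, a quick way to see why the module refuses to load there (assuming shell access on the nested node) is to check the kernel log right after the failed modprobe; kvm_amd normally logs the reason, e.g. something like "kvm: no hardware support" when svm is missing:

# modprobe -v kvm_amd
# dmesg | tail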

But hmm, the XML does contain:
<feature policy='require' name='svm'/>

grep svm /proc/cpuinfo on the hypervisor shows the flag is available. In the virtual machine (using nested KVM) it is not available, even though kvm_amd is loaded with "nested=1".
Hmm, does QEMU not pick up the nested setting?
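
For completeness, the comparison was along these lines (run the grep on the physical host and inside the VM respectively):

# grep -c ' svm ' /proc/cpuinfo        (physical host: one match per CPU thread)
# virsh -r dumpxml RHEV | grep svm     (shows the feature policy='require' line)
# grep -c ' svm ' /proc/cpuinfo        (inside the RHEV VM: prints 0)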

Thanks in advance!

Winny

On 04-01-16 at 14:56, Martin Polednik wrote:
----- Original Message -----
From: "Martin Polednik" <mpolednik@redhat.com>
To: wdh@dds.nl
Cc: "users" <users@ovirt.org>, "Martin Polednik" <mpolednik@redhat.com>
Sent: Monday, January 4, 2016 2:52:03 PM
Subject: Re: [ovirt-users] Nested KVM - vdsm-hook-nestedvt

On 04/01/16 14:38 +0100, wdh@dds.nl wrote:
Hi all,

OK, it seems I am confused. Nested KVM is NOT the same as fake KVM....

If so, I installed vdsm-hook-nestedvt on the hypervisor (only). When trying
to run oVirt Node in a VM, it complains there is no virtualization,
even though the kvm_amd module is loaded with nested=1.

How to enable nested-kvm for oVirt 3.6?
First, make sure that you're enabling the nested feature for amd (just
noticed that the example used intel). Please show us the output of

# cat /sys/module/kvm_intel/parameters/nested
and sorry, I made the same mistake again - use

# cat /sys/module/kvm_amd/parameters/nested

on the host (physical machine), the lscpu output from inside the VM, and the VM XML - use

# virsh -r list
# virsh -r dumpxml {machine_id found in the list}

Thanks,
mpolednik

Winny


Martin Polednik wrote on 04-01-2016 13:29:
On 04/01/16 09:52 +0100, Simone Tiraboschi wrote:
On Fri, Jan 1, 2016 at 3:32 PM, Winfried de Heiden <wdh@dds.nl> wrote:

Hi all and happy new year!

I'm running oVirt 3.6 on EL7.

In order to test nested-kvm I installed vdsm-hook-nestedvt on the
hypervisor and put "fake_kvm_support = true" in /etc/vdsm/vdsm.conf.



fake_kvm_support is just for development purposes and is not needed here,
as it prevents guests from running.
Not really true - fake_kvm_support enables QEMU emulation instead of
hardware virtualization (e.g. VT-x) instructions. It is also quite
broken in its current state and a fix is pending in the cpuinfo branch
on gerrit.

I don't really understand why fake_kvm would be used in this case, as
AMD-V is enabled according to the svm flag being present - the
following instructions should be correct for getting the flag passed
to the VMs.

Please check whether nested support is enabled on your host:
/sys/module/kvm_intel/parameters/nested

if not
 echo "options kvm-intel nested=1" > /etc/modprobe.d/kvm-intel.conf
and reload the module
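
For completeness, reloading would look roughly like this (with no VMs running):

# modprobe -r kvm_intel && modprobe kvm_intel
# cat /sys/module/kvm_intel/parameters/nested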

Then install vdsm-hook-nestedvt on your hosts.
If you also want to use the network on your L2 guests, please install
and configure the macspoof vdsm hook on your physical hosts as well;
otherwise all the traffic to/from your L2 guests will be dropped.
https://github.com/oVirt/vdsm/tree/master/vdsm_hooks/macspoof
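
For reference, the engine-side wiring for that hook is roughly the following, per the hook's README (double-check the exact property regex there, and you may need --cver for your cluster level):

# engine-config -s "UserDefinedVMProperties=macspoof=^(true|false)$"
# service ovirt-engine restart
(then set macspoof=true in the VM's Custom Properties)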




However, afterwards the hypervisor host cannot connect to the oVirt
Manager; on the oVirt Manager, engine.log shows:
2016-01-01 15:21:42,105 WARN [org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector] (DefaultQuartzScheduler_Worker-76) [71d1638c] Correlation ID: 29daf92c, Call Stack: null, Custom Event ID: -1, Message: Host bigvirt moved to Non-Operational state as host does not meet the cluster's minimum CPU level. Missing CPU features : model_Opteron_G3
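
A quick way to see which CPU model and flags vdsm itself reports to the engine (which is what this cluster check compares against) - vdsClient syntax from memory, so treat it as a sketch:

# vdsClient -s 0 getVdsCaps | grep -i -E 'cpuModel|cpuFlags'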

After removing the option everything works well again, but I'm still
lacking the nested KVM option :( Is nested KVM only supported on a
limited set of CPUs?

Winny

Information from /proc/cpuinfo:

processor       : 0
vendor_id       : AuthenticAMD
cpu family      : 16
model           : 10
model name      : AMD Phenom(tm) II X6 1055T Processor
stepping        : 0
microcode       : 0x10000bf
cpu MHz         : 2800.000
cache size      : 512 KB
physical id     : 0
siblings        : 6
core id         : 0
cpu cores       : 6
apicid          : 0
initial apicid  : 0
fpu             : yes
fpu_exception   : yes
cpuid level     : 6
wp              : yes
flags           : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm 3dnowext 3dnow constant_tsc rep_good nopl nonstop_tsc extd_apicid aperfmperf pni monitor cx16 popcnt lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt cpb hw_pstate npt lbrv svm_lock nrip_save pausefilter
bogomips        : 5625.54
TLB size        : 1024 4K pages
clflush size    : 64
cache_alignment : 64
address sizes   : 48 bits physical, 48 bits virtual
power management: ts ttp tm stc 100mhzsteps hwpstate cpb

_______________________________________________
Users mailing list
Users@ovirt.org
http://lists.ovirt.org/mailman/listinfo/users