OK, I fixed the SSL problem: iptables on my oVirt manager machine was blocking port 8443.
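In case it helps anyone else hitting the same thing, a rule along these lines should open the port (just a sketch; the exact chain and how you persist it depend on your setup):

iptables -I INPUT -p tcp --dport 8443 -j ACCEPT   # accept inbound connections to the engine on 8443
service iptables save                             # persist the rule (assuming the iptables service, as on Fedora 17)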

I also reinstalled the latest version of the node (ovirt-node-iso-2.5.1-1.0.fc17.iso), but the oVirt manager still doesn't recognize the CPU.

The host status remains Non Operational:

Host localhost.localdomain moved to Non-Operational state as host does not meet the cluster's minimum CPU level. Missing CPU features : model_Nehalem
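If I understand that right, the cluster is set to a Nehalem-level CPU type, so vdsm would have to advertise model_Nehalem in its cpuFlags before the host can become operational. One way to see what libvirt's own model definitions require for Nehalem (a sketch; the cpu_map.xml path is my assumption for this libvirt build):

grep -A 20 Nehalem /usr/share/libvirt/cpu_map.xml   # shows the feature set libvirt requires for the Nehalem model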

Here are the outputs of the commands vdsClient and virsh:

[root@blade4 ~]# vdsClient -s 0 getVdsCaps
HBAInventory = {'iSCSI': [{'InitiatorName': 'iqn.1994-05.com.redhat:blade4.vi.pt'}], 'FC': []}
ISCSIInitiatorName = iqn.1994-05.com.redhat:blade4.vi.pt
bondings = {'bond4': {'addr': '', 'cfg': {}, 'mtu': '1500', 'netmask': '', 'slaves': [], 'hwaddr': '00:00:00:00:00:00'}, 'bond0': {'addr': '', 'cfg': {}, 'mtu': '1500', 'netmask': '', 'slaves': [], 'hwaddr': '00:00:00:00:00:00'}, 'bond1': {'addr': '', 'cfg': {}, 'mtu': '1500', 'netmask': '', 'slaves': [], 'hwaddr': '00:00:00:00:00:00'}, 'bond2': {'addr': '', 'cfg': {}, 'mtu': '1500', 'netmask': '', 'slaves': [], 'hwaddr': '00:00:00:00:00:00'}, 'bond3': {'addr': '', 'cfg': {}, 'mtu': '1500', 'netmask': '', 'slaves': [], 'hwaddr': '00:00:00:00:00:00'}}
clusterLevels = ['3.0', '3.1']
cpuCores = 4
cpuFlags = fpu,vme,de,pse,tsc,msr,pae,mce,cx8,apic,sep,mtrr,pge,mca,cmov,pat,pse36,clflush,dts,acpi,mmx,fxsr,sse,sse2,ss,ht,tm,pbe,syscall,nx,rdtscp,lm,constant_tsc,arch_perfmon,pebs,bts,rep_good,nopl,xtopology,nonstop_tsc,aperfmperf,pni,dtes64,monitor,ds_cpl,vmx,est,tm2,ssse3,cx16,xtpr,pdcm,dca,sse4_1,sse4_2,popcnt,lahf_lm,ida,dtherm,tpr_shadow,vnmi,flexpriority,ept,vpid,model_coreduo,model_Conroe
cpuModel = Intel(R) Xeon(R) CPU           E5530  @ 2.40GHz
cpuSockets = 1
cpuSpeed = 1600.000
emulatedMachines = ['pc-0.15', 'pc-1.0', 'pc', 'pc-0.14', 'pc-0.13', 'pc-0.12', 'pc-0.11', 'pc-0.10', 'isapc', 'pc-0.15', 'pc-1.0', 'pc', 'pc-0.14', 'pc-0.13', 'pc-0.12', 'pc-0.11', 'pc-0.10', 'isapc']
guestOverhead = 65
hooks = {}
kvmEnabled = true
lastClient = 192.168.10.40
lastClientIface = ovirtmgmt
management_ip =
memSize = 17926
netConfigDirty = False
networks = {'ovirtmgmt': {'addr': '192.168.10.24', 'cfg': {'IPV6FORWARDING': 'no', 'IPV6INIT': 'no', 'IPADDR': '192.168.10.24', 'ONBOOT': 'yes', 'IPV6_AUTOCONF': 'no', 'DELAY': '0', 'NM_CONTROLLED': 'no', 'NETMASK': '255.255.255.0', 'BOOTPROTO': 'static', 'DEVICE': 'ovirtmgmt', 'PEERNTP': 'yes', 'TYPE': 'Bridge', 'GATEWAY': '192.168.10.254'}, 'mtu': '1500', 'netmask': '255.255.255.0', 'stp': 'off', 'bridged': True, 'gateway': '192.168.10.254', 'ports': ['em1.10']}}
nics = {'p1p1': {'hwaddr': 'd8:d3:85:67:e3:b8', 'netmask': '', 'speed': 0, 'addr': '', 'mtu': '1500'}, 'em1': {'hwaddr': 'd8:d3:85:bf:e9:b0', 'netmask': '', 'speed': 1000, 'addr': '', 'mtu': '1500'}, 'rename3': {'hwaddr': 'd8:d3:85:67:e3:ba', 'netmask': '', 'speed': 0, 'addr': '', 'mtu': '1500'}, 'em2': {'hwaddr': 'd8:d3:85:bf:e9:b4', 'netmask': '', 'speed': 0, 'addr': '', 'mtu': '1500'}}
operatingSystem = {'release': '1', 'version': '17', 'name': 'oVirt Node'}
packages2 = {'kernel': {'release': '2.fc17.x86_64', 'buildtime': 1343659739.0, 'version': '3.5.0'}, 'spice-server': {'release': '5.fc17', 'buildtime': '1336983054', 'version': '0.10.1'}, 'vdsm': {'release': '6.fc17', 'buildtime': '1343817997', 'version': '4.10.0'}, 'qemu-kvm': {'release': '18.fc17', 'buildtime': '1342650221', 'version': '1.0'}, 'libvirt': {'release': '3.fc17', 'buildtime': '1340891887', 'version': '0.9.11.4'}, 'qemu-img': {'release': '18.fc17', 'buildtime': '1342650221', 'version': '1.0'}}
reservedMem = 321
software_revision = 6
software_version = 4.10
supportedProtocols = ['2.2', '2.3']
supportedRHEVMs = ['3.0', '3.1']
uuid = 37373035-3038-5A43-4A30-30333035455A_d8:d3:85:67:e3:b8
version_name = Snow Man
vlans = {'em1.10': {'netmask': '', 'iface': 'em1', 'addr': '', 'mtu': '1500'}}
vmTypes = ['kvm']


[root@blade4 ~]# virsh capabilities
<capabilities>

  <host>
    <uuid>35303737-3830-435a-4a30-30333035455a</uuid>
    <cpu>
      <arch>x86_64</arch>
      <model>Nehalem</model>
      <vendor>Intel</vendor>
      <topology sockets='1' cores='4' threads='2'/>
      <feature name='rdtscp'/>
      <feature name='dca'/>
      <feature name='pdcm'/>
      <feature name='xtpr'/>
      <feature name='tm2'/>
      <feature name='est'/>
      <feature name='vmx'/>
      <feature name='ds_cpl'/>
      <feature name='monitor'/>
      <feature name='dtes64'/>
      <feature name='pbe'/>
      <feature name='tm'/>
      <feature name='ht'/>
      <feature name='ss'/>
      <feature name='acpi'/>
      <feature name='ds'/>
      <feature name='vme'/>
    </cpu>
    <power_management/>
    <migration_features>
      <live/>
      <uri_transports>
        <uri_transport>tcp</uri_transport>
      </uri_transports>
    </migration_features>
    <topology>
      <cells num='1'>
        <cell id='0'>
          <cpus num='8'>
            <cpu id='0'/>
            <cpu id='1'/>
            <cpu id='2'/>
            <cpu id='3'/>
            <cpu id='4'/>
            <cpu id='5'/>
            <cpu id='6'/>
            <cpu id='7'/>
          </cpus>
        </cell>
      </cells>
    </topology>
    <secmodel>
      <model>selinux</model>
      <doi>0</doi>
    </secmodel>
  </host>

  <guest>
    <os_type>hvm</os_type>
    <arch name='i686'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-x86_64</emulator>
      <machine>pc-0.15</machine>
      <machine>pc-1.0</machine>
      <machine canonical='pc-1.0'>pc</machine>
      <machine>pc-0.14</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/qemu-kvm</emulator>
        <machine>pc-0.15</machine>
        <machine>pc-1.0</machine>
        <machine canonical='pc-1.0'>pc</machine>
        <machine>pc-0.14</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <pae/>
      <nonpae/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>

  <guest>
    <os_type>hvm</os_type>
    <arch name='x86_64'>
      <wordsize>64</wordsize>
      <emulator>/usr/bin/qemu-system-x86_64</emulator>
      <machine>pc-0.15</machine>
      <machine>pc-1.0</machine>
      <machine canonical='pc-1.0'>pc</machine>
      <machine>pc-0.14</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/qemu-kvm</emulator>
        <machine>pc-0.15</machine>
        <machine>pc-1.0</machine>
        <machine canonical='pc-1.0'>pc</machine>
        <machine>pc-0.14</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>

</capabilities>
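So libvirt itself identifies the CPU as Nehalem, but vdsm only advertises up to model_Conroe in its cpuFlags. To double-check what libvirt answers when asked directly, something like this should work (a sketch; /tmp/nehalem.xml is a file I'd write by hand for the test):

cat > /tmp/nehalem.xml <<'EOF'
<cpu match="minimum">
  <model>Nehalem</model>
  <vendor>Intel</vendor>
</cpu>
EOF
virsh cpu-compare /tmp/nehalem.xml   # asks libvirt whether the host CPU covers a Nehalem-level guest
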
-----Original Message-----
From: Dan Kenigsberg <danken@redhat.com>
To: Justin Clift <jclift@redhat.com>
Cc: Ricardo Esteves <maverick.pt@gmail.com>, users@ovirt.org
Subject: Re: [Users] oVIrt 3.1 - Xeon E5530 - Wrong cpu identification
Date: Sun, 5 Aug 2012 11:18:25 +0300

On Fri, Aug 03, 2012 at 06:47:44AM +1000, Justin Clift wrote:
> On 02/08/2012, at 2:29 AM, Ricardo Esteves wrote:
> > And now, after reboot of the node, i get this:
> > 
> > [root@blade4 ~]# virsh capabilities
> > Segmentation fault
> 
> When that seg fault happens, does anything get printed to
> /var/log/messages?
> 
> Kind of wondering if there's something else at play here,
> which might show up there.  Worth a look at. :)
> 
> Regards and best wishes,
> 
> Justin Clift

Please note that vdsm hacks libvirt to use sasl authentication, which
may be related to this crash.

Does anything look better with `virsh -r`?