> In the 'hosted-engine' script itself, in the function cmd_vm_start,
> there is a comment:
> # TODO: Check first the sanlock status, and if allows:
> Perhaps ha-agent checks sanlock status before starting the VM?
> Adding Martin.
QEMU does that by itself. It starts, asks for a lease and dies if it
can't get it.
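
If you want to see that from the host side before (or instead of) a manual
start, something along these lines is usually enough (just a sketch; the
exact lockspace/resource names depend on the deployment):

# Leases sanlock currently knows about on this host; the hosted-engine
# VM lease shows up here while the VM is running.
sanlock client status

# The HA daemons' own view of which host runs the engine VM.
hosted-engine --vm-status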
So:
>> I see that the qemu-kvm process for the engine starts on two hosts and
>> then on one of them it gets a "kill -15" and stops
>> Is it expected behaviour?
This is how it should behave, unless the reason for it is something else.
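
If you want to double-check that it really was the lease and not something
else, the sanlock log on the host that lost the race is the quickest place
to look (standard log location, adjust as needed):

tail -n 100 /var/log/sanlock.log   # look for the engine VM lease around the time of the kill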
Martin
On Mon, Jul 10, 2017 at 8:11 AM, Yedidyah Bar David <didi(a)redhat.com> wrote:
> On Sun, Jul 9, 2017 at 11:12 PM, Gianluca Cecchi
> <gianluca.cecchi(a)gmail.com> wrote:
>>
>>
>> On Sun, Jul 9, 2017 at 9:54 PM, Gianluca Cecchi <gianluca.cecchi(a)gmail.com> wrote:
>>>
>>> Hello.
>>> I'm on 4.1.3 with a self-hosted engine and GlusterFS as storage.
>>> I updated the kernel on the engine, so I executed these steps:
>>>
>>> - enable global maintenance from the web admin GUI
>>> - wait some minutes
>>> - shutdown the engine vm from inside its OS
>>> - wait some minutes
>>> - execute on one host
>>> [root@ovirt02 ~]# hosted-engine --set-maintenance --mode=none
>>>
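
For reference, the same maintenance cycle done purely from a host's shell
would look roughly like this (a sketch; host choice and waiting times are
up to you):

hosted-engine --set-maintenance --mode=global   # before touching the engine VM
# ... update and cleanly shut down the engine VM from inside its OS ...
hosted-engine --set-maintenance --mode=none     # let the HA agents restart it
hosted-engine --vm-status                       # confirm exactly one host reports the VM up
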
>> I see that the qemu-kvm process for the engine starts on two hosts and
>> then on one of them it gets a "kill -15" and stops
>> Is it expected behaviour?
>
> In the 'hosted-engine' script itself, in the function cmd_vm_start,
> there is a comment:
> # TODO: Check first the sanlock status, and if allows:
> Perhaps ha-agent checks sanlock status before starting the VM?
> Adding Martin.
>
> Please also check/share agent.log.
>
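
That is /var/log/ovirt-hosted-engine-ha/agent.log on each host; e.g.:

# HA agent log; the state transitions around the two start attempts
# should be visible here on both hosts.
tail -n 200 /var/log/ovirt-hosted-engine-ha/agent.log
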
>>> It seems somewhat dangerous to me...
>>
>>
>> And I don't know how related it is, but the engine VM doesn't come up.
>> Connecting to its VNC console, I see it stuck at "booting from hard disk":
>>
>> https://drive.google.com/file/d/0BwoPbcrMv8mvOEJWeVRvNThmTWc/view?usp=sha...
>>
>> The Gluster volume for the engine VM storage domain seems OK:
>>
>> [root@ovirt01 vdsm]# gluster volume heal engine info
>> Brick ovirt01.localdomain.local:/gluster/brick1/engine
>> Status: Connected
>> Number of entries: 0
>>
>> Brick ovirt02.localdomain.local:/gluster/brick1/engine
>> Status: Connected
>> Number of entries: 0
>>
>> Brick ovirt03.localdomain.local:/gluster/brick1/engine
>> Status: Connected
>> Number of entries: 0
>>
>> [root@ovirt01 vdsm]#
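
The heal counters only cover replication health; to rule out a problem with
the image itself, it may be worth a read-only check of the disk the engine
VM boots from (path taken from the qemu command line below), e.g.:

qemu-img info /var/run/vdsm/storage/e9e4a478-f391-42e5-9bb8-ed22a33e5cab/cf8b8f4e-fa01-457e-8a96-c5a27f8408f8/94c46bac-0a9f-49e8-9188-627fa0caf2b6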
>>
>>
>> and in HostedEngine.log:
>>
>> 2017-07-09 19:59:20.660+0000: starting up libvirt version: 2.0.0, package:
>> 10.el7_3.9 (CentOS BuildSystem <http://bugs.centos.org>, 2017-05-25-20:52:28,
>> c1bm.rdu2.centos.org), qemu version: 2.6.0
>> (qemu-kvm-ev-2.6.0-28.el7.10.1), hostname: ovirt01.localdomain.local
>> LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin
>> QEMU_AUDIO_DRV=none /usr/libexec/qemu-kvm -name
>> guest=HostedEngine,debug-threads=on -S -object
>> secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-3-HostedEngine/master-key.aes
>> -machine pc-i440fx-rhel7.3.0,accel=kvm,usb=off -cpu Broadwell,+rtm,+hle -m
>> 6144 -realtime mlock=off -smp 1,maxcpus=16,sockets=16,cores=1,threads=1
>> -uuid 87fd6bdb-535d-45b8-81d4-7e3101a6c364 -smbios
>> 'type=1,manufacturer=oVirt,product=oVirt
>> Node,version=7-3.1611.el7.centos,serial=564D777E-B638-E808-9044-680BA4957704,uuid=87fd6bdb-535d-45b8-81d4-7e3101a6c364'
>> -no-user-config -nodefaults -chardev
>> socket,id=charmonitor,path=/var/lib/libvirt/qemu/domain-3-HostedEngine/monitor.sock,server,nowait
>> -mon chardev=charmonitor,id=monitor,mode=control -rtc
>> base=2017-07-09T19:59:20,driftfix=slew -global
>> kvm-pit.lost_tick_policy=discard -no-hpet -no-reboot -boot strict=on -device
>> piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -device
>> virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x4 -drive
>> file=/var/run/vdsm/storage/e9e4a478-f391-42e5-9bb8-ed22a33e5cab/cf8b8f4e-fa01-457e-8a96-c5a27f8408f8/94c46bac-0a9f-49e8-9188-627fa0caf2b6,format=raw,if=none,id=drive-virtio-disk0,serial=cf8b8f4e-fa01-457e-8a96-c5a27f8408f8,cache=none,werror=stop,rerror=stop,aio=threads
>> -device
>> virtio-blk-pci,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1
>> -drive if=none,id=drive-ide0-1-0,readonly=on -device
>> ide-cd,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0 -netdev
>> tap,fd=30,id=hostnet0,vhost=on,vhostfd=32 -device
>> virtio-net-pci,netdev=hostnet0,id=net0,mac=00:16:3e:0a:e7:ba,bus=pci.0,addr=0x3
>> -chardev
>> socket,id=charchannel0,path=/var/lib/libvirt/qemu/channels/87fd6bdb-535d-45b8-81d4-7e3101a6c364.com.redhat.rhevm.vdsm,server,nowait
>> -device
>> virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=com.redhat.rhevm.vdsm
>> -chardev
>> socket,id=charchannel1,path=/var/lib/libvirt/qemu/channels/87fd6bdb-535d-45b8-81d4-7e3101a6c364.org.qemu.guest_agent.0,server,nowait
>> -device
>> virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,id=channel1,name=org.qemu.guest_agent.0
>> -chardev
>> socket,id=charchannel2,path=/var/lib/libvirt/qemu/channels/87fd6bdb-535d-45b8-81d4-7e3101a6c364.org.ovirt.hosted-engine-setup.0,server,nowait
>> -device
>> virtserialport,bus=virtio-serial0.0,nr=3,chardev=charchannel2,id=channel2,name=org.ovirt.hosted-engine-setup.0
>> -chardev pty,id=charconsole0 -device
>> virtconsole,chardev=charconsole0,id=console0 -vnc 0:0,password -device
>> cirrus-vga,id=video0,bus=pci.0,addr=0x2 -object
>> rng-random,id=objrng0,filename=/dev/urandom -device
>> virtio-rng-pci,rng=objrng0,id=rng0,bus=pci.0,addr=0x6 -msg timestamp=on
>> char device redirected to /dev/pts/1 (label charconsole0)
>> warning: host doesn't support requested feature: CPUID.07H:EBX.erms [bit 9]
>>
>
>
>
> --
> Didi