Hyperconverged Setup stuck
by Stefan Wolf
Hello,
I would like to set up a hyperconverged deployment.
I have 3 hosts, each freshly installed.
kvm320 has one additional 1 TB SATA hard drive, and kvm360 and kvm380 each have
two additional SAS hard drives of 300 GB and 600 GB.
#gdeploy configuration generated by cockpit-gluster plugin
[hosts]
kvm380.durchhalten.intern
kvm360.durchhalten.intern
kvm320.durchhalten.intern
[script1:kvm380.durchhalten.intern]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb,sdc -h kvm380.durchhalten.intern, kvm360.durchhalten.intern, kvm320.durchhalten.intern
[script1:kvm360.durchhalten.intern]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb,sdc -h kvm380.durchhalten.intern, kvm360.durchhalten.intern, kvm320.durchhalten.intern
[script1:kvm320.durchhalten.intern]
action=execute
ignore_script_errors=no
file=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h kvm380.durchhalten.intern, kvm360.durchhalten.intern, kvm320.durchhalten.intern
[disktype]
raid6
[diskcount]
12
[stripesize]
256
[service1]
action=enable
service=chronyd
[service2]
action=restart
service=chronyd
[shell2]
action=execute
command=vdsm-tool configure --force
[script3]
action=execute
file=/usr/share/gdeploy/scripts/blacklist_all_disks.sh
ignore_script_errors=no
[pv1:kvm380.durchhalten.intern]
action=create
devices=sdb
ignore_pv_errors=no
[pv2:kvm380.durchhalten.intern]
action=create
devices=sdc
ignore_pv_errors=no
[pv1:kvm360.durchhalten.intern]
action=create
devices=sdb
ignore_pv_errors=no
[pv2:kvm360.durchhalten.intern]
action=create
devices=sdc
ignore_pv_errors=no
[pv1:kvm320.durchhalten.intern]
action=create
devices=sdb
ignore_pv_errors=no
[vg1:kvm380.durchhalten.intern]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[vg2:kvm380.durchhalten.intern]
action=create
vgname=gluster_vg_sdc
pvname=sdc
ignore_vg_errors=no
[vg1:kvm360.durchhalten.intern]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[vg2:kvm360.durchhalten.intern]
action=create
vgname=gluster_vg_sdc
pvname=sdc
ignore_vg_errors=no
[vg1:kvm320.durchhalten.intern]
action=create
vgname=gluster_vg_sdb
pvname=sdb
ignore_vg_errors=no
[lv1:kvm380.durchhalten.intern]
action=create
poolname=gluster_thinpool_sdc
ignore_lv_errors=no
vgname=gluster_vg_sdc
lvtype=thinpool
size=1005GB
poolmetadatasize=5GB
[lv2:kvm360.durchhalten.intern]
action=create
poolname=gluster_thinpool_sdc
ignore_lv_errors=no
vgname=gluster_vg_sdc
lvtype=thinpool
size=1005GB
poolmetadatasize=5GB
[lv3:kvm320.durchhalten.intern]
action=create
poolname=gluster_thinpool_sdb
ignore_lv_errors=no
vgname=gluster_vg_sdb
lvtype=thinpool
size=41GB
poolmetadatasize=1GB
[lv4:kvm380.durchhalten.intern]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick
[lv5:kvm380.durchhalten.intern]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdc
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdc
virtualsize=500GB
[lv6:kvm380.durchhalten.intern]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdc
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdc
virtualsize=500GB
[lv7:kvm360.durchhalten.intern]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=100GB
lvtype=thick
[lv8:kvm360.durchhalten.intern]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdc
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdc
virtualsize=500GB
[lv9:kvm360.durchhalten.intern]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdc
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdc
virtualsize=500GB
[lv10:kvm320.durchhalten.intern]
action=create
lvname=gluster_lv_engine
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/engine
size=20GB
lvtype=thick
[lv11:kvm320.durchhalten.intern]
action=create
lvname=gluster_lv_data
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/data
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=20GB
[lv12:kvm320.durchhalten.intern]
action=create
lvname=gluster_lv_vmstore
ignore_lv_errors=no
vgname=gluster_vg_sdb
mount=/gluster_bricks/vmstore
lvtype=thinlv
poolname=gluster_thinpool_sdb
virtualsize=20GB
[selinux]
yes
[service3]
action=restart
service=glusterd
slice_setup=yes
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
[script2]
action=execute
file=/usr/share/gdeploy/scripts/disable-gluster-hooks.sh
[shell3]
action=execute
command=usermod -a -G gluster qemu
[volume1]
action=create
volname=engine
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=kvm380.durchhalten.intern:/gluster_bricks/engine/engine,kvm360.durchhalten.intern:/gluster_bricks/engine/engine,kvm320.durchhalten.intern:/gluster_bricks/engine/engine
ignore_volume_errors=no
arbiter_count=1
[volume2]
action=create
volname=data
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=kvm380.durchhalten.intern:/gluster_bricks/data/data,kvm360.durchhalten.intern:/gluster_bricks/data/data,kvm320.durchhalten.intern:/gluster_bricks/data/data
ignore_volume_errors=no
arbiter_count=1
[volume3]
action=create
volname=vmstore
transport=tcp
replica=yes
replica_count=3
key=group,storage.owner-uid,storage.owner-gid,network.ping-timeout,performance.strict-o-direct,network.remote-dio,cluster.granular-entry-heal
value=virt,36,36,30,on,off,enable
brick_dirs=kvm380.durchhalten.intern:/gluster_bricks/vmstore/vmstore,kvm360.durchhalten.intern:/gluster_bricks/vmstore/vmstore,kvm320.durchhalten.intern:/gluster_bricks/vmstore/vmstore
ignore_volume_errors=no
arbiter_count=1
I prepared kvm380 with ssh-keygen and copied the key to all 3 hosts.
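(For completeness, the key setup I did was roughly the following, run from kvm380 as root; ssh-keygen and ssh-copy-id are the standard tools, and root is what the cockpit wizard expects as far as I know:
ssh-keygen -t rsa
ssh-copy-id root@kvm380.durchhalten.intern
ssh-copy-id root@kvm360.durchhalten.intern
ssh-copy-id root@kvm320.durchhalten.intern
After that, passwordless ssh from kvm380 to all three hosts works, and the first gdeploy tasks below do run.)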
This is the output; at the end nothing happens anymore:
PLAY [gluster_servers]
*********************************************************
TASK [Run a shell script]
******************************************************
changed: [kvm380.durchhalten.intern] =>
(item=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb,sdc -h
kvm380.durchhalten.intern, kvm360.durchhalten.intern,
kvm320.durchhalten.intern)
PLAY RECAP
*********************************************************************
kvm380.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Run a shell script]
******************************************************
changed: [kvm360.durchhalten.intern] =>
(item=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb,sdc -h
kvm380.durchhalten.intern, kvm360.durchhalten.intern,
kvm320.durchhalten.intern)
PLAY RECAP
*********************************************************************
kvm360.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Run a shell script]
******************************************************
changed: [kvm320.durchhalten.intern] =>
(item=/usr/share/gdeploy/scripts/grafton-sanity-check.sh -d sdb -h
kvm380.durchhalten.intern, kvm360.durchhalten.intern,
kvm320.durchhalten.intern)
PLAY RECAP
*********************************************************************
kvm320.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Enable or disable services]
**********************************************
ok: [kvm320.durchhalten.intern] => (item=chronyd)
ok: [kvm380.durchhalten.intern] => (item=chronyd)
ok: [kvm360.durchhalten.intern] => (item=chronyd)
PLAY RECAP
*********************************************************************
kvm320.durchhalten.intern : ok=1 changed=0 unreachable=0 failed=0
kvm360.durchhalten.intern : ok=1 changed=0 unreachable=0 failed=0
kvm380.durchhalten.intern : ok=1 changed=0 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [start/stop/restart/reload services]
**************************************
changed: [kvm320.durchhalten.intern] => (item=chronyd)
changed: [kvm380.durchhalten.intern] => (item=chronyd)
changed: [kvm360.durchhalten.intern] => (item=chronyd)
PLAY RECAP
*********************************************************************
kvm320.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
kvm360.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
kvm380.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Run a command in the shell]
**********************************************
changed: [kvm320.durchhalten.intern] => (item=vdsm-tool configure --force)
changed: [kvm360.durchhalten.intern] => (item=vdsm-tool configure --force)
changed: [kvm380.durchhalten.intern] => (item=vdsm-tool configure --force)
PLAY RECAP
*********************************************************************
kvm320.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
kvm360.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
kvm380.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Run a shell script]
******************************************************
changed: [kvm320.durchhalten.intern] =>
(item=/usr/share/gdeploy/scripts/blacklist_all_disks.sh)
changed: [kvm380.durchhalten.intern] =>
(item=/usr/share/gdeploy/scripts/blacklist_all_disks.sh)
changed: [kvm360.durchhalten.intern] =>
(item=/usr/share/gdeploy/scripts/blacklist_all_disks.sh)
PLAY RECAP
*********************************************************************
kvm320.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
kvm360.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
kvm380.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Clean up filesystem signature]
*******************************************
skipping: [kvm380.durchhalten.intern] => (item=/dev/sdb)
TASK [Create Physical Volume]
**************************************************
ok: [kvm380.durchhalten.intern] => (item=/dev/sdb)
PLAY RECAP
*********************************************************************
kvm380.durchhalten.intern : ok=1 changed=0 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Clean up filesystem signature]
*******************************************
skipping: [kvm380.durchhalten.intern] => (item=/dev/sdc)
TASK [Create Physical Volume]
**************************************************
ok: [kvm380.durchhalten.intern] => (item=/dev/sdc)
PLAY RECAP
*********************************************************************
kvm380.durchhalten.intern : ok=1 changed=0 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Clean up filesystem signature]
*******************************************
skipping: [kvm360.durchhalten.intern] => (item=/dev/sdb)
TASK [Create Physical Volume]
**************************************************
changed: [kvm360.durchhalten.intern] => (item=/dev/sdb)
PLAY RECAP
*********************************************************************
kvm360.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Clean up filesystem signature]
*******************************************
skipping: [kvm360.durchhalten.intern] => (item=/dev/sdc)
TASK [Create Physical Volume]
**************************************************
changed: [kvm360.durchhalten.intern] => (item=/dev/sdc)
PLAY RECAP
*********************************************************************
kvm360.durchhalten.intern : ok=1 changed=1 unreachable=0 failed=0
PLAY [gluster_servers]
*********************************************************
TASK [Clean up filesystem signature]
*******************************************
skipping: [kvm320.durchhalten.intern] => (item=/dev/sdb)
TASK [Create Physical Volume]
*************************************************
This is where it hangs. /var/log/messages on kvm320 shows:
Dec 18 12:02:55 kvm320 network: Schnittstelle eno1 hochfahren: [ OK ]
Dec 18 12:02:55 kvm320 systemd: Started LSB: Bring up/down networking.
Dec 18 12:02:55 kvm320 systemd: Reached target Network.
Dec 18 12:02:55 kvm320 systemd: Starting Postfix Mail Transport Agent...
Dec 18 12:02:55 kvm320 systemd: Started Gofer Agent.
Dec 18 12:02:55 kvm320 systemd: Started Link Layer Discovery Protocol Agent
Daemon..
Dec 18 12:02:55 kvm320 systemd: Starting Open-iSCSI...
Dec 18 12:02:55 kvm320 systemd: Starting Open-FCoE Inititator....
Dec 18 12:02:55 kvm320 systemd: Starting Enable periodic update of
entitlement certificates....
Dec 18 12:02:55 kvm320 systemd: Starting Dynamic System Tuning Daemon...
Dec 18 12:02:55 kvm320 systemd: Reached target Network is Online.
Dec 18 12:02:55 kvm320 systemd: Starting Cockpit motd updater service...
Dec 18 12:02:55 kvm320 systemd: Starting Notify NFS peers of a restart...
Dec 18 12:02:56 kvm320 systemd: Starting System Logging Service...
Dec 18 12:02:56 kvm320 systemd: Starting OpenSSH server daemon...
Dec 18 12:02:56 kvm320 systemd: Started Cockpit motd updater service.
Dec 18 12:02:56 kvm320 iscsid: iSCSI logger with pid=6222 started!
Dec 18 12:02:56 kvm320 systemd: Started Open-iSCSI.
Dec 18 12:02:56 kvm320 systemd: Starting Logout off all iSCSI sessions on
shutdown...
Dec 18 12:02:56 kvm320 sm-notify[6213]: Version 1.3.0 starting
Dec 18 12:02:56 kvm320 systemd: Started Enable periodic update of
entitlement certificates..
Dec 18 12:02:56 kvm320 systemd: Started Notify NFS peers of a restart.
Dec 18 12:02:56 kvm320 systemd: Started Logout off all iSCSI sessions on
shutdown.
Dec 18 12:02:56 kvm320 systemd: Reached target Remote File Systems (Pre).
Dec 18 12:02:56 kvm320 systemd: Reached target Remote File Systems.
Dec 18 12:02:56 kvm320 systemd: Starting Crash recovery kernel arming...
Dec 18 12:02:56 kvm320 systemd: Starting Virtualization daemon...
Dec 18 12:02:56 kvm320 systemd: Starting Permit User Sessions...
Dec 18 12:02:56 kvm320 systemd: Started Permit User Sessions.
Dec 18 12:02:56 kvm320 systemd: Started Command Scheduler.
Dec 18 12:02:56 kvm320 systemd: Starting Wait for Plymouth Boot Screen to
Quit...
Dec 18 12:02:56 kvm320 systemd: Starting Terminate Plymouth Boot Screen...
Dec 18 12:02:56 kvm320 systemd: Received SIGRTMIN+21 from PID 572
(plymouthd).
Dec 18 12:02:56 kvm320 systemd: Started Wait for Plymouth Boot Screen to
Quit.
Dec 18 12:02:56 kvm320 systemd: Started Terminate Plymouth Boot Screen.
Dec 18 12:02:56 kvm320 systemd: Started Getty on tty1.
Dec 18 12:02:56 kvm320 systemd: Reached target Login Prompts.
Dec 18 12:02:56 kvm320 kdumpctl: No kdump initial ramdisk found.
Dec 18 12:02:56 kvm320 kdumpctl: Rebuilding
/boot/initramfs-3.10.0-957.1.3.el7.x86_64kdump.img
Dec 18 12:02:57 kvm320 systemd: Started OpenSSH server daemon.
Dec 18 12:02:57 kvm320 rsyslogd: [origin software="rsyslogd"
swVersion="8.24.0-34.el7" x-pid="6215" x-info="http://www.rsyslog.com"]
start
Dec 18 12:02:57 kvm320 systemd: Started System Logging Service.
Dec 18 12:02:57 kvm320 iscsid: iSCSI daemon with pid=6223 started!
Dec 18 12:02:57 kvm320 kernel: cnic: QLogic cnicDriver v2.5.22 (July 20,
2015)
Dec 18 12:02:57 kvm320 kernel: bnx2fc: QLogic FCoE Driver bnx2fc v2.11.8
(October 15, 2015)
Dec 18 12:02:57 kvm320 systemd: Started Open-FCoE Inititator..
Dec 18 12:02:57 kvm320 systemd: Starting Availability of block devices...
Dec 18 12:02:57 kvm320 systemd: Started Availability of block devices.
Dec 18 12:02:58 kvm320 goferd: [WARNING][MainThread] gofer.agent.plugin:639
- plugin:demo, DISABLED
Dec 18 12:02:58 kvm320 goferd: [INFO][Thread-1] gofer.rmi.store:114 - Using:
/var/lib/gofer/messaging/pending/demo
Dec 18 12:02:58 kvm320 goferd: [INFO][Thread-2] gofer.rmi.store:114 - Using:
/var/lib/gofer/messaging/pending/katelloplugin
Dec 18 12:02:58 kvm320 goferd: [INFO][Thread-3] gofer.rmi.store:114 - Using:
/var/lib/gofer/messaging/pending/katelloplugin
Dec 18 12:02:58 kvm320 systemd: Created slice User Slice of root.
Dec 18 12:02:58 kvm320 systemd: Started Session 1 of user root.
Dec 18 12:02:58 kvm320 systemd: Removed slice User Slice of root.
Dec 18 12:02:59 kvm320 systemd: Started Virtualization daemon.
Dec 18 12:02:59 kvm320 systemd: Starting Virtual Desktop Server Manager
network restoration...
Dec 18 12:02:59 kvm320 systemd: Starting Suspend/Resume Running libvirt
Guests...
Dec 18 12:03:00 kvm320 systemd: Started Suspend/Resume Running libvirt
Guests.
Dec 18 12:03:01 kvm320 systemd: Started Dynamic System Tuning Daemon.
Dec 18 13:03:02 kvm320 chronyd[4367]: Selected source 131.188.3.220
Dec 18 13:03:02 kvm320 chronyd[4367]: System clock wrong by 3599.903842
seconds, adjustment started
Dec 18 13:03:02 kvm320 systemd: Time has been changed
Dec 18 13:03:02 kvm320 chronyd[4367]: System clock was stepped by
3599.903842 seconds
Dec 18 13:03:03 kvm320 goferd: [INFO][MainThread] gofer.agent.plugin:682 -
plugin:katelloplugin loaded using: /usr/lib/gofer/plugins/katelloplugin.py
Dec 18 13:03:03 kvm320 goferd: [INFO][MainThread] gofer.agent.main:87 -
agent started.
Dec 18 13:03:03 kvm320 goferd: [INFO][worker-0] katelloplugin:249 -
Restarting goferd.
Dec 18 13:03:03 kvm320 goferd: Redirecting to /bin/systemctl restart
goferd.service
Dec 18 13:03:03 kvm320 systemd: Cannot add dependency job for unit
lvm2-lvmetad.socket, ignoring: Unit is masked.
Dec 18 13:03:03 kvm320 systemd: Stopping Gofer Agent...
Dec 18 13:03:03 kvm320 systemd: Stopped Gofer Agent.
Dec 18 13:03:03 kvm320 systemd: Started Gofer Agent.
Dec 18 13:03:03 kvm320 goferd: [INFO][Thread-1] gofer.rmi.store:114 - Using:
/var/lib/gofer/messaging/pending/demo
Dec 18 13:03:03 kvm320 goferd: [WARNING][MainThread] gofer.agent.plugin:639
- plugin:demo, DISABLED
Dec 18 13:03:03 kvm320 goferd: [INFO][Thread-2] gofer.rmi.store:114 - Using:
/var/lib/gofer/messaging/pending/katelloplugin
Dec 18 13:03:03 kvm320 goferd: [INFO][Thread-3] gofer.rmi.store:114 - Using:
/var/lib/gofer/messaging/pending/katelloplugin
Dec 18 13:03:03 kvm320 goferd: [INFO][MainThread] gofer.agent.plugin:682 -
plugin:katelloplugin loaded using: /usr/lib/gofer/plugins/katelloplugin.py
Dec 18 13:03:03 kvm320 systemd: Unit multipathd.service cannot be reloaded
because it is inactive.
Dec 18 13:03:03 kvm320 goferd: [INFO][MainThread] gofer.agent.main:87 -
agent started.
Dec 18 13:03:04 kvm320 dracut: dracut-033-554.el7
Dec 18 13:03:04 kvm320 systemd: Cannot add dependency job for unit
lvm2-lvmetad.socket, ignoring: Unit is masked.
Dec 18 13:03:04 kvm320 systemd: Starting Shared Storage Lease Manager...
Dec 18 13:03:04 kvm320 systemd: Started Shared Storage Lease Manager.
Dec 18 13:03:04 kvm320 vdsm-tool: Checking configuration status...
Dec 18 13:03:04 kvm320 vdsm-tool: abrt is not configured for vdsm
Dec 18 13:03:04 kvm320 vdsm-tool: WARNING: LVM local configuration:
/etc/lvm/lvmlocal.conf is not based on vdsm configuration
Dec 18 13:03:04 kvm320 vdsm-tool: lvm requires configuration
Dec 18 13:03:04 kvm320 vdsm-tool: libvirt is not configured for vdsm yet
Dec 18 13:03:04 kvm320 vdsm-tool: FAILED: conflicting vdsm and libvirt-qemu
tls configuration.
Dec 18 13:03:04 kvm320 vdsm-tool: vdsm.conf with ssl=True requires the
following changes:
Dec 18 13:03:04 kvm320 vdsm-tool: libvirtd.conf: listen_tcp=0,
auth_tcp="sasl", listen_tls=1
Dec 18 13:03:04 kvm320 vdsm-tool: qemu.conf: spice_tls=1.
Dec 18 13:03:04 kvm320 vdsm-tool: multipath requires configuration
Dec 18 13:03:04 kvm320 vdsm-tool: Running configure...
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of abrt is done.
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of passwd is done.
Dec 18 13:03:04 kvm320 vdsm-tool: WARNING: LVM local configuration:
/etc/lvm/lvmlocal.conf is not based on vdsm configuration
Dec 18 13:03:04 kvm320 vdsm-tool: Backing up /etc/lvm/lvmlocal.conf to
/etc/lvm/lvmlocal.conf.201812181202
Dec 18 13:03:04 kvm320 vdsm-tool: Installing /usr/share/vdsm/lvmlocal.conf
at /etc/lvm/lvmlocal.conf
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of lvm is done.
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of sebool is done.
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of certificates is done.
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of libvirt is done.
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of sanlock is done.
Dec 18 13:03:04 kvm320 vdsm-tool: Reconfiguration of multipath is done.
Dec 18 13:03:04 kvm320 vdsm-tool: Done configuring modules to VDSM.
Dec 18 13:03:04 kvm320 systemd: Started Reconfigure vdsm.
Dec 18 13:03:04 kvm320 systemd: Started Auxiliary vdsm service for running
helper functions as root.
Dec 18 13:03:04 kvm320 dracut: Executing: /usr/sbin/dracut --hostonly
--hostonly-cmdline --hostonly-i18n --hostonly-mode strict -o "plymouth dash
resume ifcfg" --mount "/dev/mapper/onn_kvm320-var_crash
/kdumproot//var/crash ext4
rw,relatime,seclabel,discard,stripe=16,data=ordered"
--no-hostonly-default-device -f
/boot/initramfs-3.10.0-957.1.3.el7.x86_64kdump.img 3.10.0-957.1.3.el7.x86_64
Dec 18 13:03:04 kvm320 systemd: Started Postfix Mail Transport Agent.
Dec 18 13:03:05 kvm320 dracut: dracut module 'busybox' will not be
installed, because command 'busybox' could not be found!
Dec 18 13:03:05 kvm320 dracut: dracut module 'ifcfg' will not be installed,
because it's in the list to be omitted!
Dec 18 13:03:05 kvm320 dracut: dracut module 'plymouth' will not be
installed, because it's in the list to be omitted!
Dec 18 13:03:05 kvm320 dracut: dracut module 'dmsquash-live-ntfs' will not
be installed, because command 'ntfs-3g' could not be found!
Dec 18 13:03:05 kvm320 journal: failed to load module nvdimm:
libbd_nvdimm.so.2: cannot open shared object file: No such file or directory
Dec 18 13:03:06 kvm320 dracut: dracut module 'cifs' will not be installed,
because command 'mount.cifs' could not be found!
Dec 18 13:03:07 kvm320 dracut: dracut module 'resume' will not be installed,
because it's in the list to be omitted!
Dec 18 13:03:07 kvm320 dracut: dracut module 'busybox' will not be
installed, because command 'busybox' could not be found!
Dec 18 13:03:07 kvm320 dracut: dracut module 'dmsquash-live-ntfs' will not
be installed, because command 'ntfs-3g' could not be found!
Dec 18 13:03:08 kvm320 dracut: dracut module 'cifs' will not be installed,
because command 'mount.cifs' could not be found!
Dec 18 13:03:08 kvm320 dracut: *** Including module: bash ***
Dec 18 13:03:08 kvm320 dracut: *** Including module: fips ***
Dec 18 13:03:09 kvm320 kernel: L1TF CPU bug present and SMT on, data leak
possible. See CVE-2018-3646 and
https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.
Dec 18 13:03:09 kvm320 kvm: 1 guest now active
Dec 18 13:03:09 kvm320 kvm: 0 guests now active
Dec 18 13:03:10 kvm320 dracut: *** Including module: nss-softokn ***
Dec 18 13:03:10 kvm320 dracut: *** Including module: i18n ***
Dec 18 13:03:10 kvm320 dracut: *** Including module: drm ***
Dec 18 13:03:11 kvm320 systemd: Started Virtual Desktop Server Manager
network restoration.
Dec 18 13:03:12 kvm320 dracut: *** Including module: dm ***
Dec 18 13:03:12 kvm320 dracut: Skipping udev rule: 64-device-mapper.rules
Dec 18 13:03:12 kvm320 dracut: Skipping udev rule:
60-persistent-storage-dm.rules
Dec 18 13:03:12 kvm320 dracut: Skipping udev rule: 55-dm.rules
Dec 18 13:03:12 kvm320 dracut: *** Including module: kernel-modules ***
Dec 18 13:03:18 kvm320 dracut: *** Including module: lvm ***
Dec 18 13:03:18 kvm320 dracut: Skipping udev rule: 64-device-mapper.rules
Dec 18 13:03:18 kvm320 dracut: Skipping udev rule: 56-lvm.rules
Dec 18 13:03:18 kvm320 dracut: Skipping udev rule:
60-persistent-storage-lvm.rules
Dec 18 13:03:19 kvm320 dracut: *** Including module: fstab-sys ***
Dec 18 13:03:19 kvm320 dracut: *** Including module: rootfs-block ***
Dec 18 13:03:19 kvm320 dracut: *** Including module: terminfo ***
Dec 18 13:03:19 kvm320 dracut: *** Including module: udev-rules ***
Dec 18 13:03:20 kvm320 dracut: Skipping udev rule:
40-redhat-cpu-hotplug.rules
Dec 18 13:03:20 kvm320 dracut: Skipping udev rule: 91-permissions.rules
Dec 18 13:03:20 kvm320 dracut: *** Including module: biosdevname ***
Dec 18 13:03:20 kvm320 dracut: *** Including module: systemd ***
Dec 18 13:03:21 kvm320 dracut: *** Including module: usrmount ***
Dec 18 13:03:21 kvm320 dracut: *** Including module: base ***
Dec 18 13:03:21 kvm320 dracut: *** Including module: fs-lib ***
Dec 18 13:03:21 kvm320 dracut: *** Including module: kdumpbase ***
Dec 18 13:03:22 kvm320 dracut: *** Including module:
microcode_ctl-fw_dir_override ***
Dec 18 13:03:22 kvm320 dracut: microcode_ctl module: mangling fw_dir
Dec 18 13:03:22 kvm320 dracut: microcode_ctl: reset fw_dir to
"/lib/firmware/updates /lib/firmware"
Dec 18 13:03:22 kvm320 dracut: microcode_ctl: processing data directory
"/usr/share/microcode_ctl/ucode_with_caveats/intel-06-4f-01"...
Dec 18 13:03:22 kvm320 dracut: microcode_ctl: kernel version
"3.10.0-957.1.3.el7.x86_64" failed early load check for "intel-06-4f-01",
skipping
Dec 18 13:03:22 kvm320 dracut: microcode_ctl: processing data directory
"/usr/share/microcode_ctl/ucode_with_caveats/intel"...
Dec 18 13:03:22 kvm320 dracut: microcode_ctl: intel: Host-Only mode is
enabled and "intel-ucode/06-3a-09" matches "intel-ucode/*"
Dec 18 13:03:22 kvm320 dracut: microcode_ctl: intel: caveats check for
kernel version "3.10.0-957.1.3.el7.x86_64" passed, adding
"/usr/share/microcode_ctl/ucode_with_caveats/intel" to fw_dir variable
Dec 18 13:03:22 kvm320 dracut: microcode_ctl: final fw_dir:
"/usr/share/microcode_ctl/ucode_with_caveats/intel /lib/firmware/updates
/lib/firmware"
Dec 18 13:03:22 kvm320 dracut: *** Including module: shutdown ***
Dec 18 13:03:22 kvm320 dracut: *** Including modules done ***
Dec 18 13:03:22 kvm320 dracut: *** Installing kernel module dependencies and
firmware ***
Dec 18 13:03:22 kvm320 dracut: *** Installing kernel module dependencies and
firmware done ***
Dec 18 13:03:22 kvm320 dracut: *** Resolving executable dependencies ***
Dec 18 13:03:23 kvm320 dracut: *** Resolving executable dependencies done***
Dec 18 13:03:23 kvm320 dracut: *** Hardlinking files ***
Dec 18 13:03:23 kvm320 dracut: *** Hardlinking files done ***
Dec 18 13:03:23 kvm320 dracut: *** Generating early-microcode cpio image
contents ***
Dec 18 13:03:23 kvm320 dracut: *** Constructing GenuineIntel.bin ****
Dec 18 13:03:23 kvm320 dracut: *** Constructing GenuineIntel.bin ****
Dec 18 13:03:23 kvm320 dracut: *** Store current command line parameters ***
Dec 18 13:03:23 kvm320 dracut: *** Creating image file ***
Dec 18 13:03:23 kvm320 dracut: *** Creating microcode section ***
Dec 18 13:03:23 kvm320 dracut: *** Created microcode section ***
Dec 18 13:03:30 kvm320 dracut: *** Creating image file done ***
Dec 18 13:03:32 kvm320 dracut: *** Creating initramfs image file
'/boot/initramfs-3.10.0-957.1.3.el7.x86_64kdump.img' done ***
Dec 18 13:03:34 kvm320 kdumpctl: kexec: loaded kdump kernel
Dec 18 13:03:34 kvm320 kdumpctl: Starting kdump: [OK]
Dec 18 13:03:34 kvm320 systemd: Started Crash recovery kernel arming.
Dec 18 13:03:34 kvm320 systemd: Reached target Multi-User System.
Dec 18 13:03:34 kvm320 systemd: Starting Update UTMP about System Runlevel
Changes...
Dec 18 13:03:34 kvm320 systemd: Started Stop Read-Ahead Data Collection 10s
After Completed Startup.
Dec 18 13:03:34 kvm320 systemd: Started Update UTMP about System Runlevel
Changes.
Dec 18 13:03:34 kvm320 systemd: Startup finished in 682ms (kernel) + 4.947s
(initrd) + 1min 7.876s (userspace) = 1min 13.505s.
Dec 18 13:03:37 kvm320 systemd: getty@tty1.service has no holdoff time,
scheduling restart.
Dec 18 13:03:37 kvm320 systemd: Cannot add dependency job for unit
lvm2-lvmetad.socket, ignoring: Unit is masked.
Dec 18 13:03:37 kvm320 systemd: Stopped Getty on tty1.
Dec 18 13:03:37 kvm320 systemd: Started Getty on tty1.
Dec 18 13:03:51 kvm320 systemd: Created slice User Slice of root.
Dec 18 13:03:51 kvm320 systemd-logind: New session 2 of user root.
Dec 18 13:03:51 kvm320 systemd: Started Session 2 of user root.
Dec 18 13:04:04 kvm320 systemd: Starting Stop Read-Ahead Data Collection...
Dec 18 13:04:04 kvm320 systemd: Started Stop Read-Ahead Data Collection.
Dec 18 13:05:03 kvm320 systemd-logind: New session 3 of user root.
Dec 18 13:05:03 kvm320 systemd: Started Session 3 of user root.
Dec 18 13:05:05 kvm320 systemd-logind: Removed session 3.
Dec 18 13:05:14 kvm320 systemd-logind: New session 4 of user root.
Dec 18 13:05:14 kvm320 systemd: Started Session 4 of user root.
Dec 18 13:05:16 kvm320 systemd-logind: Removed session 4.
Dec 18 13:06:59 kvm320 systemd-logind: New session 5 of user root.
Dec 18 13:06:59 kvm320 systemd: Started Session 5 of user root.
Dec 18 13:07:10 kvm320 python: ansible-setup Invoked with
filter=ansible_service_mgr gather_subset=['!all']
fact_path=/etc/ansible/facts.d gather_timeout=10
Dec 18 13:07:13 kvm320 python: ansible-systemd Invoked with no_block=False
force=None name=chronyd enabled=True daemon_reload=False state=None
masked=None scope=None user=None
Dec 18 13:07:23 kvm320 python: ansible-setup Invoked with
filter=ansible_service_mgr gather_subset=['!all']
fact_path=/etc/ansible/facts.d gather_timeout=10
Dec 18 13:07:26 kvm320 python: ansible-systemd Invoked with no_block=False
force=None name=chronyd enabled=None daemon_reload=False state=restarted
masked=None scope=None user=None
Dec 18 13:07:26 kvm320 systemd: Cannot add dependency job for unit
lvm2-lvmetad.socket, ignoring: Unit is masked.
Dec 18 13:07:26 kvm320 chronyd[4367]: chronyd exiting
Dec 18 13:07:26 kvm320 systemd: Stopping NTP client/server...
Dec 18 13:07:26 kvm320 systemd: Stopped NTP client/server.
Dec 18 13:07:26 kvm320 systemd: Starting NTP client/server...
Dec 18 13:07:26 kvm320 chronyd[20489]: chronyd version 3.2 starting (+CMDMON
+NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SECHASH +SIGND +ASYNCDNS +IPV6
+DEBUG)
Dec 18 13:07:26 kvm320 chronyd[20489]: Frequency -26.525 +/- 1.227 ppm read
from /var/lib/chrony/drift
Dec 18 13:07:26 kvm320 systemd: Started NTP client/server.
Dec 18 13:07:33 kvm320 chronyd[20489]: Selected source 213.209.109.44
Dec 18 13:07:36 kvm320 python: ansible-command Invoked with warn=True
executable=None _uses_shell=True _raw_params=vdsm-tool configure --force
removes=None argv=None creates=None chdir=None stdin=None
Dec 18 13:07:37 kvm320 systemd: Stopping Auxiliary vdsm service for running
helper functions as root...
Dec 18 13:07:37 kvm320 daemonAdapter: Traceback (most recent call last):
Dec 18 13:07:37 kvm320 daemonAdapter: File
"/usr/lib64/python2.7/multiprocessing/util.py", line 268, in _run_finalizers
Dec 18 13:07:37 kvm320 daemonAdapter: finalizer()
Dec 18 13:07:37 kvm320 daemonAdapter: File
"/usr/lib64/python2.7/multiprocessing/util.py", line 201, in __call__
Dec 18 13:07:37 kvm320 daemonAdapter: res = self._callback(*self._args,
**self._kwargs)
Dec 18 13:07:37 kvm320 daemonAdapter: OSError: [Errno 2] No such file or
directory: '/var/run/vdsm/svdsm.sock'
Dec 18 13:07:37 kvm320 systemd: Stopped Auxiliary vdsm service for running
helper functions as root.
Dec 18 13:07:37 kvm320 systemd: Stopped Virtual Desktop Server Manager
network restoration.
Dec 18 13:07:37 kvm320 systemd: Stopping Virtualization daemon...
Dec 18 13:07:37 kvm320 systemd: Stopped Virtualization daemon.
Dec 18 13:07:37 kvm320 saslpasswd2: error deleting entry from sasldb:
BDB0073 DB_NOTFOUND: No matching key/data pair found
Dec 18 13:07:37 kvm320 saslpasswd2: error deleting entry from sasldb:
BDB0073 DB_NOTFOUND: No matching key/data pair found
Dec 18 13:07:37 kvm320 saslpasswd2: error deleting entry from sasldb:
BDB0073 DB_NOTFOUND: No matching key/data pair found
Dec 18 13:07:38 kvm320 systemd: Cannot add dependency job for unit
lvm2-lvmetad.socket, ignoring: Unit is masked.
Dec 18 13:07:38 kvm320 systemd: Starting Virtual Machine and Container
Registration Service...
Dec 18 13:07:38 kvm320 systemd: Started Virtual Machine and Container
Registration Service.
Dec 18 13:07:38 kvm320 systemd: Starting Virtualization daemon...
Dec 18 13:07:38 kvm320 systemd: Started Virtualization daemon.
Dec 18 13:07:38 kvm320 systemd: Cannot add dependency job for unit
lvm2-lvmetad.socket, ignoring: Unit is masked.
Dec 18 13:07:38 kvm320 systemd: Started Auxiliary vdsm service for running
helper functions as root.
Dec 18 13:07:38 kvm320 journal: failed to load module nvdimm:
libbd_nvdimm.so.2: cannot open shared object file: No such file or directory
Dec 18 13:08:48 kvm320 python: ansible-pv Invoked with action=create
disks=/dev/sdb options=--dataalignment 3072k size=None
Dec 18 13:08:48 kvm320 kernel: sdb:
Dec 18 13:10:01 kvm320 systemd: Started Session 6 of user root.
Dec 18 13:10:48 kvm320 chronyd[20489]: Selected source 178.63.9.110
Dec 18 13:14:16 kvm320 systemd-logind: New session 7 of user root.
Dec 18 13:14:16 kvm320 systemd: Started Session 7 of user root.
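Judging by the ansible-pv line above, the step it hangs on should be roughly equivalent to running the following by hand on kvm320 (device and alignment value taken from the log; this is just my next debugging idea, not something from the docs):
pvcreate --dataalignment 3072k /dev/sdb
pvs
lsblk /dev/sdb
If pvcreate itself hangs or complains about an existing signature, I suppose something like wipefs -a /dev/sdb would be needed first, but I have not tried that yet.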
Maybe somebody could point me in the right direction.
Stefan
oVirt Node 4.2.7 upgrade fails with broken dependencies?
by Rogério Ceni Coelho
Hi all!
Broken dependencies?
[root@nscovirtkvm41prdpoa ~]# yum update
Loaded plugins: enabled_repos_upload, fastestmirror, package_upload,
product-id, search-disabled-repos, subscription-manager, vdsmupgrade
This system is not registered with an entitlement server. You can use
subscription-manager to register.
Loading mirror speeds from cached hostfile
* base: mirror.ufscar.br
* epel: mirror.ci.ifes.edu.br
* extras: centos.brnet.net.br
* ovirt-4.2: www.gtlib.gatech.edu
* ovirt-4.2-epel: mirror.ci.ifes.edu.br
* updates: mirror.ufscar.br
Resolving Dependencies
--> Running transaction check
---> Package collectd.x86_64 0:5.8.0-6.1.el7 will be updated
--> Processing Dependency: collectd(x86-64) = 5.8.0-6.1.el7 for package:
collectd-disk-5.8.0-6.1.el7.x86_64
--> Processing Dependency: collectd(x86-64) = 5.8.0-6.1.el7 for package:
collectd-write_http-5.8.0-6.1.el7.x86_64
---> Package collectd.x86_64 0:5.8.1-1.el7 will be an update
---> Package collectd-netlink.x86_64 0:5.8.0-6.1.el7 will be updated
---> Package collectd-netlink.x86_64 0:5.8.1-1.el7 will be an update
---> Package collectd-virt.x86_64 0:5.8.0-6.1.el7 will be updated
---> Package collectd-virt.x86_64 0:5.8.1-1.el7 will be an update
---> Package ovirt-hosted-engine-setup.noarch 0:2.2.30-1.el7 will be updated
---> Package ovirt-hosted-engine-setup.noarch 0:2.2.32-1.el7 will be an
update
--> Finished Dependency Resolution
Error: Package: collectd-write_http-5.8.0-6.1.el7.x86_64
(@ovirt-4.2-centos-opstools)
Requires: collectd(x86-64) = 5.8.0-6.1.el7
Removing: collectd-5.8.0-6.1.el7.x86_64
(@ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-6.1.el7
Updated By: collectd-5.8.1-1.el7.x86_64 (epel)
collectd(x86-64) = 5.8.1-1.el7
Available: collectd-5.7.2-1.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.7.2-1.el7
Available: collectd-5.7.2-3.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.7.2-3.el7
Available: collectd-5.8.0-2.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-2.el7
Available: collectd-5.8.0-3.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-3.el7
Available: collectd-5.8.0-5.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-5.el7
Error: Package: collectd-disk-5.8.0-6.1.el7.x86_64
(@ovirt-4.2-centos-opstools)
Requires: collectd(x86-64) = 5.8.0-6.1.el7
Removing: collectd-5.8.0-6.1.el7.x86_64
(@ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-6.1.el7
Updated By: collectd-5.8.1-1.el7.x86_64 (epel)
collectd(x86-64) = 5.8.1-1.el7
Available: collectd-5.7.2-1.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.7.2-1.el7
Available: collectd-5.7.2-3.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.7.2-3.el7
Available: collectd-5.8.0-2.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-2.el7
Available: collectd-5.8.0-3.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-3.el7
Available: collectd-5.8.0-5.el7.x86_64
(ovirt-4.2-centos-opstools)
collectd(x86-64) = 5.8.0-5.el7
You could try using --skip-broken to work around the problem
You could try running: rpm -Va --nofiles --nodigest
Uploading Enabled Repositories Report
Loaded plugins: fastestmirror, product-id, subscription-manager
This system is not registered with an entitlement server. You can use
subscription-manager to register.
Cannot upload enabled repos report, is this client registered?
[root@nscovirtkvm41prdpoa ~]# yum repolist
Loaded plugins: enabled_repos_upload, fastestmirror, package_upload,
product-id, search-disabled-repos, subscription-manager, vdsmupgrade
This system is not registered with an entitlement server. You can use
subscription-manager to register.
Loading mirror speeds from cached hostfile
* base: mirror.ufscar.br
* epel: mirror.ci.ifes.edu.br
* extras: centos.brnet.net.br
* ovirt-4.2: www.gtlib.gatech.edu
* ovirt-4.2-epel: mirror.ci.ifes.edu.br
* updates: mirror.ufscar.br
repo id                              repo name                                                                   status
base/7/x86_64                        CentOS-7 - Base                                                              9,911
centos-sclo-rh-release/x86_64        CentOS-7 - SCLo rh                                                           8,099
epel/x86_64                          Extra Packages for Enterprise Linux 7 - x86_64                              12,708
extras/7/x86_64                      CentOS-7 - Extras                                                              434
ovirt-4.2/7                          Latest oVirt 4.2 Release                                                     2,439
ovirt-4.2-centos-gluster312/x86_64   CentOS-7 - Gluster 3.12                                                        262
ovirt-4.2-centos-opstools/x86_64     CentOS-7 - OpsTools - release                                                  666
ovirt-4.2-centos-ovirt42/x86_64      CentOS-7 - oVirt 4.2                                                           582
ovirt-4.2-centos-qemu-ev/x86_64      CentOS-7 - QEMU EV                                                              63
ovirt-4.2-epel/x86_64                Extra Packages for Enterprise Linux 7 - x86_64                              12,708
ovirt-4.2-virtio-win-latest          virtio-win builds roughly matching what will be shipped in upcoming RHEL       38
updates/7/x86_64                     CentOS-7 - Updates                                                           1,614
zabbix/7/x86_64                      Zabbix Official Repository - x86_64                                            183
zabbix-non-supported/x86_64          Zabbix Official Repository non-supported - x86_64                                4
repolist: 49,711
Uploading Enabled Repositories Report
Loaded plugins: fastestmirror, product-id, subscription-manager
This system is not registered with an entitlement server. You can use
subscription-manager to register.
Cannot upload enabled repos report, is this client registered?
[root@nscovirtkvm41prdpoa ~]#
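The only workaround I can think of so far, which I have not tested yet, is to keep collectd pinned to the opstools build by excluding it from epel, something like:
yum update --exclude=collectd*
or permanently by adding exclude=collectd* to the [epel] section of the repo file under /etc/yum.repos.d/. But I would rather understand why epel offers collectd 5.8.1 while the opstools plugins still require 5.8.0-6.1.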
Disk corruption on hosted engine
by Callum Smith
Dear All,
So we've had some major disk corruption on our hosted engine (hardware is to blame), and we have taken backups. However, the hosted-engine VM will no longer boot at all, the database is thoroughly corrupted, and we need to rebuild the thing. Just a sanity check on the best route:
Preamble:
VMs are still running fine - only hosted engine affected
VMs are distributed across our entire 3 node cluster
All 3 nodes are registered as hosted engine candidates
1. Do another hosted-engine --deploy on one of the existing hosts, and then restore the backup into that
2. Build a new host, deploy the hosted-engine, then restore a backup on a fresh node
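Whichever route we take, I assume the restore itself on the freshly deployed engine VM is the usual engine-backup run against the file we already have, roughly like this (flags from memory, to be checked against the docs before running; the filename is a placeholder for whatever our backup job produced):
engine-backup --mode=restore --file=engine-backup.tar.gz --log=engine-restore.log --provision-db --restore-permissions
followed by engine-setup.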
Regards,
Callum
--
Callum Smith
Research Computing Core
Wellcome Trust Centre for Human Genetics
University of Oxford
e. callum@well.ox.ac.uk
VM Portal: Error in some VMs
by mustafa.taha.mu95@gmail.com
Hi,
I want some users to connect to some oVirt VMs, so I added them to the permissions of those VMs.
After that, my users can connect to the VM Portal and log in successfully, but when they try to open the console, some VMs work and others do not, and this message appears:
"Failed to retrieve VM console details Conflict"
Any idea about that?
Port 9090 Redirect Nginx Proxy
by dcsteve24@gmail.com
I've run into a small issue I'm looking to resolve. I currently have 5 hosts I'm working to configure as oVirt Hosts and a single oVirt Manager to manage them all. The oVirt Manager can talk to the hosts, the hosts show up, they can be managed, etc.
Our client PCs reside on a different network (and this can't be changed). We used to just add the interfaces to communicate on the new network but because of IP spaces and DFARS/NIST requirements, we are moving away from this. I've set up a reverse proxy so only a single machine serves as the gateway between the networks. This works for everything except one small issue. If you click "Host Console" from the ovirt manager, it opens up a new page with port 9090 to the cockpit page of the host.
I have this working except I am getting a blank page for Cockpit after logging in. What's even more odd, I can use the reverse proxy settings to browse to the 443 port and redirect to 9090 and everything loads fine, but 9090 to 9090 does not work. I've looked at other threads and posts, including the GitLab thing, which got me here, but I can't get this last missing piece. Is anybody able to provide any assistance? Based on my research it looks like something I could edit in the cockpit.conf file, except oVirt apparently doesn't create a cockpit.conf file... and trying to force it through the GUI by adding /settings to the URL says page not found.
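For reference, the kind of /etc/cockpit/cockpit.conf I was expecting to be able to drop onto each host looks like this; the Origins values are my guess at what the proxied websocket needs, and the hostname is a placeholder:
[WebService]
Origins = https://host11.<domain>:9090 wss://host11.<domain>:9090
ProtocolHeader = X-Forwarded-Proto
followed by a systemctl restart cockpit.socket on the host. I have not confirmed these are the right options for the blank-page symptom, which is partly why I am asking here.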
Alternatively, I could edit the oVirt Manager to point the client to the 443 port, and that would work if I knew where to edit the hyperlinks it opens on that click.
P.S. This is a test/development internal network environment, which is why I have proxy_ssl_verify off.
Nginx Reverse Proxy config:
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream websocket {
server host11.<domain>:9090;
}
server {
listen 80;
server_name host11.<domain>;
return 301 https://$server_name$request_uri;
}
#Allows user to go directly to server (I work!)
server {
listen 443;
server_name host11.<domain>;
ssl on;
ssl_certificate <path to certificate.pem>;
ssl_certificate_key <path to key.pem>;
proxy_ssl_verify off;
location / {
proxy_pass https://websocket;
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
# needed for websocket
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# change scheme of "Origin" to https
proxy_set_header Origin https://$host;
gzip off;
}
}
#allows ovirtmgr host console redirect (I don't work)
server {
listen 9090;
server_name host11.<domain>;
ssl on;
ssl_certificate <path to certificate.pem>;
ssl_certificate_key <path to key.pem>;
proxy_ssl_verify off;
location / {
proxy_pass https://websocket;
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
# needed for websocket
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# change scheme of "Origin" to https
proxy_set_header Origin https://$host;
gzip off;
}
}
Host Error Log:
#Log in Page
Dec 17 11:22:15 ovirt11 systemd: Cannot add dependency job for unit lvm2-lvmetad.socket, ignoring: Unit is masked.
Dec 17 11:22:15 ovirt11 systemd: Starting Cockpit Web Service...
Dec 17 11:22:15 ovirt11 systemd: Started Cockpit Web Service.
Dec 17 11:22:16 ovirt11 cockpit-ws: Using certificate: /etc/cockpit/ws-certs.d/0-self-signed.cert
Dec 17 11:22:16 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=18185 DF PROTO=TCP SPT=34518 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:42 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=54797 DF PROTO=TCP SPT=34520 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
#after successfully logging in
Dec 17 11:22:42 ovirt11 cockpit-session: pam_ssh_add: Failed adding some keys
Dec 17 11:22:42 ovirt11 systemd-logind: New session 856 of user locadmin.
Dec 17 11:22:42 ovirt11 systemd: Started Session 856 of user locadmin.
Dec 17 11:22:43 ovirt11 cockpit-ws: logged in user session
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=28754 DF PROTO=TCP SPT=34522 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=24281 DF PROTO=TCP SPT=34524 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=32811 DF PROTO=TCP SPT=34526 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=24156 DF PROTO=TCP SPT=34528 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=60945 DF PROTO=TCP SPT=34530 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=31779 DF PROTO=TCP SPT=34532 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=45849 DF PROTO=TCP SPT=34534 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=7433 DF PROTO=TCP SPT=34536 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=13932 DF PROTO=TCP SPT=34538 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=48921 DF PROTO=TCP SPT=34540 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=23475 DF PROTO=TCP SPT=34542 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=21419 DF PROTO=TCP SPT=34544 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=54052 DF PROTO=TCP SPT=34546 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:43 ovirt11 kernel: HTTPS SYN: IN=ovirtmgmt OUT= MAC=b8:2a:72:de:a3:76:00:50:56:8b:01:ca:08:00 SRC=192.168.100.251 DST=192.168.100.111 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=34558 DF PROTO=TCP SPT=34548 DPT=9090 WINDOW=29200 RES=0x00 SYN URGP=0
Dec 17 11:22:58 ovirt11 cockpit-ws: session timed out
Dec 17 11:22:58 ovirt11 systemd-logind: Removed session 856.
Gluster Disk Full
by suporte@logicworks.pt
Hi,
I have a Gluster volume whose disk is full. It's a simple oVirt all-in-one setup.
The VM with its disk on that volume has stopped.
The error message:
Failed to update OVF disks 1252828a-3265-4d5c-9440-40b2b85c1feb, OVF data isn't updated on those OVF stores (Data Center Default, Storage Domain gfs1).
VDSM command SetVolumeDescriptionVDS failed: Error while updating volume meta data: ("(u'/rhev/data-center/mnt/glusterSD/gfs1.growtrade.pt:_gv0/0f21e2bf-44d9-4afd-8800-28e99d617a4b/images/1252828a-3265-4d5c-9440-40b2b85c1feb/f8b62638-ba48-4ed8-8101-3027c124d20c',)[Errno 28] No space left on device: u'/rhev/data-center/mnt/glusterSD/gfs1.growtrade.pt:_gv0/0f21e2bf-44d9-4afd-8800-28e99d617a4b/images/1252828a-3265-4d5c-9440-40b2b85c1feb/f8b62638-ba48-4ed8-8101-3027c124d20c.meta.new'",)
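The checks I am planning to run, to see how full the brick actually is, are along these lines (gv0 is the volume named in the error above):
df -h /rhev/data-center/mnt/glusterSD/gfs1.growtrade.pt:_gv0
gluster volume status gv0 detail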
Any idea what I can do?
Thanks
--
Jose Ferradeira
http://www.logicworks.pt
oVirt agent
by mustafa.taha.mu95@gmail.com
I am trying to install the oVirt guest agent on Ubuntu. I tried to open the documentation, but it does not work and this page appears instead:
https://www.ovirt.org/develop/
Can the oVirt team fix that, or is there something new to use other than the agent?
Import OVA
by suporte@logicworks.pt
Hi,
I have created an export domain using GlusterFS.
When I go to Storage > Domains and click on the export domain, the Import button on the VM Import tab is greyed out.
My test environment is running version 4.2.7.5-1.el7.
Any idea?
Thanks
--
Jose Ferradeira
http://www.logicworks.pt
Regarding oVirt node installation
by Hemant Gupta
Hi,
While adding an oVirt node to the oVirt engine, I am getting the network errors below in ovirt-engine.log:
2018-12-14 10:38:58,900+05 INFO
[org.ovirt.engine.core.bll.provider.network.SyncNetworkProviderCommand]
(EE-ManagedThreadFactory-engineScheduled-Thread-85) [5a45cbef] Lock freed
to object
'EngineLock:{exclusiveLocks='[3b5faef7-e0f5-4995-934e-0d681c253d8f=PROVIDER]',
sharedLocks=''}'
2018-12-14 10:42:05,962+05 ERROR
[org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector]
(EE-ManagedThreadFactory-engineScheduled-Thread-66) [] EVENT_ID:
VDS_BROKER_COMMAND_FAILURE(10,802), VDSM OvirtNode command Get Host
Capabilities failed: Message timeout which can be caused by communication
issues
2018-12-14 10:42:05,962+05 ERROR
[org.ovirt.engine.core.vdsbroker.monitoring.HostMonitoring]
(EE-ManagedThreadFactory-engineScheduled-Thread-66) [] Unable to
RefreshCapabilities: VDSNetworkException: VDSGenericException:
VDSNetworkException: Message timeout which can be caused by communication
issues
2018-12-14 10:43:42,433+05 INFO
[org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService]
(EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool
'default' is using 0 threads out of 1, 5 threads waiting for tasks.
2018-12-14 10:43:42,433+05 INFO
[org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService]
(EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool
'engineScheduled' is using 0 threads out of 100, 100 threads waiting for
tasks.
2018-12-14 10:43:42,433+05 INFO
[org.ovirt.engine.core.bll.utils.ThreadPoolMonitoringService]
(EE-ManagedThreadFactory-engineThreadMonitoring-Thread-1) [] Thread pool
'engineThreadMonitoring' is using 1 threads out of 1, 0 threads waiting for
tasks.
2018-12-14 10:43:58,949+05 INFO
[org.ovirt.engine.core.bll.provider.network.SyncNetworkProviderCommand]
(EE-ManagedThreadFactory-engineScheduled-Thread-30) [28e597cc] Lock
Acquired to object
'EngineLock:{exclusiveLocks='[3b5faef7-e0f5-4995-934e-0d681c253d8f=PROVIDER]',
sharedLocks=''}'
2018-12-14 10:43:58,959+05 INFO
[org.ovirt.engine.core.bll.provider.network.SyncNetworkProviderCommand]
(EE-ManagedThreadFactory-engineScheduled-Thread-30) [28e597cc] Running
command: SyncNetworkProviderCommand internal: true.
2018-12-14 10:43:59,248+05 INFO
[org.ovirt.engine.core.sso.utils.AuthenticationUtils] (default task-47) []
User admin@internal successfully logged in with scopes: ovirt-app-api
ovirt-ext=token-info:authz-search ovirt-ext=token-info:public-authz-search
ovirt-ext=token-info:validate ovirt-ext=token:password-access
2018-12-14 10:43:59,438+05 INFO
[org.ovirt.engine.core.bll.provider.network.SyncNetworkProviderCommand]
(EE-ManagedThreadFactory-engineScheduled-Thread-30) [28e597cc] Lock freed
to object
'EngineLock:{exclusiveLocks='[3b5faef7-e0f5-4995-934e-0d681c253d8f=PROVIDER]',
sharedLocks=''}'
2018-12-14 10:45:28,996+05 ERROR
[org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector]
(EE-ManagedThreadFactory-engineScheduled-Thread-68) [] EVENT_ID:
VDS_BROKER_COMMAND_FAILURE(10,802), VDSM OvirtNode command Get Host
Capabilities failed: Message timeout which can be caused by communication
issues
2018-12-14 10:45:28,996+05 ERROR
[org.ovirt.engine.core.vdsbroker.monitoring.HostMonitoring]
(EE-ManagedThreadFactory-engineScheduled-Thread-68) [] Unable to
RefreshCapabilities: VDSNetworkException: VDSGenericException:
VDSNetworkException: Message timeout which can be caused by communication
issues
https://www.ovirt.org/documentation/install-guide/chap-System_Requirements/
Will I need to open ports between my oVirt engine and oVirt node for the node connection and setup to complete?
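The check I was planning, based on that page, is along these lines; 54321/tcp is the VDSM port the engine uses to talk to the host, which seems to match the "Get Host Capabilities" timeouts above, and the page lists more ports than I show here:
nc -zv <ovirt-node-hostname> 54321
and, on the node, if the port turns out to be blocked:
firewall-cmd --add-port=54321/tcp --permanent && firewall-cmd --reload
<ovirt-node-hostname> is just a placeholder for my node's name.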
Please help, I am stuck here...