[Pacemaker] VirtualDomain problem after reboot of one node

zorg zorg at probesys.com
Tue Jan 29 05:24:14 EST 2013


Hello,
I found someone reporting the same problem back in August, but no solution was given.

I have set up a 2-node cluster and configured VirtualDomain
resources using iSCSI storage and libvirt.

When I reboot one node (shutdown -r now), everything seems
to be OK: the virtual machines running on that node are migrated
away and continue to run on the other node.

But when the node comes back up, it looks like Pacemaker first
probes it to check that none of the configured resources are already running there.

These probes fail for the VirtualDomain resources because libvirtd
is not running yet at that point, and I see a lot of errors like:

Jan 29 11:03:58 node1 lrmd: [1807]: info: RA output:
(vm_VM_mysql1:probe:stderr) Failed to connect socket to
'/var/run/libvirt/libvirt-sock': No such file or directory
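
Presumably the same thing can be seen by hand on the rebooted node before libvirt-bin is up, since as far as I can tell the resource agent uses virsh under the hood; a plain

virsh -c qemu:///system list --all

fails with the same "Failed to connect socket to '/var/run/libvirt/libvirt-sock'" message until libvirtd has been started.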

The VMs are then stopped on all nodes and restarted, but no migration occurs.

So every time a node restarts, all the VMs in the cluster get restarted,
which is not really what I want. I guess the ordering constraints on
cl_cluster_service do not apply to these initial probes?
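
If it helps, after this happens the failed probes also show up as failed actions / fail counts in the cluster status, e.g. with:

crm_mon -1 -f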


Here is my configuration:
node node1 \
     utilization cpu="32" hv_memory="132285200" \
     utilization io_usage="10000" \
     attributes standby="off"
node node2 \
     utilization cpu="32" hv_memory="132285200" \
     attributes standby="off" \
     utilization io_usage="10000"
primitive p_iscsi ocf:heartbeat:iscsi \
     params portal="192.168.2.2:3260" target="iqn.2013-01.grek.zorba" \
     op start interval="0" timeout="120" \
     op stop interval="0" timeout="120"
primitive p_libvirtd lsb:libvirt-bin \
     op monitor interval="120s" \
     op start interval="0" \
     op stop interval="0"
primitive p_open-iscsi lsb:open-iscsi \
     op start interval="0" timeout="20" \
     op stop interval="0" timeout="20" \
     op monitor interval="20"
primitive s_ipmi_node1 stonith:fence_ipmilan \
     op monitor interval="20s" \
     params action="off" pcmk_host_check="static-list" pcmk_host_list="node2" ipaddr="13.6.23.12" login="corosync" passwd="mdp" power_wait="4" lanplus="true" delay="1"
primitive s_ipmi_node2 stonith:fence_ipmilan \
     op monitor interval="20s" \
     params action="off" pcmk_host_check="static-list" pcmk_host_list="node1" ipaddr="13.6.23.10" login="corosync" passwd="mdp" power_wait="4" lanplus="true" delay="10"
primitive vm_VM_django1 ocf:heartbeat:VirtualDomain \
     params config="/etc/libvirt/qemu/VM_django1.xml" hypervisor="qemu:///system" migration_transport="ssh" \
     meta allow-migrate="true" target-role="Started" \
     op start interval="0" timeout="3min" \
     op stop interval="0" timeout="3min" \
     op migrate_from interval="0" timeout="5min" \
     op migrate_to interval="0" timeout="5min" \
     op monitor interval="60" timeout="30" \
     utilization cpu="2" hv_memory="8192" io_usage="10"
primitive vm_VM_mysql1 ocf:heartbeat:VirtualDomain \
     params config="/etc/libvirt/qemu/VM_mysql1.xml" hypervisor="qemu:///system" migration_transport="ssh" \
     meta allow-migrate="true" target-role="Started" \
     op start interval="0" timeout="3min" \
     op stop interval="0" timeout="3min" \
     op migrate_from interval="0" timeout="5min" \
     op migrate_to interval="0" timeout="5min" \
     op monitor interval="60" timeout="30" \
     utilization io_usage="10" cpu="1" hv_memory="2048"
primitive vm_VM_lamp1 ocf:heartbeat:VirtualDomain \
     params config="/etc/libvirt/qemu/VM_lamp1.xml" hypervisor="qemu:///system" migration_transport="ssh" \
     meta allow-migrate="true" target-role="Started" is-managed="true" \
     op start interval="0" timeout="3min" \
     op stop interval="0" timeout="3min" \
     op migrate_from interval="0" timeout="5min" \
     op migrate_to interval="0" timeout="5min" \
     op monitor interval="60" timeout="30" \
     utilization io_usage="50" cpu="6" hv_memory="13312"
primitive vm_VM_tomcat1 ocf:heartbeat:VirtualDomain \
     params config="/etc/libvirt/qemu/VM_tomcat1.xml" hypervisor="qemu:///system" migration_transport="ssh" \
     meta allow-migrate="true" target-role="Started" is-managed="false" \
     op start interval="0" timeout="3min" \
     op stop interval="0" timeout="3min" \
     op migrate_from interval="0" timeout="5min" \
     op migrate_to interval="0" timeout="5min" \
     op monitor interval="60" timeout="30" \
     utilization io_usage="10" cpu="4" hv_memory="10240"
group cluster_service p_open-iscsi p_iscsi p_libvirtd
clone cl_cluster_service cluster_service \
     meta target-role="Started"
location loc-prefer-stonith-1 s_ipmi_node1 \
     rule $id="loc-prefer-stonith-1-rule" inf: #uname eq node1
location loc-prefer-stonith-2 s_ipmi_node2 \
     rule $id="loc-prefer-stonith-2-rule" inf: #uname eq node2
order ord_cl_libvirtd_vm inf: cl_cluster_service vm_VM_django1
order ord_cl_libvirtd_vm_VM_mysql1 inf: cl_cluster_service vm_VM_mysql1
order ord_cl_libvirtd_vm_VM_lamp1 inf: cl_cluster_service vm_VM_lamp1
order ord_cl_libvirtd_vm_VM_tomcat1 inf: cl_cluster_service vm_VM_tomcat1
property $id="cib-bootstrap-options" \
     dc-version="1.1.7-ee0730e13d124c3d58f00016c3376a1de5323cff" \
     cluster-infrastructure="openais" \
     stonith-enabled="true" \
     expected-quorum-votes="2" \
     placement-strategy="balanced" \
     no-quorum-policy="ignore" \
     last-lrm-refresh="1359413335" \
     stonith-action="reboot"





