[Pacemaker] SLES11+HAE: Resources on a single node with two configured?
Aleksey Zholdak
aleksey at zholdak.com
Thu May 6 10:57:04 UTC 2010
Andrew,
> Which incidentally means I was right.
> The cluster wasn't even able to get to this stage before.
Yes, you are right.
> If you attach "cibadmin -Ql" I'll have a look.
To simplify the experiment, I reduced the config; here it is:
--8<-----------------------------------------------------------------------
<cib validate-with="pacemaker-1.0" crm_feature_set="3.0.1" have-quorum="0"
admin_epoch="0" epoch="677" num_updates="10" cib-last-written="Thu May 6
13:42:10 2010" dc-uuid="sles2">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair name="stonith-enabled"
id="cib-bootstrap-options-stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy"
name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-expected-quorum-votes"
name="expected-quorum-votes" value="2"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version"
value="1.0.6-c48e3360eb18c53fd68bb7e7dbe39279ccbc0354"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure"
name="cluster-infrastructure" value="openais"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="sles2" uname="sles2" type="normal"/>
<node id="sles1" uname="sles1" type="normal"/>
</nodes>
<resources>
<clone id="sbd-clone">
<meta_attributes id="sbd-clone-meta_attributes">
<nvpair id="sbd-clone-meta_attributes-target-role"
name="target-role" value="Started"/>
<nvpair id="sbd-clone-meta_attributes-ordered" name="ordered"
value="true"/>
</meta_attributes>
<primitive class="stonith" id="sbd_fense" type="external/sbd">
<instance_attributes id="sbd_fense-instance_attributes">
<nvpair id="sbd_fense-instance_attributes-sbd_device"
name="sbd_device" value="/dev/mapper/SBD"/>
</instance_attributes>
</primitive>
</clone>
<clone id="dlm-clone">
<meta_attributes id="dlm-clone-meta_attributes">
<nvpair id="dlm-clone-meta_attributes-target-role"
name="target-role" value="Started"/>
<nvpair id="dlm-clone-meta_attributes-globally-unique"
name="globally-unique" value="false"/>
<nvpair id="dlm-clone-meta_attributes-interleave"
name="interleave" value="true"/>
<nvpair id="dlm-clone-meta_attributes-ordered" name="ordered"
value="true"/>
</meta_attributes>
<primitive class="ocf" id="dlm" provider="pacemaker" type="controld">
<operations>
<op id="dlm-monitor-120s" interval="120s" name="monitor"/>
</operations>
</primitive>
</clone>
<clone id="clvm-clone">
<meta_attributes id="clvm-clone-meta_attributes">
<nvpair id="clvm-clone-meta_attributes-target-role"
name="target-role" value="Started"/>
<nvpair id="clvm-clone-meta_attributes-interleave"
name="interleave" value="true"/>
<nvpair id="clvm-clone-meta_attributes-ordered" name="ordered"
value="true"/>
</meta_attributes>
<primitive class="ocf" id="clvm" provider="lvm2" type="clvmd">
<instance_attributes id="clvm-instance_attributes">
<nvpair id="clvm-instance_attributes-daemon_timeout"
name="daemon_timeout" value="30"/>
</instance_attributes>
</primitive>
</clone>
<clone id="eCluster_vg0-clone">
<meta_attributes id="eCluster_vg0-clone-meta_attributes">
<nvpair id="eCluster_vg0-clone-meta_attributes-interleave"
name="interleave" value="true"/>
<nvpair id="eCluster_vg0-clone-meta_attributes-ordered"
name="ordered" value="true"/>
</meta_attributes>
<primitive class="ocf" id="eCluster_vg0" provider="heartbeat"
type="LVM">
<instance_attributes id="eCluster_vg0-instance_attributes">
<nvpair id="eCluster_vg0-instance_attributes-volgrpname"
name="volgrpname" value="eCluster_vg0"/>
</instance_attributes>
</primitive>
</clone>
<clone id="o2cb-clone">
<meta_attributes id="o2cb-clone-meta_attributes">
<nvpair id="o2cb-clone-meta_attributes-globally-unique"
name="globally-unique" value="false"/>
<nvpair id="o2cb-clone-meta_attributes-interleave"
name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="o2cb" provider="ocfs2" type="o2cb">
<operations>
<op id="o2cb-monitor-120s" interval="120s" name="monitor"/>
</operations>
</primitive>
</clone>
<clone id="fs-clone">
<meta_attributes id="fs-clone-meta_attributes">
<nvpair id="fs-clone-meta_attributes-interleave"
name="interleave" value="true"/>
<nvpair id="fs-clone-meta_attributes-ordered" name="ordered"
value="true"/>
</meta_attributes>
<primitive class="ocf" id="fs" provider="heartbeat" type="Filesystem">
<instance_attributes id="fs-instance_attributes">
<nvpair id="fs-instance_attributes-device" name="device"
value="/dev/eCluster_vg0/eCluster_lv0"/>
<nvpair id="fs-instance_attributes-directory" name="directory"
value="/eCluster"/>
<nvpair id="fs-instance_attributes-fstype" name="fstype"
value="ocfs2"/>
</instance_attributes>
<operations>
<op id="fs-monitor-120s" interval="120s" name="monitor"/>
</operations>
</primitive>
</clone>
<clone id="pingd-clone">
<meta_attributes id="pingd-clone-meta_attributes">
<nvpair id="pingd-clone-meta_attributes-globally-unique"
name="globally-unique" value="false"/>
<nvpair id="pingd-clone-meta_attributes-ordered" name="ordered"
value="true"/>
</meta_attributes>
<primitive class="ocf" id="pingd" provider="pacemaker" type="pingd">
<instance_attributes id="pingd-instance_attributes">
<nvpair id="pingd-instance_attributes-host_list"
name="host_list" value="172.16.3.250"/>
<nvpair id="pingd-instance_attributes-multiplier"
name="multiplier" value="100"/>
</instance_attributes>
<operations>
<op id="pingd-monitor-5s" interval="5s" name="monitor"
timeout="20s"/>
</operations>
</primitive>
</clone>
</resources>
<constraints>
<rsc_colocation id="colo-clvm" rsc="clvm-clone" score="INFINITY"
with-rsc="dlm-clone"/>
<rsc_order first="dlm-clone" id="order-clvm" score="INFINITY"
then="clvm-clone"/>
<rsc_colocation id="colo-eCluster_vg0" rsc="eCluster_vg0-clone"
score="INFINITY" with-rsc="clvm-clone"/>
<rsc_order first="clvm-clone" id="order-eCluster_vg0"
score="INFINITY" then="eCluster_vg0-clone"/>
<rsc_colocation id="o2cb-with-eCluster_vg0" rsc="o2cb-clone"
score="INFINITY" with-rsc="eCluster_vg0-clone"/>
<rsc_order first="eCluster_vg0-clone" id="order-o2cb"
score="INFINITY" then="o2cb-clone"/>
<rsc_colocation id="fs-with-o2cb" rsc="fs-clone" score="INFINITY"
with-rsc="o2cb-clone"/>
<rsc_order first="o2cb-clone" id="order-fs" score="INFINITY"
then="fs-clone"/>
<rsc_colocation id="pingd-with-fs" rsc="pingd-clone"
score="INFINITY" with-rsc="fs-clone"/>
<rsc_order first="fs-clone" id="order-pingd" score="INFINITY"
then="pingd-clone"/>
</constraints>
<rsc_defaults>
<meta_attributes id="rsc-options">
<nvpair id="rsc-options-resource-stickiness"
name="resource-stickiness" value="100"/>
</meta_attributes>
</rsc_defaults>
<op_defaults/>
</configuration>
<status>
<node_state id="sles2" uname="sles2" ha="active" in_ccm="true"
crmd="online" join="member" expected="member"
crm-debug-origin="do_update_resource" shutdown="0">
<lrm id="sles2">
<lrm_resources>
<lrm_resource id="sbd_fense:0" type="external/sbd" class="stonith">
<lrm_rsc_op id="sbd_fense:0_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="4:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:7;4:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="2" rc-code="7" op-status="0" interval="0" last-run="1273143104"
last-rc-change="1273143104" exec-time="0" queue-time="0"
op-digest="34ebf5b573e6cc889ae2272d8870afc6"/>
<lrm_rsc_op id="sbd_fense:0_start_0" operation="start"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="4:1:0:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:0;4:1:0:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="9" rc-code="0" op-status="0" interval="0" last-run="1273143104"
last-rc-change="1273143104" exec-time="1290" queue-time="0"
op-digest="34ebf5b573e6cc889ae2272d8870afc6"/>
</lrm_resource>
<lrm_resource id="clvm:0" type="clvmd" class="ocf" provider="lvm2">
<lrm_rsc_op id="clvm:0_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="6:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:7;6:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="4" rc-code="7" op-status="0" interval="0" last-run="1273143103"
last-rc-change="1273143103" exec-time="100" queue-time="0"
op-digest="21015abf7dd336e68f45ef73249ff9c6"/>
</lrm_resource>
<lrm_resource id="dlm:0" type="controld" class="ocf"
provider="pacemaker">
<lrm_rsc_op id="dlm:0_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="5:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:7;5:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="3" rc-code="7" op-status="0" interval="0" last-run="1273143103"
last-rc-change="1273143103" exec-time="100" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="eCluster_vg0:0" type="LVM" class="ocf"
provider="heartbeat">
<lrm_rsc_op id="eCluster_vg0:0_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="7:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:7;7:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="5" rc-code="7" op-status="0" interval="0" last-run="1273143103"
last-rc-change="1273143103" exec-time="180" queue-time="0"
op-digest="762069da1ca9a2eddc87dbdb6b97058b"/>
</lrm_resource>
<lrm_resource id="o2cb:0" type="o2cb" class="ocf" provider="ocfs2">
<lrm_rsc_op id="o2cb:0_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="8:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:7;8:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="6" rc-code="7" op-status="0" interval="0" last-run="1273143103"
last-rc-change="1273143103" exec-time="180" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="pingd:0" type="pingd" class="ocf"
provider="pacemaker">
<lrm_rsc_op id="pingd:0_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="10:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:7;10:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="8" rc-code="7" op-status="0" interval="0" last-run="1273143104"
last-rc-change="1273143104" exec-time="20" queue-time="1000"
op-digest="593184cf20c7d5b634c076a73404a4ee"/>
</lrm_resource>
<lrm_resource id="fs:0" type="Filesystem" class="ocf"
provider="heartbeat">
<lrm_rsc_op id="fs:0_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="9:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
transition-magic="0:7;9:0:7:b0903214-6f1a-4e07-b841-846d8c20312b"
call-id="7" rc-code="7" op-status="0" interval="0" last-run="1273143104"
last-rc-change="1273143104" exec-time="60" queue-time="1000"
op-digest="1b53d7711bab18d2e94000d0cbf5212b"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="sles2">
<instance_attributes id="status-sles2">
<nvpair id="status-sles2-probe_complete" name="probe_complete"
value="true"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
--8<-----------------------------------------------------------------------
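(Aside, for anyone reproducing this: the dump above is the output of "cibadmin -Ql", and the PE log below itself suggests "crm_verify -L" when it reports configuration WARNINGs. A minimal sketch of the commands involved, assuming the stock Pacemaker 1.0 command-line tools; adjust paths/options as needed:)
--8<-----------------------------------------------------------------------
# Dump the live CIB (this is what is pasted above)
cibadmin -Ql > /tmp/cib-reduced.xml

# Check the live configuration for warnings/errors
# (the pengine log below recommends this when WARNINGs are found)
crm_verify -L -V

# One-shot view of node and resource status
crm_mon -1
--8<-----------------------------------------------------------------------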
And here is the log after starting one of the nodes.
--8<-----------------------------------------------------------------------
May 6 13:50:39 sles2 openais[6041]: [MAIN ] AIS Executive Service RELEASE
'subrev 1152 version 0.80'
May 6 13:50:39 sles2 openais[6041]: [MAIN ] Copyright (C) 2002-2006
MontaVista Software, Inc and contributors.
May 6 13:50:39 sles2 openais[6041]: [MAIN ] Copyright (C) 2006 Red Hat, Inc.
May 6 13:50:39 sles2 openais[6041]: [MAIN ] AIS Executive Service: started
and ready to provide service.
May 6 13:50:39 sles2 openais[6041]: [TOTEM] Token Timeout (5000 ms)
retransmit timeout (490 ms)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] token hold (382 ms)
retransmits before loss (10 retrans)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] join (1000 ms) send_join (45
ms) consensus (2500 ms) merge (200 ms)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] downcheck (1000 ms) fail to
recv const (50 msgs)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] seqno unchanged const (30
rotations) Maximum network MTU 1500
May 6 13:50:39 sles2 openais[6041]: [TOTEM] window size per rotation (50
messages) maximum messages per rotation (20 messages)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] send threads (0 threads)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] RRP token expired timeout (490 ms)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] RRP token problem counter
(2000 ms)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] RRP threshold (10 problem count)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] RRP mode set to none.
May 6 13:50:39 sles2 openais[6041]: [TOTEM] heartbeat_failures_allowed (0)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] max_network_delay (50 ms)
May 6 13:50:39 sles2 openais[6041]: [TOTEM] HeartBeat is Disabled. To
enable set heartbeat_failures_allowed > 0
May 6 13:50:39 sles2 openais[6041]: [TOTEM] Receive multicast socket recv
buffer size (262142 bytes).
May 6 13:50:39 sles2 openais[6041]: [TOTEM] Transmit multicast socket send
buffer size (262142 bytes).
May 6 13:50:39 sles2 openais[6041]: [TOTEM] The network interface
[192.168.111.2] is now up.
May 6 13:50:39 sles2 openais[6041]: [TOTEM] Created or loaded sequence id
3276.192.168.111.2 for this ring.
May 6 13:50:39 sles2 openais[6041]: [TOTEM] entering GATHER state from 15.
May 6 13:50:39 sles2 openais[6041]: [crm ] info: process_ais_conf:
Reading configure
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: config_find_next:
Processing additional logging options...
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: get_config_opt: Found
'off' for option: debug
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: get_config_opt: Found
'no' for option: to_file
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: get_config_opt: Found
'local7' for option: syslog_facility
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: config_find_next:
Processing additional service options...
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: get_config_opt:
Defaulting to 'pcmk' for option: clustername
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: get_config_opt: Found
'yes' for option: use_logd
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: get_config_opt: Found
'yes' for option: use_mgmtd
May 6 13:50:39 sles2 openais[6041]: [crm ] info: pcmk_startup: CRM:
Initialized
May 6 13:50:39 sles2 openais[6041]: [crm ] Logging: Initialized pcmk_startup
May 6 13:50:39 sles2 openais[6041]: [crm ] info: pcmk_startup: Maximum
core file size is: 18446744073709551615
May 6 13:50:39 sles2 openais[6041]: [crm ] info: pcmk_startup: Service: 9
May 6 13:50:39 sles2 openais[6041]: [crm ] info: pcmk_startup: Local
hostname: sles2
May 6 13:50:39 sles2 openais[6041]: [crm ] info: pcmk_update_nodeid:
Local node id: 2
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: update_member: Creating
entry for node 2 born on 0
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: update_member: 0x740430
Node 2 now known as sles2 (was: (null))
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: update_member: Node
sles2 now has 1 quorum votes (was 0)
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: update_member: Node
2/sles2 is now: member
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: spawn_child: Forked
child 6213 for process stonithd
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: spawn_child: Forked
child 6214 for process cib
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: spawn_child: Forked
child 6215 for process lrmd
May 6 13:50:39 sles2 lrmd: [6215]: WARN: Initializing connection to
logging daemon failed. Logging daemon may not be running
May 6 13:50:39 sles2 mgmtd: [6221]: WARN: Initializing connection to
logging daemon failed. Logging daemon may not be running
May 6 13:50:39 sles2 cib: [6214]: WARN: Initializing connection to logging
daemon failed. Logging daemon may not be running
May 6 13:50:39 sles2 lrmd: [6215]: info: G_main_add_SignalHandler: Added
signal handler for signal 15
May 6 13:50:39 sles2 mgmtd: [6221]: info: Pacemaker-mgmt Hg Version:
a4d84893fb6e4abdfc2320eb2c6d44f71e7447c9
May 6 13:50:39 sles2 stonithd: [6213]: WARN: Initializing connection to
logging daemon failed. Logging daemon may not be running
May 6 13:50:39 sles2 cib: [6214]: info: Invoked: /usr/lib64/heartbeat/cib
May 6 13:50:39 sles2 stonithd: [6213]: info: G_main_add_SignalHandler:
Added signal handler for signal 10
May 6 13:50:39 sles2 attrd: [6216]: WARN: Initializing connection to
logging daemon failed. Logging daemon may not be running
May 6 13:50:39 sles2 pengine: [6217]: WARN: Initializing connection to
logging daemon failed. Logging daemon may not be running
May 6 13:50:39 sles2 crmd: [6218]: WARN: Initializing connection to
logging daemon failed. Logging daemon may not be running
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: spawn_child: Forked
child 6216 for process attrd
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: spawn_child: Forked
child 6217 for process pengine
May 6 13:50:39 sles2 stonithd: [6213]: info: G_main_add_SignalHandler:
Added signal handler for signal 12
May 6 13:50:39 sles2 mgmtd: [6221]: info: G_main_add_SignalHandler: Added
signal handler for signal 15
May 6 13:50:39 sles2 attrd: [6216]: info: Invoked: /usr/lib64/heartbeat/attrd
May 6 13:50:39 sles2 pengine: [6217]: info: Invoked:
/usr/lib64/heartbeat/pengine
May 6 13:50:39 sles2 mgmtd: [6221]: debug: Enabling coredumps
May 6 13:50:39 sles2 cib: [6214]: info: G_main_add_TriggerHandler: Added
signal manual handler
May 6 13:50:39 sles2 crmd: [6218]: info: Invoked: /usr/lib64/heartbeat/crmd
May 6 13:50:40 sles2 crmd: [6218]: info: main: CRM Hg Version:
c48e3360eb18c53fd68bb7e7dbe39279ccbc0354
May 6 13:50:39 sles2 stonithd: [6213]: info: Stack hogger failed 0xffffffff
May 6 13:50:39 sles2 attrd: [6216]: info: main: Starting up
May 6 13:50:39 sles2 mgmtd: [6221]: info: G_main_add_SignalHandler: Added
signal handler for signal 10
May 6 13:50:39 sles2 pengine: [6217]: info: main: Starting pengine
May 6 13:50:39 sles2 lrmd: [6215]: info: G_main_add_SignalHandler: Added
signal handler for signal 17
May 6 13:50:40 sles2 cib: [6214]: info: G_main_add_SignalHandler: Added
signal handler for signal 17
May 6 13:50:40 sles2 mgmtd: [6221]: info: G_main_add_SignalHandler: Added
signal handler for signal 12
May 6 13:50:40 sles2 attrd: [6216]: info: crm_cluster_connect: Connecting
to OpenAIS
May 6 13:50:40 sles2 crmd: [6218]: info: crmd_init: Starting crmd
May 6 13:50:40 sles2 attrd: [6216]: info: init_ais_connection: Creating
connection to our AIS plugin
May 6 13:50:40 sles2 cib: [6214]: info: retrieveCib: Reading cluster
configuration from: /var/lib/heartbeat/crm/cib.xml (digest:
/var/lib/heartbeat/crm/cib.xml.sig)
May 6 13:50:40 sles2 lrmd: [6215]: info: G_main_add_SignalHandler: Added
signal handler for signal 10
May 6 13:50:40 sles2 lrmd: [6215]: info: G_main_add_SignalHandler: Added
signal handler for signal 12
May 6 13:50:40 sles2 attrd: [6216]: info: init_ais_connection: AIS
connection established
May 6 13:50:40 sles2 lrmd: [6215]: info: Started.
May 6 13:50:40 sles2 crmd: [6218]: info: G_main_add_SignalHandler: Added
signal handler for signal 17
May 6 13:50:40 sles2 stonithd: [6213]: info: crm_cluster_connect:
Connecting to OpenAIS
May 6 13:50:40 sles2 stonithd: [6213]: info: init_ais_connection: Creating
connection to our AIS plugin
May 6 13:50:40 sles2 mgmtd: [6221]: info: init_crm: live
May 6 13:50:40 sles2 mgmtd: [6221]: info: login to cib live: 0, ret:-10
May 6 13:50:39 sles2 openais[6041]: [MAIN ] info: spawn_child: Forked
child 6218 for process crmd
May 6 13:50:40 sles2 openais[6041]: [MAIN ] info: spawn_child: Forked
child 6221 for process mgmtd
May 6 13:50:40 sles2 openais[6041]: [MAIN ] Service initialized 'Pacemaker
Cluster Manager 1.0.6'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
extended virtual synchrony service'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
cluster membership service B.01.01'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
availability management framework B.01.01'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
checkpoint service B.01.01'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
event service B.01.01'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
distributed locking service B.01.01'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
message service B.01.01'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
configuration service'
May 6 13:50:40 sles2 attrd: [6216]: info: get_ais_nodeid: Server details:
id=2 uname=sles2 cname=pcmk
May 6 13:50:40 sles2 attrd: [6216]: info: crm_new_peer: Node sles2 now has
id: 2
May 6 13:50:40 sles2 attrd: [6216]: info: crm_new_peer: Node 2 is now
known as sles2
May 6 13:50:40 sles2 stonithd: [6213]: info: init_ais_connection: AIS
connection established
May 6 13:50:40 sles2 attrd: [6216]: info: main: Cluster connection active
May 6 13:50:40 sles2 attrd: [6216]: info: main: Accepting attribute updates
May 6 13:50:40 sles2 attrd: [6216]: info: main: Starting mainloop...
May 6 13:50:40 sles2 stonithd: [6213]: info: get_ais_nodeid: Server
details: id=2 uname=sles2 cname=pcmk
May 6 13:50:40 sles2 stonithd: [6213]: info: crm_new_peer: Node sles2 now
has id: 2
May 6 13:50:40 sles2 stonithd: [6213]: info: crm_new_peer: Node 2 is now
known as sles2
May 6 13:50:40 sles2 stonithd: [6213]: notice:
/usr/lib64/heartbeat/stonithd start up successfully.
May 6 13:50:40 sles2 stonithd: [6213]: info: G_main_add_SignalHandler:
Added signal handler for signal 17
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
cluster closed process group service v1.01'
May 6 13:50:40 sles2 openais[6041]: [SERV ] Service initialized 'openais
cluster config database access v1.01'
May 6 13:50:40 sles2 openais[6041]: [SYNC ] Not using a virtual synchrony
filter.
May 6 13:50:40 sles2 openais[6041]: [TOTEM] Creating commit token because
I am the rep.
May 6 13:50:40 sles2 openais[6041]: [TOTEM] Saving state aru 0 high seq
received 0
May 6 13:50:40 sles2 openais[6041]: [TOTEM] Storing new sequence id for
ring cd0
May 6 13:50:40 sles2 openais[6041]: [TOTEM] entering COMMIT state.
May 6 13:50:40 sles2 openais[6041]: [TOTEM] entering RECOVERY state.
May 6 13:50:40 sles2 openais[6041]: [TOTEM] position [0] member 192.168.111.2:
May 6 13:50:40 sles2 openais[6041]: [TOTEM] previous ring seq 3276 rep
192.168.111.2
May 6 13:50:40 sles2 openais[6041]: [TOTEM] aru 0 high delivered 0
received flag 1
May 6 13:50:40 sles2 openais[6041]: [TOTEM] Did not need to originate any
messages in recovery.
May 6 13:50:40 sles2 openais[6041]: [TOTEM] Sending initial ORF token
May 6 13:50:40 sles2 openais[6041]: [CLM ] CLM CONFIGURATION CHANGE
May 6 13:50:40 sles2 openais[6041]: [CLM ] New Configuration:
May 6 13:50:40 sles2 openais[6041]: [CLM ] Members Left:
May 6 13:50:40 sles2 openais[6041]: [CLM ] Members Joined:
May 6 13:50:40 sles2 openais[6041]: [crm ] notice: pcmk_peer_update:
Transitional membership event on ring 3280: memb=0, new=0, lost=0
May 6 13:50:40 sles2 openais[6041]: [CLM ] CLM CONFIGURATION CHANGE
May 6 13:50:40 sles2 openais[6041]: [CLM ] New Configuration:
May 6 13:50:40 sles2 openais[6041]: [CLM ] r(0) ip(192.168.111.2)
May 6 13:50:40 sles2 openais[6041]: [CLM ] Members Left:
May 6 13:50:40 sles2 openais[6041]: [CLM ] Members Joined:
May 6 13:50:40 sles2 openais[6041]: [CLM ] r(0) ip(192.168.111.2)
May 6 13:50:40 sles2 openais[6041]: [crm ] notice: pcmk_peer_update:
Stable membership event on ring 3280: memb=1, new=1, lost=0
May 6 13:50:40 sles2 openais[6041]: [crm ] info: pcmk_peer_update: NEW:
sles2 2
May 6 13:50:40 sles2 openais[6041]: [crm ] info: pcmk_peer_update: MEMB:
sles2 2
May 6 13:50:40 sles2 openais[6041]: [MAIN ] info: update_member: Node
sles2 now has process list: 00000000000000000000000000053312 (340754)
May 6 13:50:40 sles2 openais[6041]: [SYNC ] This node is within the
primary component and will provide service.
May 6 13:50:40 sles2 openais[6041]: [TOTEM] entering OPERATIONAL state.
May 6 13:50:40 sles2 openais[6041]: [CLM ] got nodejoin message 192.168.111.2
May 6 13:50:40 sles2 openais[6041]: [crm ] info: pcmk_ipc: Recorded
connection 0x778cc0 for attrd/6216
May 6 13:50:40 sles2 openais[6041]: [crm ] info: pcmk_ipc: Recorded
connection 0x779260 for stonithd/6213
May 6 13:50:40 sles2 cib: [6214]: info: startCib: CIB Initialization
completed successfully
May 6 13:50:40 sles2 cib: [6214]: info: crm_cluster_connect: Connecting to
OpenAIS
May 6 13:50:40 sles2 cib: [6214]: info: init_ais_connection: Creating
connection to our AIS plugin
May 6 13:50:40 sles2 cib: [6214]: info: init_ais_connection: AIS
connection established
May 6 13:50:40 sles2 openais[6041]: [crm ] info: pcmk_ipc: Recorded
connection 0x778bb0 for cib/6214
May 6 13:50:40 sles2 openais[6041]: [crm ] info: pcmk_ipc: Sending
membership update 3280 to cib
May 6 13:50:40 sles2 cib: [6214]: info: get_ais_nodeid: Server details:
id=2 uname=sles2 cname=pcmk
May 6 13:50:40 sles2 cib: [6214]: info: crm_new_peer: Node sles2 now has id: 2
May 6 13:50:40 sles2 cib: [6214]: info: crm_new_peer: Node 2 is now known
as sles2
May 6 13:50:40 sles2 cib: [6214]: info: cib_init: Starting cib mainloop
May 6 13:50:40 sles2 cib: [6214]: info: ais_dispatch: Membership 3280:
quorum still lost
May 6 13:50:40 sles2 cib: [6214]: info: crm_update_peer: Node sles2: id=2
state=member (new) addr=r(0) ip(192.168.111.2) (new) votes=1 (new) born=0
seen=3280 proc=00000000000000000000000000053312 (new)
May 6 13:50:40 sles2 cib: [6722]: info: write_cib_contents: Archived
previous version as /var/lib/heartbeat/crm/cib-40.raw
May 6 13:50:40 sles2 cib: [6722]: info: write_cib_contents: Wrote version
0.676.0 of the CIB to disk (digest: 4c4508f4bd829ea01a67989008378659)
May 6 13:50:40 sles2 cib: [6722]: info: retrieveCib: Reading cluster
configuration from: /var/lib/heartbeat/crm/cib.Egvf4M (digest:
/var/lib/heartbeat/crm/cib.C4FTNE)
May 6 13:50:41 sles2 crmd: [6218]: info: do_cib_control: CIB connection
established
May 6 13:50:41 sles2 crmd: [6218]: info: crm_cluster_connect: Connecting
to OpenAIS
May 6 13:50:41 sles2 crmd: [6218]: info: init_ais_connection: Creating
connection to our AIS plugin
May 6 13:50:41 sles2 crmd: [6218]: info: init_ais_connection: AIS
connection established
May 6 13:50:41 sles2 openais[6041]: [crm ] info: pcmk_ipc: Recorded
connection 0x778dc0 for crmd/6218
May 6 13:50:41 sles2 openais[6041]: [crm ] info: pcmk_ipc: Sending
membership update 3280 to crmd
May 6 13:50:41 sles2 crmd: [6218]: info: get_ais_nodeid: Server details:
id=2 uname=sles2 cname=pcmk
May 6 13:50:41 sles2 crmd: [6218]: info: crm_new_peer: Node sles2 now has
id: 2
May 6 13:50:41 sles2 crmd: [6218]: info: crm_new_peer: Node 2 is now known
as sles2
May 6 13:50:41 sles2 crmd: [6218]: info: do_ha_control: Connected to the
cluster
May 6 13:50:41 sles2 crmd: [6218]: info: do_started: Delaying start, CCM
(0000000000100000) not connected
May 6 13:50:41 sles2 crmd: [6218]: info: crmd_init: Starting crmd's mainloop
May 6 13:50:41 sles2 crmd: [6218]: info: config_query_callback: Checking
for expired actions every 900000ms
May 6 13:50:41 sles2 openais[6041]: [crm ] info: update_expected_votes:
Expected quorum votes 1024 -> 2
May 6 13:50:41 sles2 crmd: [6218]: info: ais_dispatch: Membership 3280:
quorum still lost
May 6 13:50:41 sles2 crmd: [6218]: info: crm_update_peer: Node sles2: id=2
state=member (new) addr=r(0) ip(192.168.111.2) (new) votes=1 (new) born=0
seen=3280 proc=00000000000000000000000000053312 (new)
May 6 13:50:41 sles2 crmd: [6218]: info: do_started: The local CRM is
operational
May 6 13:50:41 sles2 crmd: [6218]: info: do_state_transition: State
transition S_STARTING -> S_PENDING [ input=I_PENDING cause=C_FSA_INTERNAL
origin=do_started ]
May 6 13:50:42 sles2 mgmtd: [6221]: debug: main: run the loop...
May 6 13:50:42 sles2 mgmtd: [6221]: info: Started.
May 6 13:50:42 sles2 crmd: [6218]: info: ais_dispatch: Membership 3280:
quorum still lost
May 6 13:50:45 sles2 attrd: [6216]: info: cib_connect: Connected to the
CIB after 1 signon attempts
May 6 13:50:45 sles2 attrd: [6216]: info: cib_connect: Sending full refresh
May 6 13:51:42 sles2 crmd: [6218]: info: crm_timer_popped: Election
Trigger (I_DC_TIMEOUT) just popped!
May 6 13:51:42 sles2 crmd: [6218]: WARN: do_log: FSA: Input I_DC_TIMEOUT
from crm_timer_popped() received in state S_PENDING
May 6 13:51:42 sles2 crmd: [6218]: info: do_state_transition: State
transition S_PENDING -> S_ELECTION [ input=I_DC_TIMEOUT
cause=C_TIMER_POPPED origin=crm_timer_popped ]
May 6 13:51:42 sles2 crmd: [6218]: info: do_state_transition: State
transition S_ELECTION -> S_INTEGRATION [ input=I_ELECTION_DC
cause=C_FSA_INTERNAL origin=do_election_check ]
May 6 13:51:42 sles2 crmd: [6218]: info: do_te_control: Registering TE
UUID: b0903214-6f1a-4e07-b841-846d8c20312b
May 6 13:51:42 sles2 crmd: [6218]: WARN: cib_client_add_notify_callback:
Callback already present
May 6 13:51:42 sles2 crmd: [6218]: info: set_graph_functions: Setting
custom graph functions
May 6 13:51:42 sles2 crmd: [6218]: info: unpack_graph: Unpacked transition
-1: 0 actions in 0 synapses
May 6 13:51:42 sles2 crmd: [6218]: info: do_dc_takeover: Taking over DC
status for this partition
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_readwrite: We are now
in R/W mode
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_master for section 'all' (origin=local/crmd/6,
version=0.676.0): ok (rc=0)
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section cib (origin=local/crmd/7,
version=0.676.0): ok (rc=0)
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section crm_config (origin=local/crmd/9,
version=0.676.0): ok (rc=0)
May 6 13:51:42 sles2 crmd: [6218]: info: join_make_offer: Making join
offers based on membership 3280
May 6 13:51:42 sles2 crmd: [6218]: info: do_dc_join_offer_all: join-1:
Waiting on 1 outstanding join acks
May 6 13:51:42 sles2 crmd: [6218]: info: ais_dispatch: Membership 3280:
quorum still lost
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section crm_config (origin=local/crmd/11,
version=0.676.0): ok (rc=0)
May 6 13:51:42 sles2 crmd: [6218]: info: config_query_callback: Checking
for expired actions every 900000ms
May 6 13:51:42 sles2 crmd: [6218]: info: update_dc: Set DC to sles2 (3.0.1)
May 6 13:51:42 sles2 crmd: [6218]: info: ais_dispatch: Membership 3280:
quorum still lost
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section crm_config (origin=local/crmd/14,
version=0.676.0): ok (rc=0)
May 6 13:51:42 sles2 crmd: [6218]: info: do_state_transition: State
transition S_INTEGRATION -> S_FINALIZE_JOIN [ input=I_INTEGRATED
cause=C_FSA_INTERNAL origin=check_join_state ]
May 6 13:51:42 sles2 crmd: [6218]: info: do_state_transition: All 1
cluster nodes responded to the join offer.
May 6 13:51:42 sles2 crmd: [6218]: info: do_dc_join_finalize: join-1:
Syncing the CIB from sles2 to the rest of the cluster
May 6 13:51:42 sles2 crmd: [6218]: info: te_connect_stonith: Attempting
connection to fencing daemon...
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section crm_config (origin=local/crmd/17,
version=0.676.0): ok (rc=0)
May 6 13:51:42 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_sync for section 'all' (origin=local/crmd/18,
version=0.676.0): ok (rc=0)
May 6 13:51:43 sles2 crmd: [6218]: info: te_connect_stonith: Connected
May 6 13:51:43 sles2 crmd: [6218]: info: update_attrd: Connecting to attrd...
May 6 13:51:43 sles2 attrd: [6216]: info: find_hash_entry: Creating hash
entry for terminate
May 6 13:51:43 sles2 attrd: [6216]: info: find_hash_entry: Creating hash
entry for shutdown
May 6 13:51:43 sles2 crmd: [6218]: info: do_dc_join_ack: join-1: Updating
node state to member for sles2
May 6 13:51:43 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section nodes (origin=local/crmd/19,
version=0.676.0): ok (rc=0)
May 6 13:51:43 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_delete for section
//node_state[@uname='sles2']/transient_attributes (origin=local/crmd/20,
version=0.676.0): ok (rc=0)
May 6 13:51:43 sles2 crmd: [6218]: info: erase_xpath_callback: Deletion of
"//node_state[@uname='sles2']/transient_attributes": ok (rc=0)
May 6 13:51:43 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_delete for section //node_state[@uname='sles2']/lrm
(origin=local/crmd/21, version=0.676.0): ok (rc=0)
May 6 13:51:43 sles2 crmd: [6218]: info: erase_xpath_callback: Deletion of
"//node_state[@uname='sles2']/lrm": ok (rc=0)
May 6 13:51:43 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_delete for section //node_state[@uname='sles2']/lrm
(origin=local/crmd/22, version=0.676.0): ok (rc=0)
May 6 13:51:43 sles2 crmd: [6218]: info: erase_xpath_callback: Deletion of
"//node_state[@uname='sles2']/lrm": ok (rc=0)
May 6 13:51:43 sles2 crmd: [6218]: info: do_state_transition: State
transition S_FINALIZE_JOIN -> S_POLICY_ENGINE [ input=I_FINALIZED
cause=C_FSA_INTERNAL origin=check_join_state ]
May 6 13:51:43 sles2 crmd: [6218]: info: do_state_transition: All 1
cluster nodes are eligible to run resources.
May 6 13:51:43 sles2 crmd: [6218]: info: do_dc_join_final: Ensuring DC,
quorum and node attributes are up-to-date
May 6 13:51:43 sles2 crmd: [6218]: info: crm_update_quorum: Updating
quorum status to false (call=26)
May 6 13:51:43 sles2 attrd: [6216]: info: attrd_local_callback: Sending
full refresh (origin=crmd)
May 6 13:51:43 sles2 crmd: [6218]: info: abort_transition_graph:
do_te_invoke:191 - Triggered transition abort (complete=1) : Peer Cancelled
May 6 13:51:43 sles2 attrd: [6216]: info: attrd_trigger_update: Sending
flush op to all hosts for: terminate (<null>)
May 6 13:51:43 sles2 crmd: [6218]: info: do_pe_invoke: Query 27:
Requesting the current CIB: S_POLICY_ENGINE
May 6 13:51:43 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section nodes (origin=local/crmd/24,
version=0.676.1): ok (rc=0)
May 6 13:51:43 sles2 cib: [6214]: info: log_data_element: cib:diff: - <cib
admin_epoch="0" epoch="676" num_updates="1" />
May 6 13:51:43 sles2 cib: [6214]: info: log_data_element: cib:diff: + <cib
dc-uuid="sles2" admin_epoch="0" epoch="677" num_updates="1" />
May 6 13:51:43 sles2 cib: [6214]: info: cib_process_request: Operation
complete: op cib_modify for section cib (origin=local/crmd/26,
version=0.677.1): ok (rc=0)
May 6 13:51:43 sles2 crmd: [6218]: info: abort_transition_graph:
need_abort:59 - Triggered transition abort (complete=1) : Non-status change
May 6 13:51:43 sles2 crmd: [6218]: info: need_abort: Aborting on change to
admin_epoch
May 6 13:51:43 sles2 crmd: [6218]: info: do_pe_invoke: Query 28:
Requesting the current CIB: S_POLICY_ENGINE
May 6 13:51:43 sles2 attrd: [6216]: info: attrd_trigger_update: Sending
flush op to all hosts for: shutdown (<null>)
May 6 13:51:43 sles2 crmd: [6218]: info: do_pe_invoke_callback: Invoking
the PE: ref=pe_calc-dc-1273143103-7, seq=3280, quorate=0
May 6 13:51:43 sles2 pengine: [6217]: notice: unpack_config: On loss of
CCM Quorum: Ignore
May 6 13:51:43 sles2 pengine: [6217]: info: unpack_config: Node scores:
'red' = -INFINITY, 'yellow' = 0, 'green' = 0
May 6 13:51:43 sles2 pengine: [6217]: info: determine_online_status: Node
sles2 is online
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_print: Clone Set:
sbd-clone
May 6 13:51:43 sles2 pengine: [6217]: notice: short_print: Stopped: [
sbd_fense:0 sbd_fense:1 ]
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_print: Clone Set:
dlm-clone
May 6 13:51:43 sles2 pengine: [6217]: notice: short_print: Stopped: [
dlm:0 dlm:1 ]
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_print: Clone Set:
clvm-clone
May 6 13:51:43 sles2 pengine: [6217]: notice: short_print: Stopped: [
clvm:0 clvm:1 ]
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_print: Clone Set:
eCluster_vg0-clone
May 6 13:51:43 sles2 pengine: [6217]: notice: short_print: Stopped: [
eCluster_vg0:0 eCluster_vg0:1 ]
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_print: Clone Set:
o2cb-clone
May 6 13:51:43 sles2 pengine: [6217]: notice: short_print: Stopped: [
o2cb:0 o2cb:1 ]
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_print: Clone Set:
fs-clone
May 6 13:51:43 sles2 pengine: [6217]: notice: short_print: Stopped: [
fs:0 fs:1 ]
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_print: Clone Set:
pingd-clone
May 6 13:51:43 sles2 pengine: [6217]: notice: short_print: Stopped: [
pingd:0 pingd:1 ]
May 6 13:51:43 sles2 pengine: [6217]: WARN: native_color: Resource
sbd_fense:1 cannot run anywhere
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from clvm-clone
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from eCluster_vg0-clone
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from o2cb-clone
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from fs-clone
May 6 13:51:43 sles2 pengine: [6217]: WARN: native_color: Resource dlm:1
cannot run anywhere
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from eCluster_vg0-clone
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from o2cb-clone
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from fs-clone
May 6 13:51:43 sles2 pengine: [6217]: WARN: native_color: Resource clvm:1
cannot run anywhere
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
eCluster_vg0-clone: Rolling back scores from o2cb-clone
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
eCluster_vg0-clone: Rolling back scores from fs-clone
May 6 13:51:43 sles2 pengine: [6217]: WARN: native_color: Resource
eCluster_vg0:1 cannot run anywhere
May 6 13:51:43 sles2 pengine: [6217]: info: native_merge_weights:
o2cb-clone: Rolling back scores from fs-clone
May 6 13:51:43 sles2 pengine: [6217]: WARN: native_color: Resource o2cb:1
cannot run anywhere
May 6 13:51:43 sles2 cib: [8689]: info: write_cib_contents: Archived
previous version as /var/lib/heartbeat/crm/cib-41.raw
May 6 13:51:43 sles2 pengine: [6217]: WARN: native_color: Resource fs:1
cannot run anywhere
May 6 13:51:43 sles2 pengine: [6217]: WARN: native_color: Resource pingd:1
cannot run anywhere
May 6 13:51:43 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for dlm:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for o2cb:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for fs:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (5s) for pingd:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: WARN: stage6: Scheduling Node sles1
for STONITH
May 6 13:51:43 sles2 pengine: [6217]: info: native_start_constraints:
Ordering dlm:0_start_0 after sles1 recovery
May 6 13:51:43 sles2 pengine: [6217]: info: native_start_constraints:
Ordering clvm:0_start_0 after sles1 recovery
May 6 13:51:43 sles2 cib: [8689]: info: write_cib_contents: Wrote version
0.677.0 of the CIB to disk (digest: 7b4b41fdd8a1bd8c754c94e51fca76a2)
May 6 13:51:43 sles2 pengine: [6217]: info: native_start_constraints:
Ordering eCluster_vg0:0_start_0 after sles1 recovery
May 6 13:51:43 sles2 pengine: [6217]: info: native_start_constraints:
Ordering o2cb:0_start_0 after sles1 recovery
May 6 13:51:43 sles2 pengine: [6217]: info: native_start_constraints:
Ordering fs:0_start_0 after sles1 recovery
May 6 13:51:43 sles2 pengine: [6217]: info: native_start_constraints:
Ordering pingd:0_start_0 after sles1 recovery
May 6 13:51:43 sles2 pengine: [6217]: info: find_compatible_child:
Colocating clvm:0 with dlm:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving dlm:0 with clvm:0
May 6 13:51:43 sles2 pengine: [6217]: info: find_compatible_child:
Colocating eCluster_vg0:0 with clvm:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving clvm:0 with eCluster_vg0:0
May 6 13:51:43 sles2 pengine: [6217]: info: find_compatible_child:
Colocating o2cb:0 with eCluster_vg0:0 on sles2
May 6 13:51:43 sles2 cib: [8689]: info: retrieveCib: Reading cluster
configuration from: /var/lib/heartbeat/crm/cib.IPoeDz (digest:
/var/lib/heartbeat/crm/cib.FG58F6)
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving eCluster_vg0:0 with o2cb:0
May 6 13:51:43 sles2 pengine: [6217]: info: find_compatible_child:
Colocating fs:0 with o2cb:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving o2cb:0 with fs:0
May 6 13:51:43 sles2 pengine: [6217]: info: find_compatible_child:
Colocating pingd:0 with fs:0 on sles2
May 6 13:51:43 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving fs:0 with pingd:0
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Start
sbd_fense:0 (sles2)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Leave resource
sbd_fense:1 (Stopped)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Start dlm:0 (sles2)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Leave resource
dlm:1 (Stopped)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Start clvm:0 (sles2)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Leave resource
clvm:1 (Stopped)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Start
eCluster_vg0:0 (sles2)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Leave resource
eCluster_vg0:1 (Stopped)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Start o2cb:0 (sles2)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Leave resource
o2cb:1 (Stopped)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Start fs:0 (sles2)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Leave resource
fs:1 (Stopped)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Start pingd:0
(sles2)
May 6 13:51:43 sles2 pengine: [6217]: notice: LogActions: Leave resource
pingd:1 (Stopped)
May 6 13:51:44 sles2 crmd: [6218]: info: do_state_transition: State
transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS
cause=C_IPC_MESSAGE origin=handle_response ]
May 6 13:51:44 sles2 crmd: [6218]: info: unpack_graph: Unpacked transition
0: 49 actions in 49 synapses
May 6 13:51:44 sles2 crmd: [6218]: info: do_te_invoke: Processing graph 0
(ref=pe_calc-dc-1273143103-7) derived from /var/lib/pengine/pe-warn-1877.bz2
May 6 13:51:44 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
4: monitor sbd_fense:0_monitor_0 on sles2 (local)
May 6 13:51:44 sles2 lrmd: [6215]: notice: lrmd_rsc_new(): No
lrm_rprovider field in message
May 6 13:51:44 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=4:0:7:b0903214-6f1a-4e07-b841-846d8c20312b op=sbd_fense:0_monitor_0 )
May 6 13:51:44 sles2 lrmd: [6215]: info: rsc:sbd_fense:0:2: monitor
May 6 13:51:44 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
5: monitor dlm:0_monitor_0 on sles2 (local)
May 6 13:51:44 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=5:0:7:b0903214-6f1a-4e07-b841-846d8c20312b op=dlm:0_monitor_0 )
May 6 13:51:44 sles2 lrmd: [6215]: info: rsc:dlm:0:3: monitor
May 6 13:51:44 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
6: monitor clvm:0_monitor_0 on sles2 (local)
May 6 13:51:44 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=6:0:7:b0903214-6f1a-4e07-b841-846d8c20312b op=clvm:0_monitor_0 )
May 6 13:51:44 sles2 lrmd: [6215]: info: rsc:clvm:0:4: monitor
May 6 13:51:44 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
7: monitor eCluster_vg0:0_monitor_0 on sles2 (local)
May 6 13:51:44 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=7:0:7:b0903214-6f1a-4e07-b841-846d8c20312b op=eCluster_vg0:0_monitor_0 )
May 6 13:51:44 sles2 lrmd: [6215]: info: rsc:eCluster_vg0:0:5: monitor
May 6 13:51:44 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
8: monitor o2cb:0_monitor_0 on sles2 (local)
May 6 13:51:44 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=8:0:7:b0903214-6f1a-4e07-b841-846d8c20312b op=o2cb:0_monitor_0 )
May 6 13:51:44 sles2 lrmd: [6215]: info: rsc:o2cb:0:6: monitor
May 6 13:51:44 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
9: monitor fs:0_monitor_0 on sles2 (local)
May 6 13:51:44 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=9:0:7:b0903214-6f1a-4e07-b841-846d8c20312b op=fs:0_monitor_0 )
May 6 13:51:44 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
10: monitor pingd:0_monitor_0 on sles2 (local)
May 6 13:51:44 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=10:0:7:b0903214-6f1a-4e07-b841-846d8c20312b op=pingd:0_monitor_0 )
May 6 13:51:44 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
sbd_fense:0_monitor_0 (call=2, rc=7, cib-update=29, confirmed=true) not running
May 6 13:51:44 sles2 crmd: [6218]: info: match_graph_event: Action
sbd_fense:0_monitor_0 (4) confirmed on sles2 (rc=0)
May 6 13:51:44 sles2 lrmd: [6215]: info: RA output: (dlm:0:monitor:stderr)
dlm_controld.pcmk: no process killed
May 6 13:51:44 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
clvm:0_monitor_0 (call=4, rc=7, cib-update=30, confirmed=true) not running
May 6 13:51:44 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
dlm:0_monitor_0 (call=3, rc=7, cib-update=31, confirmed=true) not running
May 6 13:51:44 sles2 crmd: [6218]: info: match_graph_event: Action
clvm:0_monitor_0 (6) confirmed on sles2 (rc=0)
May 6 13:51:44 sles2 crmd: [6218]: info: match_graph_event: Action
dlm:0_monitor_0 (5) confirmed on sles2 (rc=0)
May 6 13:51:44 sles2 pengine: [6217]: WARN: process_pe_message: Transition
0: WARNINGs found during PE processing. PEngine Input stored in:
/var/lib/pengine/pe-warn-1877.bz2
May 6 13:51:44 sles2 pengine: [6217]: info: process_pe_message:
Configuration WARNINGs found during PE processing. Please run "crm_verify
-L" to identify issues.
May 6 13:51:44 sles2 lrmd: [6215]: info: RA output:
(eCluster_vg0:0:monitor:stderr) logd is not running
May 6 13:51:44 sles2 lrmd: [6215]: info: RA output:
(o2cb:0:monitor:stderr) logd is not running
May 6 13:51:44 sles2 o2cb[8695]: INFO: configfs not laoded
May 6 13:51:44 sles2 LVM[8694]: INFO: LVM Volume eCluster_vg0 is offline
May 6 13:51:44 sles2 lrmd: [6215]: info: RA output:
(o2cb:0:monitor:stderr) 2010/05/06_13:51:44 INFO: configfs not laoded
May 6 13:51:44 sles2 lrmd: [6215]: info: RA output:
(eCluster_vg0:0:monitor:stderr) 2010/05/06_13:51:44 INFO: LVM Volume
eCluster_vg0 is offline
May 6 13:51:44 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
eCluster_vg0:0_monitor_0 (call=5, rc=7, cib-update=32, confirmed=true) not
running
May 6 13:51:44 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
o2cb:0_monitor_0 (call=6, rc=7, cib-update=33, confirmed=true) not running
May 6 13:51:44 sles2 crmd: [6218]: info: match_graph_event: Action
eCluster_vg0:0_monitor_0 (7) confirmed on sles2 (rc=0)
May 6 13:51:44 sles2 crmd: [6218]: info: match_graph_event: Action
o2cb:0_monitor_0 (8) confirmed on sles2 (rc=0)
May 6 13:51:45 sles2 lrmd: [6215]: info: rsc:fs:0:7: monitor
May 6 13:51:45 sles2 lrmd: [6215]: info: rsc:pingd:0:8: monitor
May 6 13:51:45 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
pingd:0_monitor_0 (call=8, rc=7, cib-update=34, confirmed=true) not running
May 6 13:51:45 sles2 crmd: [6218]: info: match_graph_event: Action
pingd:0_monitor_0 (10) confirmed on sles2 (rc=0)
May 6 13:51:45 sles2 lrmd: [6215]: info: RA output: (fs:0:monitor:stderr)
logd is not running
May 6 13:51:45 sles2 Filesystem[8732]: WARNING: Couldn't find device
[/dev/eCluster_vg0/eCluster_lv0]. Expected /dev/??? to exist
May 6 13:51:45 sles2 lrmd: [6215]: info: RA output: (fs:0:monitor:stderr)
2010/05/06_13:51:45 WARNING: Couldn't find device
[/dev/eCluster_vg0/eCluster_lv0]. Expected /dev/??? to exist
May 6 13:51:45 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
fs:0_monitor_0 (call=7, rc=7, cib-update=35, confirmed=true) not running
May 6 13:51:45 sles2 crmd: [6218]: info: match_graph_event: Action
fs:0_monitor_0 (9) confirmed on sles2 (rc=0)
May 6 13:51:45 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
3: probe_complete probe_complete on sles2 (local) - no waiting
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action 2
fired and confirmed
May 6 13:51:45 sles2 attrd: [6216]: info: find_hash_entry: Creating hash
entry for probe_complete
May 6 13:51:45 sles2 attrd: [6216]: info: attrd_trigger_update: Sending
flush op to all hosts for: probe_complete (true)
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
14 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
30 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
31 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
48 fired and confirmed
May 6 13:51:45 sles2 attrd: [6216]: info: attrd_perform_update: Sent
update 8: probe_complete=true
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
49 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
12 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
25 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
26 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
42 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
43 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: abort_transition_graph:
te_update_diff:146 - Triggered transition abort (complete=0,
tag=transient_attributes, id=sles2, magic=NA, cib=0.677.9) : Transient
attribute: update
May 6 13:51:45 sles2 crmd: [6218]: info: update_abort_priority: Abort
priority upgraded from 0 to 1000000
May 6 13:51:45 sles2 crmd: [6218]: info: update_abort_priority: Abort
action done superceeded by restart
May 6 13:51:45 sles2 crmd: [6218]: info: run_graph:
====================================================
May 6 13:51:45 sles2 crmd: [6218]: notice: run_graph: Transition 0
(Complete=19, Pending=0, Fired=0, Skipped=23, Incomplete=7,
Source=/var/lib/pengine/pe-warn-1877.bz2): Stopped
May 6 13:51:45 sles2 crmd: [6218]: info: te_graph_trigger: Transition 0 is
now complete
May 6 13:51:45 sles2 crmd: [6218]: info: do_state_transition: State
transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC
cause=C_FSA_INTERNAL origin=notify_crmd ]
May 6 13:51:45 sles2 crmd: [6218]: info: do_state_transition: All 1
cluster nodes are eligible to run resources.
May 6 13:51:45 sles2 crmd: [6218]: info: do_pe_invoke: Query 36:
Requesting the current CIB: S_POLICY_ENGINE
May 6 13:51:45 sles2 crmd: [6218]: info: do_pe_invoke_callback: Invoking
the PE: ref=pe_calc-dc-1273143105-16, seq=3280, quorate=0
May 6 13:51:45 sles2 pengine: [6217]: notice: unpack_config: On loss of
CCM Quorum: Ignore
May 6 13:51:45 sles2 pengine: [6217]: info: unpack_config: Node scores:
'red' = -INFINITY, 'yellow' = 0, 'green' = 0
May 6 13:51:45 sles2 pengine: [6217]: info: determine_online_status: Node
sles2 is online
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_print: Clone Set:
sbd-clone
May 6 13:51:45 sles2 pengine: [6217]: notice: short_print: Stopped: [
sbd_fense:0 sbd_fense:1 ]
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_print: Clone Set:
dlm-clone
May 6 13:51:45 sles2 pengine: [6217]: notice: short_print: Stopped: [
dlm:0 dlm:1 ]
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_print: Clone Set:
clvm-clone
May 6 13:51:45 sles2 pengine: [6217]: notice: short_print: Stopped: [
clvm:0 clvm:1 ]
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_print: Clone Set:
eCluster_vg0-clone
May 6 13:51:45 sles2 pengine: [6217]: notice: short_print: Stopped: [
eCluster_vg0:0 eCluster_vg0:1 ]
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_print: Clone Set:
o2cb-clone
May 6 13:51:45 sles2 pengine: [6217]: notice: short_print: Stopped: [
o2cb:0 o2cb:1 ]
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_print: Clone Set:
fs-clone
May 6 13:51:45 sles2 pengine: [6217]: notice: short_print: Stopped: [
fs:0 fs:1 ]
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_print: Clone Set:
pingd-clone
May 6 13:51:45 sles2 pengine: [6217]: notice: short_print: Stopped: [
pingd:0 pingd:1 ]
May 6 13:51:45 sles2 pengine: [6217]: WARN: native_color: Resource
sbd_fense:1 cannot run anywhere
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from clvm-clone
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from eCluster_vg0-clone
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from o2cb-clone
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from fs-clone
May 6 13:51:45 sles2 pengine: [6217]: WARN: native_color: Resource dlm:1
cannot run anywhere
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from eCluster_vg0-clone
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from o2cb-clone
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from fs-clone
May 6 13:51:45 sles2 pengine: [6217]: WARN: native_color: Resource clvm:1
cannot run anywhere
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
eCluster_vg0-clone: Rolling back scores from o2cb-clone
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
eCluster_vg0-clone: Rolling back scores from fs-clone
May 6 13:51:45 sles2 pengine: [6217]: WARN: native_color: Resource
eCluster_vg0:1 cannot run anywhere
May 6 13:51:45 sles2 pengine: [6217]: info: native_merge_weights:
o2cb-clone: Rolling back scores from fs-clone
May 6 13:51:45 sles2 pengine: [6217]: WARN: native_color: Resource o2cb:1
cannot run anywhere
May 6 13:51:45 sles2 pengine: [6217]: WARN: native_color: Resource fs:1
cannot run anywhere
May 6 13:51:45 sles2 pengine: [6217]: WARN: native_color: Resource pingd:1
cannot run anywhere
May 6 13:51:45 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for dlm:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for o2cb:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for fs:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (5s) for pingd:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: WARN: stage6: Scheduling Node sles1
for STONITH
May 6 13:51:45 sles2 pengine: [6217]: info: native_start_constraints:
Ordering dlm:0_start_0 after sles1 recovery
May 6 13:51:45 sles2 pengine: [6217]: info: native_start_constraints:
Ordering clvm:0_start_0 after sles1 recovery
May 6 13:51:45 sles2 pengine: [6217]: info: native_start_constraints:
Ordering eCluster_vg0:0_start_0 after sles1 recovery
May 6 13:51:45 sles2 pengine: [6217]: info: native_start_constraints:
Ordering o2cb:0_start_0 after sles1 recovery
May 6 13:51:45 sles2 pengine: [6217]: info: native_start_constraints:
Ordering fs:0_start_0 after sles1 recovery
May 6 13:51:45 sles2 pengine: [6217]: info: native_start_constraints:
Ordering pingd:0_start_0 after sles1 recovery
May 6 13:51:45 sles2 pengine: [6217]: info: find_compatible_child:
Colocating clvm:0 with dlm:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving dlm:0 with clvm:0
May 6 13:51:45 sles2 pengine: [6217]: info: find_compatible_child:
Colocating eCluster_vg0:0 with clvm:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving clvm:0 with eCluster_vg0:0
May 6 13:51:45 sles2 pengine: [6217]: info: find_compatible_child:
Colocating o2cb:0 with eCluster_vg0:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving eCluster_vg0:0 with o2cb:0
May 6 13:51:45 sles2 pengine: [6217]: info: find_compatible_child:
Colocating fs:0 with o2cb:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving o2cb:0 with fs:0
May 6 13:51:45 sles2 pengine: [6217]: info: find_compatible_child:
Colocating pingd:0 with fs:0 on sles2
May 6 13:51:45 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving fs:0 with pingd:0
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Start
sbd_fense:0 (sles2)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Leave resource
sbd_fense:1 (Stopped)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Start dlm:0 (sles2)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Leave resource
dlm:1 (Stopped)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Start clvm:0 (sles2)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Leave resource
clvm:1 (Stopped)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Start
eCluster_vg0:0 (sles2)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Leave resource
eCluster_vg0:1 (Stopped)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Start o2cb:0 (sles2)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Leave resource
o2cb:1 (Stopped)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Start fs:0 (sles2)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Leave resource
fs:1 (Stopped)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Start pingd:0
(sles2)
May 6 13:51:45 sles2 pengine: [6217]: notice: LogActions: Leave resource
pingd:1 (Stopped)
May 6 13:51:45 sles2 crmd: [6218]: info: do_state_transition: State
transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS
cause=C_IPC_MESSAGE origin=handle_response ]
May 6 13:51:45 sles2 crmd: [6218]: info: unpack_graph: Unpacked transition
1: 29 actions in 29 synapses
May 6 13:51:45 sles2 crmd: [6218]: info: do_te_invoke: Processing graph 1
(ref=pe_calc-dc-1273143105-16) derived from /var/lib/pengine/pe-warn-1878.bz2
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action 5
fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
11 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
33 fired and confirmed
May 6 13:51:45 sles2 crmd: [6218]: info: te_rsc_command: Initiating action
4: start sbd_fense:0_start_0 on sles2 (local)
May 6 13:51:45 sles2 crmd: [6218]: info: do_lrm_rsc_op: Performing
key=4:1:0:b0903214-6f1a-4e07-b841-846d8c20312b op=sbd_fense:0_start_0 )
May 6 13:51:45 sles2 lrmd: [6215]: info: rsc:sbd_fense:0:9: start
May 6 13:51:45 sles2 lrmd: [8777]: info: Try to start STONITH resource
<rsc_id=sbd_fense:0> : Device=external/sbd
May 6 13:51:45 sles2 pengine: [6217]: WARN: process_pe_message: Transition
1: WARNINGs found during PE processing. PEngine Input stored in:
/var/lib/pengine/pe-warn-1878.bz2
May 6 13:51:45 sles2 pengine: [6217]: info: process_pe_message:
Configuration WARNINGs found during PE processing. Please run "crm_verify
-L" to identify issues.
May 6 13:51:46 sles2 stonithd: [6213]: info: sbd_fense:0 stonith resource
started
May 6 13:51:46 sles2 lrmd: [6215]: debug: stonithRA plugin: provider
attribute is not needed and will be ignored.
May 6 13:51:46 sles2 crmd: [6218]: info: process_lrm_event: LRM operation
sbd_fense:0_start_0 (call=9, rc=0, cib-update=37, confirmed=true) ok
May 6 13:51:46 sles2 crmd: [6218]: info: match_graph_event: Action
sbd_fense:0_start_0 (4) confirmed on sles2 (rc=0)
May 6 13:51:46 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action 6
fired and confirmed
May 6 13:51:46 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
43 fired and confirmed
May 6 13:51:46 sles2 crmd: [6218]: info: te_fence_node: Executing reboot
fencing operation (45) on sles1 (timeout=60000)
May 6 13:51:46 sles2 stonithd: [6213]: info: client tengine [pid: 6218]
requests a STONITH operation RESET on node sles1
May 6 13:51:46 sles2 stonithd: [6213]: info:
stonith_operate_locally::2713: sending fencing op RESET for sles1 to
sbd_fense:0 (external/sbd) (pid=8792)
May 6 13:52:46 sles2 stonithd: [6213]: WARN: external_sbd_fense:0_1
process (PID 8792) timed out (try 1). Killing with signal SIGTERM (15).
May 6 13:52:48 sles2 stonithd: [8792]: info: external_run_cmd: Calling
'/usr/lib64/stonith/plugins/external/sbd reset sles1' returned 15
May 6 13:52:48 sles2 stonithd: [8792]: CRIT: external_reset_req: 'sbd
reset' for host sles1 failed with rc 15
May 6 13:52:48 sles2 stonithd: [6213]: info: failed to STONITH node sles1
with local device sbd_fense:0 (exitcode 5), gonna try the next local device
May 6 13:52:48 sles2 stonithd: [6213]: info: we can't manage sles1,
broadcast request to other nodes
May 6 13:53:48 sles2 stonithd: [6213]: ERROR: Failed to STONITH the node
sles1: optype=RESET, op_result=TIMEOUT
May 6 13:53:48 sles2 crmd: [6218]: info: tengine_stonith_callback:
call=-2, optype=1, node_name=sles1, result=2, node_list=,
action=45:1:0:b0903214-6f1a-4e07-b841-846d8c20312b
May 6 13:53:48 sles2 crmd: [6218]: ERROR: tengine_stonith_callback:
Stonith of sles1 failed (2)... aborting transition.
May 6 13:53:48 sles2 crmd: [6218]: info: abort_transition_graph:
tengine_stonith_callback:398 - Triggered transition abort (complete=0) :
Stonith failed
May 6 13:53:48 sles2 crmd: [6218]: info: update_abort_priority: Abort
priority upgraded from 0 to 1000000
May 6 13:53:48 sles2 crmd: [6218]: info: update_abort_priority: Abort
action done superceeded by restart
May 6 13:53:48 sles2 crmd: [6218]: info: run_graph:
====================================================
May 6 13:53:48 sles2 crmd: [6218]: notice: run_graph: Transition 1
(Complete=7, Pending=0, Fired=0, Skipped=16, Incomplete=6,
Source=/var/lib/pengine/pe-warn-1878.bz2): Stopped
May 6 13:53:48 sles2 crmd: [6218]: info: te_graph_trigger: Transition 1 is
now complete
May 6 13:53:48 sles2 crmd: [6218]: info: do_state_transition: State
transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC
cause=C_FSA_INTERNAL origin=notify_crmd ]
May 6 13:53:48 sles2 crmd: [6218]: info: do_state_transition: All 1
cluster nodes are eligible to run resources.
May 6 13:53:48 sles2 crmd: [6218]: info: do_pe_invoke: Query 38:
Requesting the current CIB: S_POLICY_ENGINE
May 6 13:53:48 sles2 pengine: [6217]: notice: unpack_config: On loss of
CCM Quorum: Ignore
May 6 13:53:48 sles2 crmd: [6218]: info: do_pe_invoke_callback: Invoking
the PE: ref=pe_calc-dc-1273143228-18, seq=3280, quorate=0
May 6 13:53:48 sles2 pengine: [6217]: info: unpack_config: Node scores:
'red' = -INFINITY, 'yellow' = 0, 'green' = 0
May 6 13:53:48 sles2 pengine: [6217]: info: determine_online_status: Node
sles2 is online
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_print: Clone Set:
sbd-clone
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Started: [
sles2 ]
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Stopped: [
sbd_fense:1 ]
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_print: Clone Set:
dlm-clone
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Stopped: [
dlm:0 dlm:1 ]
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_print: Clone Set:
clvm-clone
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Stopped: [
clvm:0 clvm:1 ]
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_print: Clone Set:
eCluster_vg0-clone
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Stopped: [
eCluster_vg0:0 eCluster_vg0:1 ]
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_print: Clone Set:
o2cb-clone
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Stopped: [
o2cb:0 o2cb:1 ]
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_print: Clone Set:
fs-clone
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Stopped: [
fs:0 fs:1 ]
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_print: Clone Set:
pingd-clone
May 6 13:53:48 sles2 pengine: [6217]: notice: short_print: Stopped: [
pingd:0 pingd:1 ]
May 6 13:53:48 sles2 pengine: [6217]: WARN: native_color: Resource
sbd_fense:1 cannot run anywhere
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from clvm-clone
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from eCluster_vg0-clone
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from o2cb-clone
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
dlm-clone: Rolling back scores from fs-clone
May 6 13:53:48 sles2 pengine: [6217]: WARN: native_color: Resource dlm:1
cannot run anywhere
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from eCluster_vg0-clone
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from o2cb-clone
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
clvm-clone: Rolling back scores from fs-clone
May 6 13:53:48 sles2 pengine: [6217]: WARN: native_color: Resource clvm:1
cannot run anywhere
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
eCluster_vg0-clone: Rolling back scores from o2cb-clone
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
eCluster_vg0-clone: Rolling back scores from fs-clone
May 6 13:53:48 sles2 pengine: [6217]: WARN: native_color: Resource
eCluster_vg0:1 cannot run anywhere
May 6 13:53:48 sles2 pengine: [6217]: info: native_merge_weights:
o2cb-clone: Rolling back scores from fs-clone
May 6 13:53:48 sles2 pengine: [6217]: WARN: native_color: Resource o2cb:1
cannot run anywhere
May 6 13:53:48 sles2 pengine: [6217]: WARN: native_color: Resource fs:1
cannot run anywhere
May 6 13:53:48 sles2 pengine: [6217]: WARN: native_color: Resource pingd:1
cannot run anywhere
May 6 13:53:48 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for dlm:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for o2cb:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (120s) for fs:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: RecurringOp: Start
recurring monitor (5s) for pingd:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: WARN: stage6: Scheduling Node sles1
for STONITH
May 6 13:53:48 sles2 pengine: [6217]: info: native_start_constraints:
Ordering dlm:0_start_0 after sles1 recovery
May 6 13:53:48 sles2 pengine: [6217]: info: native_start_constraints:
Ordering clvm:0_start_0 after sles1 recovery
May 6 13:53:48 sles2 pengine: [6217]: info: native_start_constraints:
Ordering eCluster_vg0:0_start_0 after sles1 recovery
May 6 13:53:48 sles2 pengine: [6217]: info: native_start_constraints:
Ordering o2cb:0_start_0 after sles1 recovery
May 6 13:53:48 sles2 pengine: [6217]: info: native_start_constraints:
Ordering fs:0_start_0 after sles1 recovery
May 6 13:53:48 sles2 pengine: [6217]: info: native_start_constraints:
Ordering pingd:0_start_0 after sles1 recovery
May 6 13:53:48 sles2 pengine: [6217]: info: find_compatible_child:
Colocating clvm:0 with dlm:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving dlm:0 with clvm:0
May 6 13:53:48 sles2 pengine: [6217]: info: find_compatible_child:
Colocating eCluster_vg0:0 with clvm:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving clvm:0 with eCluster_vg0:0
May 6 13:53:48 sles2 pengine: [6217]: info: find_compatible_child:
Colocating o2cb:0 with eCluster_vg0:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving eCluster_vg0:0 with o2cb:0
May 6 13:53:48 sles2 pengine: [6217]: info: find_compatible_child:
Colocating fs:0 with o2cb:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving o2cb:0 with fs:0
May 6 13:53:48 sles2 pengine: [6217]: info: find_compatible_child:
Colocating pingd:0 with fs:0 on sles2
May 6 13:53:48 sles2 pengine: [6217]: notice: clone_rsc_order_lh:
Interleaving fs:0 with pingd:0
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
sbd_fense:0 (Started sles2)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
sbd_fense:1 (Stopped)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Start dlm:0 (sles2)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
dlm:1 (Stopped)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Start clvm:0 (sles2)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
clvm:1 (Stopped)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Start
eCluster_vg0:0 (sles2)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
eCluster_vg0:1 (Stopped)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Start o2cb:0 (sles2)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
o2cb:1 (Stopped)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Start fs:0 (sles2)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
fs:1 (Stopped)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Start pingd:0
(sles2)
May 6 13:53:48 sles2 pengine: [6217]: notice: LogActions: Leave resource
pingd:1 (Stopped)
May 6 13:53:48 sles2 crmd: [6218]: info: do_state_transition: State
transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS
cause=C_IPC_MESSAGE origin=handle_response ]
May 6 13:53:48 sles2 crmd: [6218]: info: unpack_graph: Unpacked transition
2: 26 actions in 26 synapses
May 6 13:53:48 sles2 crmd: [6218]: info: do_te_invoke: Processing graph 2
(ref=pe_calc-dc-1273143228-18) derived from /var/lib/pengine/pe-warn-1879.bz2
May 6 13:53:48 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
12 fired and confirmed
May 6 13:53:48 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
34 fired and confirmed
May 6 13:53:48 sles2 crmd: [6218]: info: te_pseudo_action: Pseudo action
44 fired and confirmed
May 6 13:53:48 sles2 crmd: [6218]: info: te_fence_node: Executing reboot
fencing operation (46) on sles1 (timeout=60000)
May 6 13:53:48 sles2 stonithd: [6213]: info: client tengine [pid: 6218]
requests a STONITH operation RESET on node sles1
May 6 13:53:48 sles2 stonithd: [6213]: info:
stonith_operate_locally::2713: sending fencing op RESET for sles1 to
sbd_fense:0 (external/sbd) (pid=10062)
May 6 13:53:48 sles2 pengine: [6217]: WARN: process_pe_message: Transition
2: WARNINGs found during PE processing. PEngine Input stored in:
/var/lib/pengine/pe-warn-1879.bz2
May 6 13:53:48 sles2 pengine: [6217]: info: process_pe_message:
Configuration WARNINGs found during PE processing. Please run "crm_verify
-L" to identify issues.
May 6 13:54:48 sles2 stonithd: [6213]: WARN: external_sbd_fense:0_1
process (PID 10062) timed out (try 1). Killing with signal SIGTERM (15).
May 6 13:54:49 sles2 stonithd: [10062]: info: external_run_cmd: Calling
'/usr/lib64/stonith/plugins/external/sbd reset sles1' returned 15
May 6 13:54:49 sles2 stonithd: [10062]: CRIT: external_reset_req: 'sbd
reset' for host sles1 failed with rc 15
May 6 13:54:49 sles2 stonithd: [6213]: info: failed to STONITH node sles1
with local device sbd_fense:0 (exitcode 5), gonna try the next local device
May 6 13:54:49 sles2 stonithd: [6213]: info: we can't manage sles1,
broadcast request to other nodes
--8<-----------------------------------------------------------------------
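By the way, since the log shows '/usr/lib64/stonith/plugins/external/sbd reset sles1'
timing out, one thing I can try is to exercise the SBD device by hand, outside the
cluster. A rough sketch (assuming /dev/mapper/SBD is the shared device and sles1 has
a slot allocated on it):
--8<-----------------------------------------------------------------------
# inspect the SBD header and the configured timeouts (watchdog/msgwait)
sbd -d /dev/mapper/SBD dump

# list the node slots and any pending messages
sbd -d /dev/mapper/SBD list

# write a harmless test message into the sles1 slot; if sles1 is up and its
# sbd daemon is watching the device, it should log that it received it
sbd -d /dev/mapper/SBD message sles1 test

# if that works, a manual reset should behave like the fencing operation above
sbd -d /dev/mapper/SBD message sles1 reset
--8<-----------------------------------------------------------------------
If the manual reset also hangs for about a minute, I suppose the problem is on the
sbd device/daemon side rather than in the CIB itself.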
P.S. Sorry, I do not know whether this list delivers files sent as attachments, so I
have pasted the log inline above.
--
Best regards,
Aleksey ZHOLDAK
ICQ 150074
MSN aleksey at zholdak.com
Skype aleksey.zholdak
Voice +380442388043