[Pacemaker] Dual primary DRBD+OCFS2+XEN+Pacemaker failover issues

kamal kishi kamal.kishi at gmail.com
Thu Jun 5 14:09:09 CEST 2014


Hi All,

I'm trying to configure a dual-primary DRBD + OCFS2 + Xen + Pacemaker cluster.
Attached are:
DRBD configuration - "drbd.txt"
Xen configuration - "xen config.txt"
Pacemaker configuration - "Pacemaker config.txt"

Steps followed:
1. Configured DRBD + OCFS2
2. Started the Xen VM manually once to install Windows 7
3. Stopped the Xen VM, unmounted OCFS2 and demoted DRBD (see the command sketch below)
4. Configured Pacemaker
5. All resources started; the Xen VM (xenwin7) started on server1
6. Turned off server1
7. The xenwin7 resource on server2 reported itself as failed and then unmanaged
Important:
8. Once the status turns unmanaged, xenwin7 can be opened with vncviewer, but the
screen is a blank white screen

The above is issue number 1; the log file is attached as "syslog.txt".

9. Turned server1 back on and restarted corosync on both server1 and server2 at the
same time.
The DRBD and OCFS2 resources started successfully, but xenwin7 did not start at all.

This is issue number 2; the log file is attached as "syslog later.txt".
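
For reference, steps 2, 3 and 9 correspond roughly to commands like these (an
illustrative sketch only; paths and names are taken from the attached files, and the
domain name is assumed to match the resource name):

  xm create /home/cluster/xen/win7.cfg   # step 2: boot the Windows 7 guest by hand
  xm shutdown xenwin7                    # step 3: stop the guest (assumes the domU is named xenwin7)
  umount /cluster                        # step 3: unmount the OCFS2 filesystem
  drbdadm secondary r0                   # step 3: demote DRBD (run on both nodes)
  service corosync restart               # step 9: restart corosync on both nodes at the same time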

Looking forward to a solution, as there is a FAT to be addressed next week using this
cluster configuration.

Regards,
Kamal Kishore B V
-------------- next part (drbd.txt) --------------
global { usage-count no; }

common {
    syncer {
        rate 60M;
        csums-alg sha1;
        al-extents 809;
        verify-alg sha1;
    }
}

resource r0 {
    protocol C;
    startup {
        degr-wfc-timeout 30;
        wfc-timeout 30;
        become-primary-on both;
    }
    disk {
        fencing resource-and-stonith;
        on-io-error detach;
    }
    handlers {
        fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
        outdate-peer "/usr/lib/drbd/outdate-peer.sh";
        split-brain "/usr/lib/drbd/notify-split-brain.sh root";
        pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh root";
        pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh root";
        local-io-error "/usr/lib/drbd/notify-io-error.sh root";
    }
    net {
        allow-two-primaries;
        cram-hmac-alg sha1;
        shared-secret "kalki";
        after-sb-0pri discard-zero-changes;
        after-sb-1pri discard-secondary;
        after-sb-2pri disconnect;
        rr-conflict disconnect;
    }
    on server1 {
        device /dev/drbd0;
        disk /dev/sda3;
        address 10.0.0.1:7788;
        meta-disk internal;
    }
    on server2 {
        device /dev/drbd0;
        disk /dev/sda3;
        address 10.0.0.2:7788;
        meta-disk internal;
    }
}
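
For reference, the initial bring-up implied by this resource looks roughly like the
following (an illustrative sketch only, assuming DRBD 8.3-style userland and an
unused /dev/sda3):

  drbdadm create-md r0                              # on both nodes: write the internal metadata
  drbdadm up r0                                     # on both nodes: attach and connect
  drbdadm -- --overwrite-data-of-peer primary r0    # on one node only: seed the initial sync
  drbdadm primary r0                                # on the second node, once UpToDate (allow-two-primaries)
  mkfs.ocfs2 -N 2 /dev/drbd0                        # once, from either primary: create the shared filesystem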
-------------- next part (Pacemaker config.txt) --------------
crm configure
property no-quorum-policy=ignore
property stonith-enabled=false
property default-resource-stickiness=1000
commit
bye

crm configure
primitive Clu-FS-DRBD ocf:linbit:drbd \
params drbd_resource="r0" \
operations $id="Clu-FS-DRBD-ops" \
op start interval="0" timeout="49s" \
op stop interval="0" timeout="50s" \
op monitor interval="40s" role="Master" timeout="50s" \
op monitor interval="41s" role="Slave" timeout="51s" \
meta target-role="started"

ms Clu-FS-DRBD-Master Clu-FS-DRBD \
meta resource-stickines="100" master-max="2" notify="true" interleave="true"

primitive Clu-FS-Mount ocf:heartbeat:Filesystem \
params device="/dev/drbd/by-res/r0" directory="/cluster" fstype="ocfs2" \
op monitor interval="120s" \
meta target-role="started"

clone Clu-FS-Mount-Clone Clu-FS-Mount \
meta interleave="true" ordered="true"

order Cluster-FS-After-DRBD inf: \
Clu-FS-DRBD-Master:promote \
Clu-FS-Mount-Clone:start


primitive xenwin7 ocf:heartbeat:Xen \
params xmfile="/home/cluster/xen/win7.cfg" \
op monitor interval="40s" \
meta target-role="started" is-managed="true" allow-migrate="true"

colocation Clu-Clo-DRBD inf: Clu-FS-Mount-Clone Clu-FS-DRBD-Master:Master
colocation win7-Xen-Clu-Clo inf: xenwin7 Clu-FS-Mount-Clone
commit
bye
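
Note on the above: xenwin7 is colocated with Clu-FS-Mount-Clone, but there is no order
constraint between them. If one were wanted, in the same crm syntax it would look
something like this (illustrative only; the constraint name is made up and this is not
part of the configuration that was actually loaded):

crm configure
order win7-Xen-After-Mount inf: Clu-FS-Mount-Clone:start xenwin7:start
commit
bye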
-------------- next part (syslog.txt) --------------
Jun  5 16:50:18 server2 NetworkManager[959]: <info> (eth6): carrier now OFF (device state 10)
Jun  5 16:50:18 server2 kernel: [  371.012666] bnx2 0000:01:00.0: eth6: NIC Copper Link is Down
Jun  5 16:50:18 server2 kernel: [  371.013260] xenbr0: port 1(eth6) entering forwarding state
Jun  5 16:50:20 server2 NetworkManager[959]: <info> (eth6): carrier now ON (device state 10)
Jun  5 16:50:20 server2 kernel: [  373.237729] bnx2 0000:01:00.0: eth6: NIC Copper Link is Up, 100 Mbps full duplex, receive & transmit flow control ON
Jun  5 16:50:20 server2 kernel: [  373.238249] xenbr0: port 1(eth6) entering forwarding state
Jun  5 16:50:20 server2 kernel: [  373.238268] xenbr0: port 1(eth6) entering forwarding state
Jun  5 16:50:20 server2 corosync[1558]:   [TOTEM ] A processor failed, forming new configuration.
Jun  5 16:50:21 server2 kernel: [  374.154609] bnx2 0000:01:00.0: eth6: NIC Copper Link is Down
Jun  5 16:50:21 server2 NetworkManager[959]: <info> (eth6): carrier now OFF (device state 10)
Jun  5 16:50:21 server2 kernel: [  374.237292] xenbr0: port 1(eth6) entering forwarding state
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] notice: pcmk_peer_update: Transitional membership event on ring 116: memb=1, new=0, lost=1
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] info: pcmk_peer_update: memb: server2 33554442
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] info: pcmk_peer_update: lost: server1 16777226
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] notice: pcmk_peer_update: Stable membership event on ring 116: memb=1, new=0, lost=0
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] info: pcmk_peer_update: MEMB: server2 33554442
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] info: ais_mark_unseen_peer_dead: Node server1 was not seen in the previous transition
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] info: update_member: Node 16777226/server1 is now: lost
Jun  5 16:50:24 server2 corosync[1558]:   [pcmk  ] info: send_member_notification: Sending membership update 116 to 2 children
Jun  5 16:50:24 server2 corosync[1558]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
Jun  5 16:50:24 server2 corosync[1558]:   [CPG   ] chosen downlist: sender r(0) ip(10.0.0.2) ; members(old:2 left:1)
Jun  5 16:50:24 server2 corosync[1558]:   [MAIN  ] Completed service synchronization, ready to provide service.
Jun  5 16:50:24 server2 crmd: [1596]: notice: ais_dispatch_message: Membership 116: quorum lost
Jun  5 16:50:24 server2 crmd: [1596]: info: ais_status_callback: status: server1 is now lost (was member)
Jun  5 16:50:24 server2 crmd: [1596]: info: crm_update_peer: Node server1: id=16777226 state=lost (new) addr=r(0) ip(10.0.0.1)  votes=1 born=104 seen=112 proc=00000000000000000000000000111312
Jun  5 16:50:24 server2 crmd: [1596]: WARN: check_dead_member: Our DC node (server1) left the cluster
Jun  5 16:50:24 server2 cib: [1592]: notice: ais_dispatch_message: Membership 116: quorum lost
Jun  5 16:50:24 server2 cib: [1592]: info: crm_update_peer: Node server1: id=16777226 state=lost (new) addr=r(0) ip(10.0.0.1)  votes=1 born=104 seen=112 proc=00000000000000000000000000111312
Jun  5 16:50:24 server2 crmd: [1596]: info: do_state_transition: State transition S_NOT_DC -> S_ELECTION [ input=I_ELECTION cause=C_FSA_INTERNAL origin=check_dead_member ]
Jun  5 16:50:24 server2 crmd: [1596]: info: update_dc: Unset DC server1
Jun  5 16:50:24 server2 crmd: [1596]: info: do_state_transition: State transition S_ELECTION -> S_INTEGRATION [ input=I_ELECTION_DC cause=C_FSA_INTERNAL origin=do_election_check ]
Jun  5 16:50:24 server2 crmd: [1596]: info: do_te_control: Registering TE UUID: b1dadb9e-1efe-40bd-a718-8dde49a3d26b
Jun  5 16:50:24 server2 crmd: [1596]: info: set_graph_functions: Setting custom graph functions
Jun  5 16:50:24 server2 crmd: [1596]: info: unpack_graph: Unpacked transition -1: 0 actions in 0 synapses
Jun  5 16:50:24 server2 crmd: [1596]: info: do_dc_takeover: Taking over DC status for this partition
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_readwrite: We are now in R/W mode
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_master for section 'all' (origin=local/crmd/17, version=0.66.8): ok (rc=0)
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section cib (origin=local/crmd/18, version=0.66.9): ok (rc=0)
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/20, version=0.66.10): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: join_make_offer: Making join offers based on membership 116
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/22, version=0.66.11): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: do_dc_join_offer_all: join-1: Waiting on 1 outstanding join acks
Jun  5 16:50:24 server2 crmd: [1596]: info: ais_dispatch_message: Membership 116: quorum still lost
Jun  5 16:50:24 server2 crmd: [1596]: info: crmd_ais_dispatch: Setting expected votes to 2
Jun  5 16:50:24 server2 crmd: [1596]: info: update_dc: Set DC to server2 (3.0.5)
Jun  5 16:50:24 server2 crmd: [1596]: info: config_query_callback: Shutdown escalation occurs after: 1200000ms
Jun  5 16:50:24 server2 crmd: [1596]: info: config_query_callback: Checking for expired actions every 900000ms
Jun  5 16:50:24 server2 crmd: [1596]: info: config_query_callback: Sending expected-votes=2 to corosync
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/25, version=0.66.12): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: ais_dispatch_message: Membership 116: quorum still lost
Jun  5 16:50:24 server2 crmd: [1596]: info: crmd_ais_dispatch: Setting expected votes to 2
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/28, version=0.66.13): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: do_state_transition: State transition S_INTEGRATION -> S_FINALIZE_JOIN [ input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state ]
Jun  5 16:50:24 server2 crmd: [1596]: info: do_state_transition: All 1 cluster nodes responded to the join offer.
Jun  5 16:50:24 server2 crmd: [1596]: info: do_dc_join_finalize: join-1: Syncing the CIB from server2 to the rest of the cluster
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_sync for section 'all' (origin=local/crmd/29, version=0.66.13): ok (rc=0)
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/30, version=0.66.14): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: do_dc_join_ack: join-1: Updating node state to member for server2
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_delete for section //node_state[@uname='server2']/lrm (origin=local/crmd/31, version=0.66.15): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: erase_xpath_callback: Deletion of "//node_state[@uname='server2']/lrm": ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: do_state_transition: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE [ input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state ]
Jun  5 16:50:24 server2 crmd: [1596]: info: do_state_transition: All 1 cluster nodes are eligible to run resources.
Jun  5 16:50:24 server2 crmd: [1596]: info: do_dc_join_final: Ensuring DC, quorum and node attributes are up-to-date
Jun  5 16:50:24 server2 crmd: [1596]: info: crm_update_quorum: Updating quorum status to false (call=35)
Jun  5 16:50:24 server2 crmd: [1596]: info: abort_transition_graph: do_te_invoke:167 - Triggered transition abort (complete=1) : Peer Cancelled
Jun  5 16:50:24 server2 crmd: [1596]: info: do_pe_invoke: Query 36: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 16:50:24 server2 attrd: [1594]: notice: attrd_local_callback: Sending full refresh (origin=crmd)
Jun  5 16:50:24 server2 attrd: [1594]: notice: attrd_trigger_update: Sending flush op to all hosts for: master-Clu-FS-DRBD:1 (10000)
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/33, version=0.66.17): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: WARN: match_down_event: No match for shutdown action on server1
Jun  5 16:50:24 server2 crmd: [1596]: info: te_update_diff: Stonith/shutdown of server1 not matched
Jun  5 16:50:24 server2 crmd: [1596]: info: abort_transition_graph: te_update_diff:215 - Triggered transition abort (complete=1, tag=node_state, id=server1, magic=NA, cib=0.66.18) : Node failure
Jun  5 16:50:24 server2 crmd: [1596]: info: do_pe_invoke: Query 37: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 16:50:24 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_modify for section cib (origin=local/crmd/35, version=0.66.19): ok (rc=0)
Jun  5 16:50:24 server2 crmd: [1596]: info: do_pe_invoke_callback: Invoking the PE: query=37, ref=pe_calc-dc-1401967224-14, seq=116, quorate=0
Jun  5 16:50:24 server2 pengine: [1595]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 16:50:24 server2 pengine: [1595]: notice: RecurringOp:  Start recurring monitor (40s) for xenwin7 on server2
Jun  5 16:50:24 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:0#011(Stopped)
Jun  5 16:50:24 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:1#011(Master server2)
Jun  5 16:50:24 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:0#011(Started server2)
Jun  5 16:50:24 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:1#011(Stopped)
Jun  5 16:50:24 server2 pengine: [1595]: notice: LogActions: Start   xenwin7#011(server2)
Jun  5 16:50:24 server2 attrd: [1594]: notice: attrd_trigger_update: Sending flush op to all hosts for: probe_complete (true)
Jun  5 16:50:24 server2 crmd: [1596]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 16:50:24 server2 crmd: [1596]: info: unpack_graph: Unpacked transition 0: 2 actions in 2 synapses
Jun  5 16:50:24 server2 crmd: [1596]: info: do_te_invoke: Processing graph 0 (ref=pe_calc-dc-1401967224-14) derived from /var/lib/pengine/pe-input-42.bz2
Jun  5 16:50:24 server2 crmd: [1596]: info: te_rsc_command: Initiating action 40: start xenwin7_start_0 on server2 (local)
Jun  5 16:50:24 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=40:0:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=xenwin7_start_0 )
Jun  5 16:50:24 server2 lrmd: [1593]: info: rsc:xenwin7 start[14] (pid 3601)
Jun  5 16:50:24 server2 pengine: [1595]: notice: process_pe_message: Transition 0: PEngine Input stored in: /var/lib/pengine/pe-input-42.bz2
Jun  5 16:50:28 server2 kernel: [  381.060744] block drbd0: PingAck did not arrive in time.
Jun  5 16:50:28 server2 kernel: [  381.060756] block drbd0: peer( Primary -> Unknown ) conn( Connected -> NetworkFailure ) pdsk( UpToDate -> DUnknown ) susp( 0 -> 1 ) 
Jun  5 16:50:28 server2 kernel: [  381.070203] block drbd0: asender terminated
Jun  5 16:50:28 server2 kernel: [  381.070211] block drbd0: Terminating drbd0_asender
Jun  5 16:50:28 server2 kernel: [  381.070286] block drbd0: Connection closed
Jun  5 16:50:28 server2 kernel: [  381.070365] block drbd0: conn( NetworkFailure -> Unconnected ) 
Jun  5 16:50:28 server2 kernel: [  381.070374] block drbd0: receiver terminated
Jun  5 16:50:28 server2 kernel: [  381.070379] block drbd0: helper command: /sbin/drbdadm fence-peer minor-0
Jun  5 16:50:28 server2 kernel: [  381.070386] block drbd0: Restarting drbd0_receiver
Jun  5 16:50:28 server2 kernel: [  381.070391] block drbd0: receiver (re)started
Jun  5 16:50:28 server2 kernel: [  381.070401] block drbd0: conn( Unconnected -> WFConnection ) 
Jun  5 16:50:28 server2 crm-fence-peer.sh[3677]: invoked for r0
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: - <cib admin_epoch="0" epoch="66" num_updates="21" />
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: + <cib epoch="67" num_updates="1" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.5" update-origin="server2" update-client="cibadmin" cib-last-written="Thu Jun  5 16:45:20 2014" have-quorum="0" dc-uuid="server2" >
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +   <configuration >
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +     <constraints >
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +       <rsc_location rsc="Clu-FS-DRBD-Master" id="drbd-fence-by-handler-Clu-FS-DRBD-Master" __crm_diff_marker__="added:top" >
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +         <rule role="Master" score="-INFINITY" id="drbd-fence-by-handler-rule-Clu-FS-DRBD-Master" >
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +           <expression attribute="#uname" operation="ne" value="server2" id="drbd-fence-by-handler-expr-Clu-FS-DRBD-Master" />
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +         </rule>
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +       </rsc_location>
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +     </constraints>
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: +   </configuration>
Jun  5 16:50:29 server2 cib: [1592]: info: cib:diff: + </cib>
Jun  5 16:50:29 server2 crmd: [1596]: info: abort_transition_graph: te_update_diff:124 - Triggered transition abort (complete=0, tag=diff, id=(null), magic=NA, cib=0.67.1) : Non-status change
Jun  5 16:50:29 server2 cib: [1592]: info: cib_process_request: Operation complete: op cib_create for section constraints (origin=local/cibadmin/2, version=0.67.1): ok (rc=0)
Jun  5 16:50:29 server2 crm-fence-peer.sh[3677]: INFO peer is reachable, my disk is UpToDate: placed constraint 'drbd-fence-by-handler-Clu-FS-DRBD-Master'
Jun  5 16:50:29 server2 crmd: [1596]: info: update_abort_priority: Abort priority upgraded from 0 to 1000000
Jun  5 16:50:29 server2 crmd: [1596]: info: update_abort_priority: Abort action done superceeded by restart
Jun  5 16:50:29 server2 kernel: [  382.210510] block drbd0: helper command: /sbin/drbdadm fence-peer minor-0 exit code 4 (0x400)
Jun  5 16:50:29 server2 kernel: [  382.210517] block drbd0: fence-peer helper returned 4 (peer was fenced)
Jun  5 16:50:29 server2 kernel: [  382.210526] block drbd0: pdsk( DUnknown -> Outdated ) 
Jun  5 16:50:29 server2 kernel: [  382.210571] block drbd0: new current UUID 662B73A915BFB851:70D57213B2E5F1C7:A6CF0DA1948D2428:A6CE0DA1948D2429
Jun  5 16:50:29 server2 kernel: [  382.216441] block drbd0: susp( 1 -> 0 ) 
Jun  5 16:50:44 server2 lrmd: [1593]: WARN: xenwin7:start process (PID 3601) timed out (try 1).  Killing with signal SIGTERM (15).
Jun  5 16:50:44 server2 lrmd: [1593]: WARN: operation start[14] on xenwin7 for client 1596: pid 3601 timed out
Jun  5 16:50:44 server2 crmd: [1596]: ERROR: process_lrm_event: LRM operation xenwin7_start_0 (14) Timed Out (timeout=20000ms)
Jun  5 16:50:44 server2 crmd: [1596]: WARN: status_from_rc: Action 40 (xenwin7_start_0) on server2 failed (target: 0 vs. rc: -2): Error
Jun  5 16:50:44 server2 crmd: [1596]: WARN: update_failcount: Updating failcount for xenwin7 on server2 after failed start: rc=-2 (update=INFINITY, time=1401967244)
Jun  5 16:50:44 server2 crmd: [1596]: info: abort_transition_graph: match_graph_event:277 - Triggered transition abort (complete=0, tag=lrm_rsc_op, id=xenwin7_last_failure_0, magic=2:-2;40:0:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b, cib=0.67.2) : Event failed
Jun  5 16:50:44 server2 crmd: [1596]: info: match_graph_event: Action xenwin7_start_0 (40) confirmed on server2 (rc=4)
Jun  5 16:50:44 server2 crmd: [1596]: info: run_graph: ====================================================
Jun  5 16:50:44 server2 crmd: [1596]: notice: run_graph: Transition 0 (Complete=1, Pending=0, Fired=0, Skipped=1, Incomplete=0, Source=/var/lib/pengine/pe-input-42.bz2): Stopped
Jun  5 16:50:44 server2 crmd: [1596]: info: te_graph_trigger: Transition 0 is now complete
Jun  5 16:50:44 server2 crmd: [1596]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 16:50:44 server2 crmd: [1596]: info: do_state_transition: All 1 cluster nodes are eligible to run resources.
Jun  5 16:50:44 server2 crmd: [1596]: info: do_pe_invoke: Query 39: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 16:50:44 server2 attrd: [1594]: notice: attrd_trigger_update: Sending flush op to all hosts for: fail-count-xenwin7 (INFINITY)
Jun  5 16:50:44 server2 crmd: [1596]: info: do_pe_invoke_callback: Invoking the PE: query=39, ref=pe_calc-dc-1401967244-16, seq=116, quorate=0
Jun  5 16:50:44 server2 pengine: [1595]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 16:50:44 server2 pengine: [1595]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server2: unknown exec error (-2)
Jun  5 16:50:44 server2 pengine: [1595]: notice: RecurringOp:  Start recurring monitor (40s) for xenwin7 on server2
Jun  5 16:50:44 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:0#011(Stopped)
Jun  5 16:50:44 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:1#011(Master server2)
Jun  5 16:50:44 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:0#011(Started server2)
Jun  5 16:50:44 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:1#011(Stopped)
Jun  5 16:50:44 server2 pengine: [1595]: notice: LogActions: Recover xenwin7#011(Started server2)
Jun  5 16:50:44 server2 crmd: [1596]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 16:50:44 server2 crmd: [1596]: info: unpack_graph: Unpacked transition 1: 4 actions in 4 synapses
Jun  5 16:50:44 server2 crmd: [1596]: info: do_te_invoke: Processing graph 1 (ref=pe_calc-dc-1401967244-16) derived from /var/lib/pengine/pe-input-43.bz2
Jun  5 16:50:44 server2 crmd: [1596]: info: te_rsc_command: Initiating action 3: stop xenwin7_stop_0 on server2 (local)
Jun  5 16:50:44 server2 attrd: [1594]: notice: attrd_perform_update: Sent update 18: fail-count-xenwin7=INFINITY
Jun  5 16:50:44 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=3:1:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=xenwin7_stop_0 )
Jun  5 16:50:44 server2 attrd: [1594]: notice: attrd_trigger_update: Sending flush op to all hosts for: last-failure-xenwin7 (1401967244)
Jun  5 16:50:44 server2 lrmd: [1593]: info: rsc:xenwin7 stop[15] (pid 3750)
Jun  5 16:50:44 server2 crmd: [1596]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=0, tag=nvpair, id=status-server2-fail-count-xenwin7, name=fail-count-xenwin7, value=INFINITY, magic=NA, cib=0.67.3) : Transient attribute: update
Jun  5 16:50:44 server2 crmd: [1596]: info: update_abort_priority: Abort priority upgraded from 0 to 1000000
Jun  5 16:50:44 server2 crmd: [1596]: info: update_abort_priority: Abort action done superceeded by restart
Jun  5 16:50:44 server2 attrd: [1594]: notice: attrd_perform_update: Sent update 21: last-failure-xenwin7=1401967244
Jun  5 16:50:44 server2 crmd: [1596]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=0, tag=nvpair, id=status-server2-last-failure-xenwin7, name=last-failure-xenwin7, value=1401967244, magic=NA, cib=0.67.4) : Transient attribute: update
Jun  5 16:50:44 server2 pengine: [1595]: notice: process_pe_message: Transition 1: PEngine Input stored in: /var/lib/pengine/pe-input-43.bz2
Jun  5 16:50:47 server2 kernel: [  400.368736] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 has been idle for 30.79 secs, shutting it down.
Jun  5 16:50:47 server2 kernel: [  400.368780] o2net: No longer connected to node server1 (num 0) at 10.0.0.1:7777
Jun  5 16:50:47 server2 kernel: [  400.368820] (kworker/u:2,69,5):dlm_do_master_request:1332 ERROR: link to 0 went down!
Jun  5 16:50:47 server2 kernel: [  400.368828] (xend,3671,4):dlm_send_remote_convert_request:395 ERROR: Error -112 when sending message 504 (key 0x649b059e) to node 0
Jun  5 16:50:47 server2 kernel: [  400.368837] o2dlm: Waiting on the death of node 0 in domain F18CB82626444DD0913312B7AE741C5B
Jun  5 16:50:47 server2 kernel: [  400.368846] (kworker/u:2,69,5):dlm_get_lock_resource:917 ERROR: status = -112
Jun  5 16:50:56 server2 kernel: [  409.512767] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:50:59 server2 kernel: [  412.512758] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:02 server2 kernel: [  415.512769] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:04 server2 lrmd: [1593]: WARN: xenwin7:stop process (PID 3750) timed out (try 1).  Killing with signal SIGTERM (15).
Jun  5 16:51:04 server2 lrmd: [1593]: WARN: operation stop[15] on xenwin7 for client 1596: pid 3750 timed out
Jun  5 16:51:04 server2 crmd: [1596]: ERROR: process_lrm_event: LRM operation xenwin7_stop_0 (15) Timed Out (timeout=20000ms)
Jun  5 16:51:04 server2 crmd: [1596]: WARN: status_from_rc: Action 3 (xenwin7_stop_0) on server2 failed (target: 0 vs. rc: -2): Error
Jun  5 16:51:04 server2 crmd: [1596]: WARN: update_failcount: Updating failcount for xenwin7 on server2 after failed stop: rc=-2 (update=INFINITY, time=1401967264)
Jun  5 16:51:04 server2 crmd: [1596]: info: abort_transition_graph: match_graph_event:277 - Triggered transition abort (complete=0, tag=lrm_rsc_op, id=xenwin7_last_failure_0, magic=2:-2;3:1:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b, cib=0.67.5) : Event failed
Jun  5 16:51:04 server2 crmd: [1596]: info: match_graph_event: Action xenwin7_stop_0 (3) confirmed on server2 (rc=4)
Jun  5 16:51:04 server2 crmd: [1596]: info: run_graph: ====================================================
Jun  5 16:51:04 server2 crmd: [1596]: notice: run_graph: Transition 1 (Complete=1, Pending=0, Fired=0, Skipped=3, Incomplete=0, Source=/var/lib/pengine/pe-input-43.bz2): Stopped
Jun  5 16:51:04 server2 crmd: [1596]: info: te_graph_trigger: Transition 1 is now complete
Jun  5 16:51:04 server2 crmd: [1596]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 16:51:04 server2 crmd: [1596]: info: do_state_transition: All 1 cluster nodes are eligible to run resources.
Jun  5 16:51:04 server2 crmd: [1596]: info: do_pe_invoke: Query 41: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 16:51:04 server2 attrd: [1594]: notice: attrd_trigger_update: Sending flush op to all hosts for: last-failure-xenwin7 (1401967264)
Jun  5 16:51:04 server2 crmd: [1596]: info: do_pe_invoke_callback: Invoking the PE: query=41, ref=pe_calc-dc-1401967264-18, seq=116, quorate=0
Jun  5 16:51:04 server2 attrd: [1594]: notice: attrd_perform_update: Sent update 23: last-failure-xenwin7=1401967264
Jun  5 16:51:04 server2 pengine: [1595]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 16:51:04 server2 pengine: [1595]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server2: unknown exec error (-2)
Jun  5 16:51:04 server2 pengine: [1595]: WARN: common_apply_stickiness: Forcing xenwin7 away from server2 after 1000000 failures (max=1000000)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:0#011(Stopped)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:1#011(Master server2)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:0#011(Started server2)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:1#011(Stopped)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   xenwin7#011(Started unmanaged)
Jun  5 16:51:04 server2 crmd: [1596]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=1, tag=nvpair, id=status-server2-last-failure-xenwin7, name=last-failure-xenwin7, value=1401967264, magic=NA, cib=0.67.6) : Transient attribute: update
Jun  5 16:51:04 server2 crmd: [1596]: info: handle_response: pe_calc calculation pe_calc-dc-1401967264-18 is obsolete
Jun  5 16:51:04 server2 crmd: [1596]: info: do_pe_invoke: Query 42: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 16:51:04 server2 crmd: [1596]: info: do_pe_invoke_callback: Invoking the PE: query=42, ref=pe_calc-dc-1401967264-19, seq=116, quorate=0
Jun  5 16:51:04 server2 pengine: [1595]: notice: process_pe_message: Transition 2: PEngine Input stored in: /var/lib/pengine/pe-input-44.bz2
Jun  5 16:51:04 server2 pengine: [1595]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 16:51:04 server2 pengine: [1595]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server2: unknown exec error (-2)
Jun  5 16:51:04 server2 pengine: [1595]: WARN: common_apply_stickiness: Forcing xenwin7 away from server2 after 1000000 failures (max=1000000)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:0#011(Stopped)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:1#011(Master server2)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:0#011(Started server2)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:1#011(Stopped)
Jun  5 16:51:04 server2 pengine: [1595]: notice: LogActions: Leave   xenwin7#011(Started unmanaged)
Jun  5 16:51:04 server2 crmd: [1596]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 16:51:04 server2 crmd: [1596]: info: unpack_graph: Unpacked transition 3: 0 actions in 0 synapses
Jun  5 16:51:04 server2 crmd: [1596]: info: do_te_invoke: Processing graph 3 (ref=pe_calc-dc-1401967264-19) derived from /var/lib/pengine/pe-input-45.bz2
Jun  5 16:51:04 server2 crmd: [1596]: info: run_graph: ====================================================
Jun  5 16:51:04 server2 crmd: [1596]: notice: run_graph: Transition 3 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pengine/pe-input-45.bz2): Complete
Jun  5 16:51:04 server2 crmd: [1596]: info: te_graph_trigger: Transition 3 is now complete
Jun  5 16:51:04 server2 crmd: [1596]: info: notify_crmd: Transition 3 status: done - <null>
Jun  5 16:51:04 server2 crmd: [1596]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 16:51:04 server2 crmd: [1596]: info: do_state_transition: Starting PEngine Recheck Timer
Jun  5 16:51:04 server2 pengine: [1595]: notice: process_pe_message: Transition 3: PEngine Input stored in: /var/lib/pengine/pe-input-45.bz2
Jun  5 16:51:05 server2 kernel: [  418.512772] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:08 server2 kernel: [  421.512762] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:11 server2 kernel: [  424.512767] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:14 server2 kernel: [  427.512756] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:17 server2 kernel: [  430.448743] o2net: No connection established with node 0 after 30.0 seconds, giving up.
Jun  5 16:51:17 server2 kernel: [  430.512753] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:22 server2 kernel: [  435.452767] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:25 server2 kernel: [  438.452763] o2net: Connection to node server1 (num 0) at 10.0.0.1:7777 shutdown, state 7
Jun  5 16:51:27 server2 kernel: [  440.331164] (xend,3671,5):dlm_send_remote_convert_request:395 ERROR: Error -107 when sending message 504 (key 0x649b059e) to node 0
Jun  5 16:51:27 server2 kernel: [  440.331171] o2dlm: Waiting on the death of node 0 in domain F18CB82626444DD0913312B7AE741C5B
Jun  5 16:51:27 server2 kernel: [  440.331197] o2cb: o2dlm has evicted node 0 from domain F18CB82626444DD0913312B7AE741C5B
Jun  5 16:51:27 server2 kernel: [  440.368733] (kworker/u:2,69,4):dlm_restart_lock_mastery:1221 ERROR: node down! 0
Jun  5 16:51:27 server2 kernel: [  440.368742] (kworker/u:2,69,4):dlm_wait_for_lock_mastery:1038 ERROR: status = -11
Jun  5 16:51:28 server2 kernel: [  441.344741] o2dlm: Waiting on the recovery of node 0 in domain F18CB82626444DD0913312B7AE741C5B
Jun  5 16:51:28 server2 kernel: [  441.372738] o2dlm: Waiting on the recovery of node 0 in domain F18CB82626444DD0913312B7AE741C5B
Jun  5 16:51:29 server2 kernel: [  441.984735] o2dlm: Begin recovery on domain F18CB82626444DD0913312B7AE741C5B for node 0
Jun  5 16:51:29 server2 kernel: [  441.984761] o2dlm: Node 1 (me) is the Recovery Master for the dead node 0 in domain F18CB82626444DD0913312B7AE741C5B
Jun  5 16:51:29 server2 kernel: [  441.984855] o2dlm: End recovery on domain F18CB82626444DD0913312B7AE741C5B
Jun  5 16:51:39 server2 kernel: [  452.355431] ocfs2: Begin replay journal (node 0, slot 1) on device (147,0)
Jun  5 16:51:42 server2 kernel: [  454.647883] ocfs2: End replay journal (node 0, slot 1) on device (147,0)
Jun  5 16:51:42 server2 kernel: [  454.666018] ocfs2: Beginning quota recovery on device (147,0) for slot 1
Jun  5 16:51:42 server2 kernel: [  454.707973] ocfs2: Finishing quota recovery on device (147,0) for slot 1
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: add XENBUS_PATH=backend/vbd/1/768
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: add XENBUS_PATH=backend/vbd/1/5632
Jun  5 16:51:43 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices added (path: /sys/devices/vif-1-0/net/vif1.0, iface: vif1.0)
Jun  5 16:51:43 server2 NetworkManager[959]:    SCPlugin-Ifupdown: device added (path: /sys/devices/vif-1-0/net/vif1.0, iface: vif1.0): no ifupdown configuration found.
Jun  5 16:51:43 server2 NetworkManager[959]: <warn> failed to allocate link cache: (-10) Operation not supported
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): carrier is OFF
Jun  5 16:51:43 server2 NetworkManager[959]: <error> [1401967303.430826] [nm-device-ethernet.c:456] real_update_permanent_hw_address(): (vif1.0): unable to read permanent MAC address (error 0)
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): new Ethernet device (driver: 'vif' ifindex: 8)
Jun  5 16:51:43 server2 kernel: [  456.004286] ADDRCONF(NETDEV_UP): vif1.0: link is not ready
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): exported as /org/freedesktop/NetworkManager/Devices/4
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): now managed
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): device state change: unmanaged -> unavailable (reason 'managed') [10 20 2]
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): bringing up device.
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): preparing device.
Jun  5 16:51:43 server2 NetworkManager[959]: <info> (vif1.0): deactivating device (reason 'managed') [2]
Jun  5 16:51:43 server2 NetworkManager[959]: <info> Unmanaged Device found; state CONNECTED forced. (see http://bugs.launchpad.net/bugs/191889)
Jun  5 16:51:43 server2 NetworkManager[959]: <info> Unmanaged Device found; state CONNECTED forced. (see http://bugs.launchpad.net/bugs/191889)
Jun  5 16:51:43 server2 NetworkManager[959]: <info> Added default wired connection 'Wired connection 4' for /sys/devices/vif-1-0/net/vif1.0
Jun  5 16:51:43 server2 kernel: [  456.005197] ADDRCONF(NETDEV_UP): vif1.0: link is not ready
Jun  5 16:51:43 server2 kernel: [  456.033642] device tap1.0 entered promiscuous mode
Jun  5 16:51:43 server2 kernel: [  456.033787] xenbr1: port 2(tap1.0) entering forwarding state
Jun  5 16:51:43 server2 kernel: [  456.033800] xenbr1: port 2(tap1.0) entering forwarding state
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/vif-bridge: online type_if=vif XENBUS_PATH=backend/vif/1/0
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/vif-bridge: add type_if=tap XENBUS_PATH=
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/1/5632/node /dev/loop0 to xenstore.
Jun  5 16:51:43 server2 kernel: [  456.193578] xenbr1: port 2(tap1.0) entering forwarding state
Jun  5 16:51:43 server2 kernel: [  456.201033] device vif1.0 entered promiscuous mode
Jun  5 16:51:43 server2 kernel: [  456.205250] ADDRCONF(NETDEV_UP): vif1.0: link is not ready
Jun  5 16:51:43 server2 kernel: [  456.208392] xenbr1: port 2(tap1.0) entering forwarding state
Jun  5 16:51:43 server2 kernel: [  456.208418] xenbr1: port 2(tap1.0) entering forwarding state
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/vif-bridge: Successful vif-bridge add for tap1.0, bridge xenbr1.
Jun  5 16:51:43 server2 NetworkManager[959]: <warn> /sys/devices/virtual/net/tap1.0: couldn't determine device driver; ignoring...
Jun  5 16:51:43 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices added (path: /sys/devices/virtual/net/tap1.0, iface: tap1.0)
Jun  5 16:51:43 server2 NetworkManager[959]:    SCPlugin-Ifupdown: device added (path: /sys/devices/virtual/net/tap1.0, iface: tap1.0): no ifupdown configuration found.
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/1/5632/physical-device 7:0 to xenstore.
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/1/5632/hotplug-status connected to xenstore.
Jun  5 16:51:43 server2 kernel: [  456.309876] ip_tables: (C) 2000-2006 Netfilter Core Team
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/1/768/node /dev/loop1 to xenstore.
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/1/768/physical-device 7:1 to xenstore.
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/1/768/hotplug-status connected to xenstore.
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/vif-bridge: Successful vif-bridge online for vif1.0, bridge xenbr1.
Jun  5 16:51:43 server2 logger: /etc/xen/scripts/vif-bridge: Writing backend/vif/1/0/hotplug-status connected to xenstore.
Jun  5 16:51:45 server2 avahi-daemon[1093]: Joining mDNS multicast group on interface tap1.0.IPv6 with address fe80::fcff:ffff:feff:ffff.
Jun  5 16:51:45 server2 avahi-daemon[1093]: New relevant interface tap1.0.IPv6 for mDNS.
Jun  5 16:51:45 server2 avahi-daemon[1093]: Registering new address record for fe80::fcff:ffff:feff:ffff on tap1.0.*.
Jun  5 16:51:48 server2 AptDaemon: INFO: Quitting due to inactivity
Jun  5 16:51:48 server2 AptDaemon: INFO: Quitting was requested
Jun  5 16:51:54 server2 kernel: [  466.808723] tap1.0: no IPv6 routers present
-------------- next part (syslog later.txt) --------------
Jun  5 17:06:04 server2 crmd: [1596]: info: crm_timer_popped: PEngine Recheck Timer (I_PE_CALC) just popped (900000ms)
Jun  5 17:06:04 server2 crmd: [1596]: info: do_state_transition: State transition S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_TIMER_POPPED origin=crm_timer_popped ]
Jun  5 17:06:04 server2 crmd: [1596]: info: do_state_transition: Progressed to state S_POLICY_ENGINE after C_TIMER_POPPED
Jun  5 17:06:04 server2 crmd: [1596]: info: do_state_transition: All 1 cluster nodes are eligible to run resources.
Jun  5 17:06:04 server2 crmd: [1596]: info: do_pe_invoke: Query 43: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:06:04 server2 crmd: [1596]: info: do_pe_invoke_callback: Invoking the PE: query=43, ref=pe_calc-dc-1401968164-20, seq=116, quorate=0
Jun  5 17:06:04 server2 pengine: [1595]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:06:04 server2 pengine: [1595]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server2: unknown exec error (-2)
Jun  5 17:06:04 server2 pengine: [1595]: WARN: common_apply_stickiness: Forcing xenwin7 away from server2 after 1000000 failures (max=1000000)
Jun  5 17:06:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:0#011(Stopped)
Jun  5 17:06:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:1#011(Master server2)
Jun  5 17:06:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:0#011(Started server2)
Jun  5 17:06:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:1#011(Stopped)
Jun  5 17:06:04 server2 pengine: [1595]: notice: LogActions: Leave   xenwin7#011(Started unmanaged)
Jun  5 17:06:04 server2 pengine: [1595]: notice: process_pe_message: Transition 4: PEngine Input stored in: /var/lib/pengine/pe-input-45.bz2
Jun  5 17:06:04 server2 crmd: [1596]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:06:04 server2 crmd: [1596]: info: unpack_graph: Unpacked transition 4: 0 actions in 0 synapses
Jun  5 17:06:04 server2 crmd: [1596]: info: do_te_invoke: Processing graph 4 (ref=pe_calc-dc-1401968164-20) derived from /var/lib/pengine/pe-input-45.bz2
Jun  5 17:06:04 server2 crmd: [1596]: info: run_graph: ====================================================
Jun  5 17:06:04 server2 crmd: [1596]: notice: run_graph: Transition 4 (Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pengine/pe-input-45.bz2): Complete
Jun  5 17:06:04 server2 crmd: [1596]: info: te_graph_trigger: Transition 4 is now complete
Jun  5 17:06:04 server2 crmd: [1596]: info: notify_crmd: Transition 4 status: done - <null>
Jun  5 17:06:04 server2 crmd: [1596]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:06:04 server2 crmd: [1596]: info: do_state_transition: Starting PEngine Recheck Timer
Jun  5 17:07:56 server2 corosync[1558]:   [SERV  ] Unloading all Corosync service engines.
Jun  5 17:07:56 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: Shuting down Pacemaker
Jun  5 17:07:56 server2 corosync[1558]:   [pcmk  ] notice: stop_child: Sent -15 to crmd: [1596]
Jun  5 17:07:56 server2 crmd: [1596]: info: crm_signal_dispatch: Invoking handler for signal 15: Terminated
Jun  5 17:07:56 server2 crmd: [1596]: info: crm_shutdown: Requesting shutdown
Jun  5 17:07:56 server2 crmd: [1596]: notice: crm_shutdown: Forcing shutdown in: 1200000ms
Jun  5 17:07:56 server2 crmd: [1596]: info: do_state_transition: State transition S_IDLE -> S_POLICY_ENGINE [ input=I_SHUTDOWN cause=C_SHUTDOWN origin=crm_shutdown ]
Jun  5 17:07:56 server2 crmd: [1596]: info: do_state_transition: All 1 cluster nodes are eligible to run resources.
Jun  5 17:07:56 server2 crmd: [1596]: info: do_shutdown_req: Sending shutdown request to DC: server2
Jun  5 17:07:56 server2 crmd: [1596]: info: handle_shutdown_request: Creating shutdown request for server2 (state=S_POLICY_ENGINE)
Jun  5 17:07:56 server2 attrd: [1594]: notice: attrd_trigger_update: Sending flush op to all hosts for: shutdown (1401968276)
Jun  5 17:07:56 server2 attrd: [1594]: notice: attrd_perform_update: Sent update 26: shutdown=1401968276
Jun  5 17:07:57 server2 crmd: [1596]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=1, tag=nvpair, id=status-server2-shutdown, name=shutdown, value=1401968276, magic=NA, cib=0.67.8) : Transient attribute: update
Jun  5 17:07:57 server2 crmd: [1596]: info: do_pe_invoke: Query 45: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:07:57 server2 crmd: [1596]: info: do_pe_invoke_callback: Invoking the PE: query=45, ref=pe_calc-dc-1401968276-22, seq=116, quorate=0
Jun  5 17:07:57 server2 pengine: [1595]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:07:57 server2 pengine: [1595]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server2: unknown exec error (-2)
Jun  5 17:07:57 server2 pengine: [1595]: WARN: common_apply_stickiness: Forcing xenwin7 away from server2 after 1000000 failures (max=1000000)
Jun  5 17:07:57 server2 pengine: [1595]: notice: stage6: Scheduling Node server2 for shutdown
Jun  5 17:07:57 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:0#011(Stopped)
Jun  5 17:07:57 server2 pengine: [1595]: notice: LogActions: Demote  Clu-FS-DRBD:1#011(Master -> Stopped server2)
Jun  5 17:07:57 server2 pengine: [1595]: notice: LogActions: Stop    Clu-FS-Mount:0#011(server2)
Jun  5 17:07:57 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:1#011(Stopped)
Jun  5 17:07:57 server2 pengine: [1595]: notice: LogActions: Leave   xenwin7#011(Started unmanaged)
Jun  5 17:07:57 server2 crmd: [1596]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:07:57 server2 crmd: [1596]: info: unpack_graph: Unpacked transition 5: 22 actions in 22 synapses
Jun  5 17:07:57 server2 crmd: [1596]: info: do_te_invoke: Processing graph 5 (ref=pe_calc-dc-1401968276-22) derived from /var/lib/pengine/pe-input-46.bz2
Jun  5 17:07:57 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 28 fired and confirmed
Jun  5 17:07:57 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 35 fired and confirmed
Jun  5 17:07:57 server2 crmd: [1596]: info: te_rsc_command: Initiating action 51: notify Clu-FS-DRBD:1_pre_notify_demote_0 on server2 (local)
Jun  5 17:07:57 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=51:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=Clu-FS-DRBD:1_notify_0 )
Jun  5 17:07:57 server2 lrmd: [1593]: info: rsc:Clu-FS-DRBD:1 notify[16] (pid 5221)
Jun  5 17:07:57 server2 crmd: [1596]: info: te_rsc_command: Initiating action 32: stop Clu-FS-Mount:0_stop_0 on server2 (local)
Jun  5 17:07:57 server2 lrmd: [1593]: info: cancel_op: operation monitor[13] on Clu-FS-Mount:0 for client 1596, its parameters: fstype=[ocfs2] CRM_meta_timeout=[20000] CRM_meta_name=[monitor] crm_feature_set=[3.0.5] device=[/dev/drbd/by-res/r0] CRM_meta_notify=[false] CRM_meta_clone_node_max=[1] CRM_meta_clone=[0] CRM_meta_clone_max=[2] CRM_meta_interval=[120000] CRM_meta_globally_unique=[false] directory=[/cluster]  cancelled
Jun  5 17:07:57 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=32:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=Clu-FS-Mount:0_stop_0 )
Jun  5 17:07:57 server2 lrmd: [1593]: info: rsc:Clu-FS-Mount:0 stop[17] (pid 5222)
Jun  5 17:07:57 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-Mount:0_monitor_120000 (call=13, status=1, cib-update=0, confirmed=true) Cancelled
Jun  5 17:07:57 server2 pengine: [1595]: notice: process_pe_message: Transition 5: PEngine Input stored in: /var/lib/pengine/pe-input-46.bz2
Jun  5 17:07:57 server2 Filesystem[5222]: INFO: Running stop for /dev/drbd/by-res/r0 on /cluster
Jun  5 17:07:57 server2 lrmd: [1593]: info: operation notify[16] on Clu-FS-DRBD:1 for client 1596: pid 5221 exited with return code 0
Jun  5 17:07:57 server2 crmd: [1596]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:1_notify_0 from 51:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b: lrm_invoke-lrmd-1401968277-25
Jun  5 17:07:57 server2 crmd: [1596]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968277-25 from server2
Jun  5 17:07:57 server2 crmd: [1596]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (51) confirmed on server2 (rc=0)
Jun  5 17:07:57 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-DRBD:1_notify_0 (call=16, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:07:57 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 29 fired and confirmed
Jun  5 17:07:57 server2 Filesystem[5222]: INFO: Trying to unmount /cluster
Jun  5 17:07:57 server2 lrmd: [1593]: info: RA output: (Clu-FS-Mount:0:stop:stderr) umount: /cluster: device is busy.#012        (In some cases useful info about processes that use#012         the device is found by lsof(8) or fuser(1))
Jun  5 17:07:57 server2 lrmd: [1593]: info: RA output: (Clu-FS-Mount:0:stop:stderr) 
Jun  5 17:07:57 server2 Filesystem[5222]: ERROR: Couldn't unmount /cluster; trying cleanup with TERM
Jun  5 17:07:57 server2 lrmd: [1593]: info: RA output: (Clu-FS-Mount:0:stop:stderr) /cluster:           
Jun  5 17:07:57 server2 lrmd: [1593]: info: RA output: (Clu-FS-Mount:0:stop:stdout)   3886
Jun  5 17:07:57 server2 lrmd: [1593]: info: RA output: (Clu-FS-Mount:0:stop:stderr) 
Jun  5 17:07:57 server2 Filesystem[5222]: INFO: Some processes on /cluster were signalled
Jun  5 17:07:57 server2 avahi-daemon[1093]: Interface tap1.0.IPv6 no longer relevant for mDNS.
Jun  5 17:07:57 server2 avahi-daemon[1093]: Leaving mDNS multicast group on interface tap1.0.IPv6 with address fe80::fcff:ffff:feff:ffff.
Jun  5 17:07:57 server2 avahi-daemon[1093]: Withdrawing address record for fe80::fcff:ffff:feff:ffff on tap1.0.
Jun  5 17:07:57 server2 avahi-daemon[1093]: Withdrawing workstation service for tap1.0.
Jun  5 17:07:57 server2 kernel: [ 1429.910813] xenbr1: port 2(tap1.0) entering forwarding state
Jun  5 17:07:57 server2 kernel: [ 1429.911993] xenbr1: port 2(tap1.0) entering disabled state
Jun  5 17:07:57 server2 kernel: [ 1429.912195] xenbr1: port 2(tap1.0) entering disabled state
Jun  5 17:07:57 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices removed (path: /sys/devices/virtual/net/tap1.0, iface: tap1.0)
Jun  5 17:07:57 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/console/1/0
Jun  5 17:07:57 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vfb/1/0
Jun  5 17:07:57 server2 avahi-daemon[1093]: Withdrawing workstation service for vif1.0.
Jun  5 17:07:57 server2 kernel: [ 1430.540878] xenbr1: port 3(vif1.0) entering disabled state
Jun  5 17:07:57 server2 kernel: [ 1430.541016] xenbr1: port 3(vif1.0) entering disabled state
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: remove XENBUS_PATH=backend/vbd/1/768
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: remove XENBUS_PATH=backend/vbd/1/5632
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: offline type_if=vif XENBUS_PATH=backend/vif/1/0
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: brctl delif xenbr1 vif1.0 failed
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vbd/1/5632
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: ifconfig vif1.0 down failed
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vbd/1/768
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: Successful vif-bridge offline for vif1.0, bridge xenbr1.
Jun  5 17:07:58 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices removed (path: /sys/devices/vif-1-0/net/vif1.0, iface: vif1.0)
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif1.0): now unmanaged
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif1.0): device state change: unavailable -> unmanaged (reason 'removed') [20 10 36]
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif1.0): cleaning up...
Jun  5 17:07:58 server2 NetworkManager[959]: <warn> (8) failed to find interface name for index
Jun  5 17:07:58 server2 NetworkManager[959]: (nm-system.c:685):nm_system_iface_get_flags: runtime check failed: (iface != NULL)
Jun  5 17:07:58 server2 NetworkManager[959]: <error> [1401968278.173365] [nm-system.c:687] nm_system_iface_get_flags(): (unknown): failed to get interface link object
Jun  5 17:07:58 server2 NetworkManager[959]: <info> Unmanaged Device found; state CONNECTED forced. (see http://bugs.launchpad.net/bugs/191889)
Jun  5 17:07:58  NetworkManager[959]: last message repeated 2 times
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vif/1/0
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: add XENBUS_PATH=backend/vbd/2/768
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: add XENBUS_PATH=backend/vbd/2/5632
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/2/768/hotplug-error /cluster/xenwin7.img does not exist. backend/vbd/2/768/hotplug-status error to xenstore.
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: /cluster/xenwin7.img does not exist.
Jun  5 17:07:58 server2 kernel: [ 1430.966466] device tap2.0 entered promiscuous mode
Jun  5 17:07:58 server2 kernel: [ 1430.966551] xenbr1: port 2(tap2.0) entering forwarding state
Jun  5 17:07:58 server2 kernel: [ 1430.966563] xenbr1: port 2(tap2.0) entering forwarding state
Jun  5 17:07:58 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices added (path: /sys/devices/vif-2-0/net/vif2.0, iface: vif2.0)
Jun  5 17:07:58 server2 NetworkManager[959]:    SCPlugin-Ifupdown: device added (path: /sys/devices/vif-2-0/net/vif2.0, iface: vif2.0): no ifupdown configuration found.
Jun  5 17:07:58 server2 NetworkManager[959]: <warn> failed to allocate link cache: (-10) Operation not supported
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): carrier is OFF
Jun  5 17:07:58 server2 NetworkManager[959]: <error> [1401968278.405494] [nm-device-ethernet.c:456] real_update_permanent_hw_address(): (vif2.0): unable to read permanent MAC address (error 0)
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): new Ethernet device (driver: 'vif' ifindex: 10)
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): exported as /org/freedesktop/NetworkManager/Devices/5
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): now managed
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): device state change: unmanaged -> unavailable (reason 'managed') [10 20 2]
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): bringing up device.
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): preparing device.
Jun  5 17:07:58 server2 NetworkManager[959]: <info> (vif2.0): deactivating device (reason 'managed') [2]
Jun  5 17:07:58 server2 NetworkManager[959]: <info> Unmanaged Device found; state CONNECTED forced. (see http://bugs.launchpad.net/bugs/191889)
Jun  5 17:07:58 server2 NetworkManager[959]: <info> Unmanaged Device found; state CONNECTED forced. (see http://bugs.launchpad.net/bugs/191889)
Jun  5 17:07:58 server2 NetworkManager[959]: <info> Added default wired connection 'Wired connection 4' for /sys/devices/vif-2-0/net/vif2.0
Jun  5 17:07:58 server2 kernel: [ 1430.977969] ADDRCONF(NETDEV_UP): vif2.0: link is not ready
Jun  5 17:07:58 server2 kernel: [ 1430.979005] ADDRCONF(NETDEV_UP): vif2.0: link is not ready
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: online type_if=vif XENBUS_PATH=backend/vif/2/0
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: add type_if=tap XENBUS_PATH=
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/2/5632/node /dev/loop0 to xenstore.
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/2/5632/physical-device 7:0 to xenstore.
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/2/5632/hotplug-status connected to xenstore.
Jun  5 17:07:58 server2 kernel: [ 1431.132048] xenbr1: port 2(tap2.0) entering forwarding state
Jun  5 17:07:58 server2 kernel: [ 1431.132240] device vif2.0 entered promiscuous mode
Jun  5 17:07:58 server2 kernel: [ 1431.138486] ADDRCONF(NETDEV_UP): vif2.0: link is not ready
Jun  5 17:07:58 server2 kernel: [ 1431.144288] xenbr1: port 2(tap2.0) entering forwarding state
Jun  5 17:07:58 server2 kernel: [ 1431.144301] xenbr1: port 2(tap2.0) entering forwarding state
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: Successful vif-bridge add for tap2.0, bridge xenbr1.
Jun  5 17:07:58 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices added (path: /sys/devices/virtual/net/tap2.0, iface: tap2.0)
Jun  5 17:07:58 server2 NetworkManager[959]:    SCPlugin-Ifupdown: device added (path: /sys/devices/virtual/net/tap2.0, iface: tap2.0): no ifupdown configuration found.
Jun  5 17:07:58 server2 NetworkManager[959]: <warn> /sys/devices/virtual/net/tap2.0: couldn't determine device driver; ignoring...
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: Successful vif-bridge online for vif2.0, bridge xenbr1.
Jun  5 17:07:58 server2 logger: /etc/xen/scripts/vif-bridge: Writing backend/vif/2/0/hotplug-status connected to xenstore.
Jun  5 17:07:59 server2 kernel: [ 1431.795480] xenbr1: port 2(tap2.0) entering forwarding state
Jun  5 17:07:59 server2 kernel: [ 1431.796540] xenbr1: port 2(tap2.0) entering disabled state
Jun  5 17:07:59 server2 kernel: [ 1431.796759] xenbr1: port 2(tap2.0) entering disabled state
Jun  5 17:07:59 server2 avahi-daemon[1093]: Withdrawing workstation service for tap2.0.
Jun  5 17:07:59 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices removed (path: /sys/devices/virtual/net/tap2.0, iface: tap2.0)
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/console/2/0
Jun  5 17:07:59 server2 avahi-daemon[1093]: Withdrawing workstation service for vif2.0.
Jun  5 17:07:59 server2 kernel: [ 1431.982427] xenbr1: port 3(vif2.0) entering disabled state
Jun  5 17:07:59 server2 kernel: [ 1431.982605] xenbr1: port 3(vif2.0) entering disabled state
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vfb/2/0
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/block: remove XENBUS_PATH=backend/vbd/2/768
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/block: remove XENBUS_PATH=backend/vbd/2/5632
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/vif-bridge: offline type_if=vif XENBUS_PATH=backend/vif/2/0
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/2/768/hotplug-error xenstore-read backend/vbd/2/768/node failed. backend/vbd/2/768/hotplug-status error to xenstore.
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/vif-bridge: brctl delif xenbr1 vif2.0 failed
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/block: xenstore-read backend/vbd/2/768/node failed.
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/block: Writing backend/vbd/2/768/hotplug-error /etc/xen/scripts/block failed; error detected. backend/vbd/2/768/hotplug-status error to xenstore.
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/vif-bridge: ifconfig vif2.0 down failed
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/block: /etc/xen/scripts/block failed; error detected.
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vbd/2/768
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/vif-bridge: Successful vif-bridge offline for vif2.0, bridge xenbr1.
Jun  5 17:07:59 server2 NetworkManager[959]:    SCPlugin-Ifupdown: devices removed (path: /sys/devices/vif-2-0/net/vif2.0, iface: vif2.0)
Jun  5 17:07:59 server2 NetworkManager[959]: <info> (vif2.0): now unmanaged
Jun  5 17:07:59 server2 NetworkManager[959]: <info> (vif2.0): device state change: unavailable -> unmanaged (reason 'removed') [20 10 36]
Jun  5 17:07:59 server2 NetworkManager[959]: <info> (vif2.0): cleaning up...
Jun  5 17:07:59 server2 NetworkManager[959]: <warn> (10) failed to find interface name for index
Jun  5 17:07:59 server2 NetworkManager[959]: (nm-system.c:685):nm_system_iface_get_flags: runtime check failed: (iface != NULL)
Jun  5 17:07:59 server2 NetworkManager[959]: <error> [1401968279.590132] [nm-system.c:687] nm_system_iface_get_flags(): (unknown): failed to get interface link object
Jun  5 17:07:59 server2 NetworkManager[959]: <info> Unmanaged Device found; state CONNECTED forced. (see http://bugs.launchpad.net/bugs/191889)
Jun  5 17:07:59 server2 NetworkManager[959]: last message repeated 2 times
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vif/2/0
Jun  5 17:07:59 server2 logger: /etc/xen/scripts/xen-hotplug-cleanup: XENBUS_PATH=backend/vbd/2/5632
Jun  5 17:08:02 server2 kernel: [ 1434.957091] o2dlm: Leaving domain F18CB82626444DD0913312B7AE741C5B
Jun  5 17:08:03 server2 kernel: [ 1436.340747] o2net: No longer connected to node server1 (num 0) at 10.0.0.1:7777
Jun  5 17:08:03 server2 kernel: [ 1436.347079] ocfs2: Unmounting device (147,0) on (node 1)
Jun  5 17:08:03 server2 Filesystem[5222]: INFO: unmounted /cluster successfully
Jun  5 17:08:03 server2 lrmd: [1593]: info: operation stop[17] on Clu-FS-Mount:0 for client 1596: pid 5222 exited with return code 0
Jun  5 17:08:03 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-Mount:0_stop_0 (call=17, rc=0, cib-update=46, confirmed=true) ok
Jun  5 17:08:03 server2 crmd: [1596]: info: match_graph_event: Action Clu-FS-Mount:0_stop_0 (32) confirmed on server2 (rc=0)
Jun  5 17:08:03 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 36 fired and confirmed
Jun  5 17:08:03 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 26 fired and confirmed
Jun  5 17:08:03 server2 crmd: [1596]: info: te_rsc_command: Initiating action 6: demote Clu-FS-DRBD:1_demote_0 on server2 (local)
Jun  5 17:08:03 server2 lrmd: [1593]: info: cancel_op: operation monitor[11] on Clu-FS-DRBD:1 for client 1596, its parameters: drbd_resource=[r0] CRM_meta_role=[Master] CRM_meta_notify_stop_resource=[ ] CRM_meta_notify_start_resource=[Clu-FS-DRBD:1 ] CRM_meta_notify_inactive_resource=[Clu-FS-DRBD:1 ] CRM_meta_notify_master_uname=[server1 ] CRM_meta_timeout=[50000] CRM_meta_name=[monitor] CRM_meta_notify_demote_resource=[ ] CRM_meta_notify_promote_uname=[ ] crm_feature_set=[3.0.5] CRM_meta_notify=[true] CRM_meta_notify_start_uname=[server2 ] CRM_meta_clone_node cancelled
Jun  5 17:08:03 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=6:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=Clu-FS-DRBD:1_demote_0 )
Jun  5 17:08:03 server2 lrmd: [1593]: info: rsc:Clu-FS-DRBD:1 demote[18] (pid 6048)
Jun  5 17:08:03 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-DRBD:1_monitor_40000 (call=11, status=1, cib-update=0, confirmed=true) Cancelled
Jun  5 17:08:04 server2 kernel: [ 1436.599118] block drbd0: role( Primary -> Secondary ) 
Jun  5 17:08:04 server2 kernel: [ 1436.599165] block drbd0: bitmap WRITE of 0 pages took 0 jiffies
Jun  5 17:08:04 server2 kernel: [ 1436.599174] block drbd0: 0 KB (0 bits) marked out-of-sync by on disk bit-map.
Jun  5 17:08:04 server2 lrmd: [1593]: info: RA output: (Clu-FS-DRBD:1:demote:stdout) 
Jun  5 17:08:04 server2 lrmd: [1593]: info: operation demote[18] on Clu-FS-DRBD:1 for client 1596: pid 6048 exited with return code 0
Jun  5 17:08:04 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-DRBD:1_demote_0 (call=18, rc=0, cib-update=47, confirmed=true) ok
Jun  5 17:08:04 server2 crmd: [1596]: info: match_graph_event: Action Clu-FS-DRBD:1_demote_0 (6) confirmed on server2 (rc=0)
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 27 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 30 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_rsc_command: Initiating action 52: notify Clu-FS-DRBD:1_post_notify_demote_0 on server2 (local)
Jun  5 17:08:04 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=52:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=Clu-FS-DRBD:1_notify_0 )
Jun  5 17:08:04 server2 lrmd: [1593]: info: rsc:Clu-FS-DRBD:1 notify[19] (pid 6080)
Jun  5 17:08:04 server2 lrmd: [1593]: info: RA output: (Clu-FS-DRBD:1:notify:stdout) 
Jun  5 17:08:04 server2 lrmd: [1593]: info: operation notify[19] on Clu-FS-DRBD:1 for client 1596: pid 6080 exited with return code 0
Jun  5 17:08:04 server2 crmd: [1596]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:1_notify_0 from 52:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b: lrm_invoke-lrmd-1401968284-28
Jun  5 17:08:04 server2 crmd: [1596]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968284-28 from server2
Jun  5 17:08:04 server2 crmd: [1596]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (52) confirmed on server2 (rc=0)
Jun  5 17:08:04 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-DRBD:1_notify_0 (call=19, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 31 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 16 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_rsc_command: Initiating action 50: notify Clu-FS-DRBD:1_pre_notify_stop_0 on server2 (local)
Jun  5 17:08:04 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=50:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=Clu-FS-DRBD:1_notify_0 )
Jun  5 17:08:04 server2 lrmd: [1593]: info: rsc:Clu-FS-DRBD:1 notify[20] (pid 6112)
Jun  5 17:08:04 server2 lrmd: [1593]: info: operation notify[20] on Clu-FS-DRBD:1 for client 1596: pid 6112 exited with return code 0
Jun  5 17:08:04 server2 crmd: [1596]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:1_notify_0 from 50:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b: lrm_invoke-lrmd-1401968284-30
Jun  5 17:08:04 server2 crmd: [1596]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968284-30 from server2
Jun  5 17:08:04 server2 crmd: [1596]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (50) confirmed on server2 (rc=0)
Jun  5 17:08:04 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-DRBD:1_notify_0 (call=20, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 17 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 14 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_rsc_command: Initiating action 7: stop Clu-FS-DRBD:1_stop_0 on server2 (local)
Jun  5 17:08:04 server2 crmd: [1596]: info: do_lrm_rsc_op: Performing key=7:5:0:b1dadb9e-1efe-40bd-a718-8dde49a3d26b op=Clu-FS-DRBD:1_stop_0 )
Jun  5 17:08:04 server2 lrmd: [1593]: info: rsc:Clu-FS-DRBD:1 stop[21] (pid 6135)
Jun  5 17:08:04 server2 kernel: [ 1436.924662] block drbd0: Requested state change failed by peer: Refusing to be Primary while peer is not outdated (-7)
Jun  5 17:08:04 server2 kernel: [ 1436.924839] block drbd0: peer( Primary -> Unknown ) conn( Connected -> Disconnecting ) disk( UpToDate -> Outdated ) pdsk( UpToDate -> DUnknown ) 
Jun  5 17:08:04 server2 kernel: [ 1436.924976] block drbd0: meta connection shut down by peer.
Jun  5 17:08:04 server2 kernel: [ 1436.924981] block drbd0: asender terminated
Jun  5 17:08:04 server2 kernel: [ 1436.924985] block drbd0: Terminating drbd0_asender
Jun  5 17:08:04 server2 kernel: [ 1436.928412] block drbd0: Connection closed
Jun  5 17:08:04 server2 kernel: [ 1436.928447] block drbd0: conn( Disconnecting -> StandAlone ) 
Jun  5 17:08:04 server2 kernel: [ 1436.928479] block drbd0: receiver terminated
Jun  5 17:08:04 server2 kernel: [ 1436.928484] block drbd0: Terminating drbd0_receiver
Jun  5 17:08:04 server2 kernel: [ 1436.928522] block drbd0: disk( Outdated -> Failed ) 
Jun  5 17:08:04 server2 kernel: [ 1436.928543] block drbd0: Sending state for detaching disk failed
Jun  5 17:08:04 server2 kernel: [ 1436.928558] block drbd0: disk( Failed -> Diskless ) 
Jun  5 17:08:04 server2 kernel: [ 1436.928632] block drbd0: drbd_bm_resize called with capacity == 0
Jun  5 17:08:04 server2 kernel: [ 1436.928801] block drbd0: worker terminated
Jun  5 17:08:04 server2 kernel: [ 1436.928805] block drbd0: Terminating drbd0_worker
Jun  5 17:08:04 server2 lrmd: [1593]: info: RA output: (Clu-FS-DRBD:1:stop:stdout) 
Jun  5 17:08:04 server2 crm_attribute: [6166]: info: Invoked: crm_attribute -N server2 -n master-Clu-FS-DRBD:1 -l reboot -D 
Jun  5 17:08:04 server2 attrd: [1594]: notice: attrd_trigger_update: Sending flush op to all hosts for: master-Clu-FS-DRBD:1 (<null>)
Jun  5 17:08:04 server2 attrd: [1594]: notice: attrd_perform_update: Sent delete 28: node=server2, attr=master-Clu-FS-DRBD:1, id=<n/a>, set=(null), section=status
Jun  5 17:08:04 server2 crmd: [1596]: info: abort_transition_graph: te_update_diff:178 - Triggered transition abort (complete=0, tag=transient_attributes, id=server2, magic=NA, cib=0.67.11) : Transient attribute: removal
Jun  5 17:08:04 server2 crmd: [1596]: info: update_abort_priority: Abort priority upgraded from 0 to 1000000
Jun  5 17:08:04 server2 crmd: [1596]: info: update_abort_priority: Abort action done superceeded by restart
Jun  5 17:08:04 server2 lrmd: [1593]: info: operation stop[21] on Clu-FS-DRBD:1 for client 1596: pid 6135 exited with return code 0
Jun  5 17:08:04 server2 lrmd: [1593]: info: RA output: (Clu-FS-DRBD:1:stop:stdout) 
Jun  5 17:08:04 server2 attrd: [1594]: notice: attrd_perform_update: Sent delete -22: node=server2, attr=master-Clu-FS-DRBD:1, id=<n/a>, set=(null), section=status
Jun  5 17:08:04 server2 crmd: [1596]: info: process_lrm_event: LRM operation Clu-FS-DRBD:1_stop_0 (call=21, rc=0, cib-update=48, confirmed=true) ok
Jun  5 17:08:04 server2 crmd: [1596]: info: match_graph_event: Action Clu-FS-DRBD:1_stop_0 (7) confirmed on server2 (rc=0)
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 15 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 18 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: te_pseudo_action: Pseudo action 19 fired and confirmed
Jun  5 17:08:04 server2 crmd: [1596]: info: run_graph: ====================================================
Jun  5 17:08:04 server2 crmd: [1596]: notice: run_graph: Transition 5 (Complete=20, Pending=0, Fired=0, Skipped=2, Incomplete=0, Source=/var/lib/pengine/pe-input-46.bz2): Stopped
Jun  5 17:08:04 server2 crmd: [1596]: info: te_graph_trigger: Transition 5 is now complete
Jun  5 17:08:04 server2 crmd: [1596]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:04 server2 crmd: [1596]: info: do_state_transition: All 1 cluster nodes are eligible to run resources.
Jun  5 17:08:04 server2 crmd: [1596]: info: do_pe_invoke: Query 49: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:04 server2 crmd: [1596]: info: do_pe_invoke_callback: Invoking the PE: query=49, ref=pe_calc-dc-1401968284-32, seq=116, quorate=0
Jun  5 17:08:04 server2 pengine: [1595]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:04 server2 pengine: [1595]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server2: unknown exec error (-2)
Jun  5 17:08:04 server2 pengine: [1595]: WARN: common_apply_stickiness: Forcing xenwin7 away from server2 after 1000000 failures (max=1000000)
Jun  5 17:08:04 server2 pengine: [1595]: notice: stage6: Scheduling Node server2 for shutdown
Jun  5 17:08:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:0#011(Stopped)
Jun  5 17:08:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-DRBD:1#011(Stopped)
Jun  5 17:08:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:0#011(Stopped)
Jun  5 17:08:04 server2 pengine: [1595]: notice: LogActions: Leave   Clu-FS-Mount:1#011(Stopped)
Jun  5 17:08:04 server2 pengine: [1595]: notice: LogActions: Leave   xenwin7#011(Started unmanaged)
Jun  5 17:08:04 server2 crmd: [1596]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:04 server2 crmd: [1596]: info: unpack_graph: Unpacked transition 6: 1 actions in 1 synapses
Jun  5 17:08:04 server2 crmd: [1596]: info: do_te_invoke: Processing graph 6 (ref=pe_calc-dc-1401968284-32) derived from /var/lib/pengine/pe-input-47.bz2
Jun  5 17:08:04 server2 crmd: [1596]: info: te_crm_command: Executing crm-event (37): do_shutdown on server2
Jun  5 17:08:04 server2 crmd: [1596]: info: te_crm_command: crm-event (37) is a local shutdown
Jun  5 17:08:04 server2 crmd: [1596]: info: run_graph: ====================================================
Jun  5 17:08:04 server2 crmd: [1596]: notice: run_graph: Transition 6 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pengine/pe-input-47.bz2): Complete
Jun  5 17:08:04 server2 crmd: [1596]: info: te_graph_trigger: Transition 6 is now complete
Jun  5 17:08:04 server2 crmd: [1596]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_STOPPING [ input=I_STOP cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:04 server2 crmd: [1596]: info: do_dc_release: DC role released
Jun  5 17:08:04 server2 crmd: [1596]: info: pe_connection_destroy: Connection to the Policy Engine released
Jun  5 17:08:04 server2 crmd: [1596]: info: do_te_control: Transitioner is now inactive
Jun  5 17:08:04 server2 crmd: [1596]: info: do_shutdown: Disconnecting STONITH...
Jun  5 17:08:04 server2 crmd: [1596]: info: tengine_stonith_connection_destroy: Fencing daemon disconnected
Jun  5 17:08:04 server2 crmd: [1596]: info: do_lrm_control: Disconnected from the LRM
Jun  5 17:08:04 server2 crmd: [1596]: notice: terminate_ais_connection: Disconnecting from AIS
Jun  5 17:08:04 server2 crmd: [1596]: info: do_ha_control: Disconnected from OpenAIS
Jun  5 17:08:04 server2 crmd: [1596]: info: do_cib_control: Disconnecting CIB
Jun  5 17:08:04 server2 crmd: [1596]: info: crmd_cib_connection_destroy: Connection to the CIB terminated...
Jun  5 17:08:04 server2 crmd: [1596]: info: do_exit: Performing A_EXIT_0 - gracefully exiting the CRMd
Jun  5 17:08:04 server2 cib: [1592]: info: cib_process_readwrite: We are now in R/O mode
Jun  5 17:08:04 server2 crmd: [1596]: info: free_mem: Dropping I_RELEASE_SUCCESS: [ state=S_STOPPING cause=C_FSA_INTERNAL origin=do_dc_release ]
Jun  5 17:08:04 server2 cib: [1592]: WARN: send_ipc_message: IPC Channel to 1596 is not connected
Jun  5 17:08:04 server2 crmd: [1596]: info: free_mem: Dropping I_TERMINATE: [ state=S_STOPPING cause=C_FSA_INTERNAL origin=do_stop ]
Jun  5 17:08:04 server2 cib: [1592]: WARN: send_via_callback_channel: Delivery of reply to client 1596/6cf5b826-1a4b-4a38-8689-b1d841ec96a2 failed
Jun  5 17:08:04 server2 crmd: [1596]: info: crm_xml_cleanup: Cleaning up memory from libxml2
Jun  5 17:08:04 server2 cib: [1592]: WARN: do_local_notify: A-Sync reply to crmd failed: reply failed
Jun  5 17:08:04 server2 crmd: [1596]: info: do_exit: [crmd] stopped (0)
Jun  5 17:08:04 server2 corosync[1558]:   [pcmk  ] info: pcmk_ipc_exit: Client crmd (conn=0x1516270, async-conn=0x1516270) left
Jun  5 17:08:04 server2 pengine: [1595]: notice: process_pe_message: Transition 6: PEngine Input stored in: /var/lib/pengine/pe-input-47.bz2
Jun  5 17:08:04 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: crmd confirmed stopped
Jun  5 17:08:04 server2 corosync[1558]:   [pcmk  ] notice: stop_child: Sent -15 to pengine: [1595]
Jun  5 17:08:04 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: pengine confirmed stopped
Jun  5 17:08:04 server2 corosync[1558]:   [pcmk  ] notice: stop_child: Sent -15 to attrd: [1594]
Jun  5 17:08:04 server2 attrd: [1594]: notice: main: Exiting...
Jun  5 17:08:04 server2 corosync[1558]:   [pcmk  ] info: pcmk_ipc_exit: Client attrd (conn=0x1511a20, async-conn=0x1511a20) left
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: attrd confirmed stopped
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: stop_child: Sent -15 to lrmd: [1593]
Jun  5 17:08:05 server2 lrmd: [1593]: info: lrmd is shutting down
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: lrmd confirmed stopped
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: stop_child: Sent -15 to cib: [1592]
Jun  5 17:08:05 server2 cib: [1592]: info: crm_signal_dispatch: Invoking handler for signal 15: Terminated
Jun  5 17:08:05 server2 cib: [1592]: info: cib_shutdown: Disconnected 0 clients
Jun  5 17:08:05 server2 cib: [1592]: info: cib_process_disconnect: All clients disconnected...
Jun  5 17:08:05 server2 cib: [1592]: info: cib_ha_connection_destroy: Heartbeat disconnection complete... exiting
Jun  5 17:08:05 server2 cib: [1592]: info: cib_ha_connection_destroy: Exiting...
Jun  5 17:08:05 server2 cib: [1592]: info: crm_xml_cleanup: Cleaning up memory from libxml2
Jun  5 17:08:05 server2 cib: [1592]: info: main: Done
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] info: pcmk_ipc_exit: Client cib (conn=0x150d220, async-conn=0x150d220) left
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: cib confirmed stopped
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: stop_child: Sent -15 to stonith-ng: [1591]
Jun  5 17:08:05 server2 stonith-ng: [1591]: info: crm_signal_dispatch: Invoking handler for signal 15: Terminated
Jun  5 17:08:05 server2 stonith-ng: [1591]: info: stonith_shutdown: Terminating with  0 clients
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] info: pcmk_ipc_exit: Client stonith-ng (conn=0x1508ec0, async-conn=0x1508ec0) left
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: stonith-ng confirmed stopped
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] info: update_member: Node server2 now has process list: 00000000000000000000000000000002 (2)
Jun  5 17:08:05 server2 corosync[1558]:   [pcmk  ] notice: pcmk_shutdown: Shutdown complete
Jun  5 17:08:05 server2 corosync[1558]:   [SERV  ] Service engine unloaded: Pacemaker Cluster Manager 1.1.6
Jun  5 17:08:05 server2 corosync[1558]:   [SERV  ] Service engine unloaded: corosync extended virtual synchrony service
Jun  5 17:08:05 server2 corosync[1558]:   [SERV  ] Service engine unloaded: corosync configuration service
Jun  5 17:08:05 server2 corosync[1558]:   [SERV  ] Service engine unloaded: corosync cluster closed process group service v1.01
Jun  5 17:08:05 server2 corosync[1558]:   [SERV  ] Service engine unloaded: corosync cluster config database access v1.01
Jun  5 17:08:05 server2 corosync[1558]:   [SERV  ] Service engine unloaded: corosync profile loading service
Jun  5 17:08:05 server2 corosync[1558]:   [SERV  ] Service engine unloaded: corosync cluster quorum service v0.1
Jun  5 17:08:05 server2 corosync[1558]:   [MAIN  ] Corosync Cluster Engine exiting with status 0 at main.c:1858.
Jun  5 17:08:06 server2 corosync[6171]:   [MAIN  ] Corosync Cluster Engine ('1.4.2'): started and ready to provide service.
Jun  5 17:08:06 server2 corosync[6171]:   [MAIN  ] Corosync built-in features: nss
Jun  5 17:08:06 server2 corosync[6171]:   [MAIN  ] Successfully read main configuration file '/etc/corosync/corosync.conf'.
Jun  5 17:08:06 server2 corosync[6171]:   [TOTEM ] Initializing transport (UDP/IP Multicast).
Jun  5 17:08:06 server2 corosync[6171]:   [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
Jun  5 17:08:06 server2 corosync[6171]:   [TOTEM ] The network interface [10.0.0.2] is now up.
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: process_ais_conf: Reading configure
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: config_find_init: Local handle: 4730966301143465987 for logging
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: config_find_next: Processing additional logging options...
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Found 'off' for option: debug
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Found 'no' for option: to_logfile
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Found 'yes' for option: to_syslog
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Found 'daemon' for option: syslog_facility
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: config_find_init: Local handle: 7739444317642555396 for quorum
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: config_find_next: No additional configuration supplied for: quorum
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: No default for option: provider
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: config_find_init: Local handle: 5650605097994944517 for service
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: config_find_next: Processing additional service options...
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Found '0' for option: ver
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Defaulting to 'pcmk' for option: clustername
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Defaulting to 'no' for option: use_logd
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: get_config_opt: Defaulting to 'no' for option: use_mgmtd
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_startup: CRM: Initialized
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] Logging: Initialized pcmk_startup
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: Invoked: /usr/lib/heartbeat/stonithd 
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: crm_log_init_worker: Changed active directory to /var/lib/heartbeat/cores/root
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: get_cluster_type: Cluster type is: 'openais'
Jun  5 17:08:06 server2 stonith-ng: [6179]: notice: crm_cluster_connect: Connecting to cluster infrastructure: classic openais (with plugin)
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: init_ais_connection_classic: Creating connection to our Corosync plugin
Jun  5 17:08:06 server2 cib: [6180]: info: crm_log_init_worker: Changed active directory to /var/lib/heartbeat/cores/hacluster
Jun  5 17:08:06 server2 cib: [6180]: info: retrieveCib: Reading cluster configuration from: /var/lib/heartbeat/crm/cib.xml (digest: /var/lib/heartbeat/crm/cib.xml.sig)
Jun  5 17:08:06 server2 cib: [6180]: info: validate_with_relaxng: Creating RNG parser context
Jun  5 17:08:06 server2 cib: [6180]: info: startCib: CIB Initialization completed successfully
Jun  5 17:08:06 server2 cib: [6180]: info: get_cluster_type: Cluster type is: 'openais'
Jun  5 17:08:06 server2 cib: [6180]: notice: crm_cluster_connect: Connecting to cluster infrastructure: classic openais (with plugin)
Jun  5 17:08:06 server2 cib: [6180]: info: init_ais_connection_classic: Creating connection to our Corosync plugin
Jun  5 17:08:06 server2 lrmd: [6181]: info: enabling coredumps
Jun  5 17:08:06 server2 lrmd: [6181]: info: Started.
Jun  5 17:08:06 server2 attrd: [6182]: info: Invoked: /usr/lib/heartbeat/attrd 
Jun  5 17:08:06 server2 attrd: [6182]: notice: crm_cluster_connect: Connecting to cluster infrastructure: classic openais (with plugin)
Jun  5 17:08:06 server2 pengine: [6183]: info: Invoked: /usr/lib/heartbeat/pengine 
Jun  5 17:08:06 server2 crmd: [6184]: info: Invoked: /usr/lib/heartbeat/crmd 
Jun  5 17:08:06 server2 crmd: [6184]: info: crm_log_init_worker: Changed active directory to /var/lib/heartbeat/cores/hacluster
Jun  5 17:08:06 server2 crmd: [6184]: info: main: CRM Hg Version: 9971ebba4494012a93c03b40a2c58ec0eb60f50c
Jun  5 17:08:06 server2 crmd: [6184]: info: crmd_init: Starting crmd
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_startup: Maximum core file size is: 18446744073709551615
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_startup: Service: 9
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_startup: Local hostname: server2
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_update_nodeid: Local node id: 33554442
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Creating entry for node 33554442 born on 0
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: 0x1c97710 Node 33554442 now known as server2 (was: (null))
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Node server2 now has 1 quorum votes (was 0)
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Node 33554442/server2 is now: member
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: spawn_child: Forked child 6179 for process stonith-ng
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: spawn_child: Forked child 6180 for process cib
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: spawn_child: Forked child 6181 for process lrmd
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: spawn_child: Forked child 6182 for process attrd
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: spawn_child: Forked child 6183 for process pengine
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: spawn_child: Forked child 6184 for process crmd
Jun  5 17:08:06 server2 corosync[6171]:   [SERV  ] Service engine loaded: Pacemaker Cluster Manager 1.1.6
Jun  5 17:08:06 server2 corosync[6171]:   [SERV  ] Service engine loaded: corosync extended virtual synchrony service
Jun  5 17:08:06 server2 corosync[6171]:   [SERV  ] Service engine loaded: corosync configuration service
Jun  5 17:08:06 server2 corosync[6171]:   [SERV  ] Service engine loaded: corosync cluster closed process group service v1.01
Jun  5 17:08:06 server2 corosync[6171]:   [SERV  ] Service engine loaded: corosync cluster config database access v1.01
Jun  5 17:08:06 server2 corosync[6171]:   [SERV  ] Service engine loaded: corosync profile loading service
Jun  5 17:08:06 server2 corosync[6171]:   [SERV  ] Service engine loaded: corosync cluster quorum service v0.1
Jun  5 17:08:06 server2 corosync[6171]:   [MAIN  ] Compatibility mode set to whitetank.  Using V1 and V2 of the synchronization engine.
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Transitional membership event on ring 120: memb=0, new=0, lost=0
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Stable membership event on ring 120: memb=1, new=1, lost=0
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: NEW:  server2 33554442
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: MEMB: server2 33554442
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Node server2 now has process list: 00000000000000000000000000111312 (1118994)
Jun  5 17:08:06 server2 corosync[6171]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
Jun  5 17:08:06 server2 corosync[6171]:   [CPG   ] chosen downlist: sender r(0) ip(10.0.0.2) ; members(old:0 left:0)
Jun  5 17:08:06 server2 corosync[6171]:   [MAIN  ] Completed service synchronization, ready to provide service.
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_ipc: Recorded connection 0x1ca9a20 for attrd/6182
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: init_ais_connection_classic: AIS connection established
Jun  5 17:08:06 server2 cib: [6180]: info: init_ais_connection_classic: AIS connection established
Jun  5 17:08:06 server2 attrd: [6182]: notice: main: Starting mainloop...
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_ipc: Recorded connection 0x1ca0ec0 for stonith-ng/6179
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_ipc: Recorded connection 0x1ca5220 for cib/6180
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_ipc: Sending membership update 120 to cib
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Transitional membership event on ring 924: memb=1, new=0, lost=0
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: memb: server2 33554442
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Stable membership event on ring 924: memb=2, new=1, lost=0
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Creating entry for node 16777226 born on 924
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Node 16777226/unknown is now: member
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: NEW:  .pending. 16777226
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: MEMB: .pending. 16777226
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: MEMB: server2 33554442
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: send_member_notification: Sending membership update 924 to 1 children
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: 0x1c97710 Node 33554442 ((null)) born on: 924
Jun  5 17:08:06 server2 corosync[6171]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: get_ais_nodeid: Server details: id=33554442 uname=server2 cname=pcmk
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: init_ais_connection_once: Connection to 'classic openais (with plugin)': established
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: crm_new_peer: Node server2 now has id: 33554442
Jun  5 17:08:06 server2 cib: [6180]: info: get_ais_nodeid: Server details: id=33554442 uname=server2 cname=pcmk
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: crm_new_peer: Node 33554442 is now known as server2
Jun  5 17:08:06 server2 cib: [6180]: info: init_ais_connection_once: Connection to 'classic openais (with plugin)': established
Jun  5 17:08:06 server2 stonith-ng: [6179]: info: main: Starting stonith-ng mainloop
Jun  5 17:08:06 server2 cib: [6180]: info: crm_new_peer: Node server2 now has id: 33554442
Jun  5 17:08:06 server2 cib: [6180]: info: crm_new_peer: Node 33554442 is now known as server2
Jun  5 17:08:06 server2 cib: [6180]: info: cib_init: Starting cib mainloop
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: 0x1c9dc20 Node 16777226 (server1) born on: 116
Jun  5 17:08:06 server2 cib: [6180]: info: ais_dispatch_message: Membership 120: quorum still lost
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: 0x1c9dc20 Node 16777226 now known as server1 (was: (null))
Jun  5 17:08:06 server2 cib: [6180]: info: crm_update_peer: Node server2: id=33554442 state=member (new) addr=r(0) ip(10.0.0.2)  (new) votes=1 (new) born=0 seen=120 proc=00000000000000000000000000111312 (new)
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Node server1 now has process list: 00000000000000000000000000111312 (1118994)
Jun  5 17:08:06 server2 cib: [6180]: info: ais_dispatch_message: Membership 924: quorum still lost
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: update_member: Node server1 now has 1 quorum votes (was 0)
Jun  5 17:08:06 server2 cib: [6180]: info: crm_new_peer: Node <null> now has id: 16777226
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] info: send_member_notification: Sending membership update 924 to 1 children
Jun  5 17:08:06 server2 cib: [6180]: info: crm_update_peer: Node (null): id=16777226 state=member (new) addr=r(0) ip(10.0.0.1)  votes=0 born=0 seen=924 proc=00000000000000000000000000000000
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] WARN: route_ais_message: Sending message to local.crmd failed: ipc delivery failed (rc=-2)
Jun  5 17:08:06 server2 cib: [6180]: info: crm_get_peer: Node 16777226 is now known as server1
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.40 -> 0.66.41 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.41 -> 0.66.42 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: notice: ais_dispatch_message: Membership 924: quorum acquired
Jun  5 17:08:06 server2 cib: [6180]: info: crm_update_peer: Node server1: id=16777226 state=member addr=r(0) ip(10.0.0.1)  votes=1 (new) born=116 seen=924 proc=00000000000000000000000000111312 (new)
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.42 -> 0.66.43 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.43 -> 0.66.44 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] WARN: route_ais_message: Sending message to local.crmd failed: ipc delivery failed (rc=-2)
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.44 -> 0.66.45 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.45 -> 0.66.46 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.46 -> 0.66.47 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.47 -> 0.66.48 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.48 -> 0.66.49 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 corosync[6171]:   [CPG   ] chosen downlist: sender r(0) ip(10.0.0.1) ; members(old:1 left:0)
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.49 -> 0.66.50 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 corosync[6171]:   [MAIN  ] Completed service synchronization, ready to provide service.
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.50 -> 0.66.51 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 cib: [6180]: WARN: cib_process_diff: Diff 0.66.51 -> 0.66.52 not applied to 0.67.0: current "epoch" is greater than required
Jun  5 17:08:06 server2 corosync[6171]:   [pcmk  ] WARN: route_ais_message: Sending message to local.crmd failed: ipc delivery failed (rc=-2)
Jun  5 17:08:07 server2 crmd: [6184]: info: do_cib_control: CIB connection established
Jun  5 17:08:07 server2 crmd: [6184]: info: get_cluster_type: Cluster type is: 'openais'
Jun  5 17:08:07 server2 crmd: [6184]: notice: crm_cluster_connect: Connecting to cluster infrastructure: classic openais (with plugin)
Jun  5 17:08:07 server2 crmd: [6184]: info: init_ais_connection_classic: Creating connection to our Corosync plugin
Jun  5 17:08:07 server2 crmd: [6184]: info: init_ais_connection_classic: AIS connection established
Jun  5 17:08:07 server2 corosync[6171]:   [pcmk  ] info: pcmk_ipc: Recorded connection 0x1cae840 for crmd/6184
Jun  5 17:08:07 server2 corosync[6171]:   [pcmk  ] info: pcmk_ipc: Sending membership update 924 to crmd
Jun  5 17:08:07 server2 crmd: [6184]: info: get_ais_nodeid: Server details: id=33554442 uname=server2 cname=pcmk
Jun  5 17:08:07 server2 crmd: [6184]: info: init_ais_connection_once: Connection to 'classic openais (with plugin)': established
Jun  5 17:08:07 server2 crmd: [6184]: info: crm_new_peer: Node server2 now has id: 33554442
Jun  5 17:08:07 server2 crmd: [6184]: info: crm_new_peer: Node 33554442 is now known as server2
Jun  5 17:08:07 server2 crmd: [6184]: info: ais_status_callback: status: server2 is now unknown
Jun  5 17:08:07 server2 crmd: [6184]: info: do_ha_control: Connected to the cluster
Jun  5 17:08:07 server2 crmd: [6184]: info: do_started: Delaying start, no membership data (0000000000100000)
Jun  5 17:08:07 server2 crmd: [6184]: info: crmd_init: Starting crmd's mainloop
Jun  5 17:08:07 server2 crmd: [6184]: notice: ais_dispatch_message: Membership 924: quorum acquired
Jun  5 17:08:07 server2 crmd: [6184]: info: crm_new_peer: Node server1 now has id: 16777226
Jun  5 17:08:07 server2 crmd: [6184]: info: crm_new_peer: Node 16777226 is now known as server1
Jun  5 17:08:07 server2 crmd: [6184]: info: ais_status_callback: status: server1 is now unknown
Jun  5 17:08:07 server2 crmd: [6184]: info: ais_status_callback: status: server1 is now member (was unknown)
Jun  5 17:08:07 server2 crmd: [6184]: info: crm_update_peer: Node server1: id=16777226 state=member (new) addr=r(0) ip(10.0.0.1)  votes=1 born=116 seen=924 proc=00000000000000000000000000111312
Jun  5 17:08:07 server2 crmd: [6184]: notice: crmd_peer_update: Status update: Client server2/crmd now has status [online] (DC=<null>)
Jun  5 17:08:07 server2 crmd: [6184]: info: ais_status_callback: status: server2 is now member (was unknown)
Jun  5 17:08:07 server2 crmd: [6184]: info: crm_update_peer: Node server2: id=33554442 state=member (new) addr=r(0) ip(10.0.0.2)  (new) votes=1 (new) born=924 seen=924 proc=00000000000000000000000000111312 (new)
Jun  5 17:08:07 server2 crmd: [6184]: info: do_started: Delaying start, Config not read (0000000000000040)
Jun  5 17:08:07 server2 crmd: [6184]: info: config_query_callback: Shutdown escalation occurs after: 1200000ms
Jun  5 17:08:07 server2 crmd: [6184]: info: config_query_callback: Checking for expired actions every 900000ms
Jun  5 17:08:07 server2 crmd: [6184]: info: config_query_callback: Sending expected-votes=2 to corosync
Jun  5 17:08:07 server2 crmd: [6184]: info: do_started: The local CRM is operational
Jun  5 17:08:07 server2 crmd: [6184]: info: do_state_transition: State transition S_STARTING -> S_PENDING [ input=I_PENDING cause=C_FSA_INTERNAL origin=do_started ]
Jun  5 17:08:08 server2 crmd: [6184]: info: ais_dispatch_message: Membership 924: quorum retained
Jun  5 17:08:08 server2 crmd: [6184]: info: te_connect_stonith: Attempting connection to fencing daemon...
Jun  5 17:08:09 server2 crmd: [6184]: info: te_connect_stonith: Connected
Jun  5 17:08:09 server2 crmd: [6184]: info: update_dc: Set DC to server1 (3.0.5)
Jun  5 17:08:09 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_sync for section 'all' (origin=server1/crmd/64, version=0.67.0): ok (rc=0)
Jun  5 17:08:09 server2 crmd: [6184]: info: update_attrd: Connecting to attrd...
Jun  5 17:08:09 server2 crmd: [6184]: info: do_state_transition: State transition S_PENDING -> S_NOT_DC [ input=I_NOT_DC cause=C_HA_MESSAGE origin=do_cl_join_finalize_respond ]
Jun  5 17:08:09 server2 attrd: [6182]: notice: attrd_local_callback: Sending full refresh (origin=crmd)
Jun  5 17:08:09 server2 crmd: [6184]: info: erase_xpath_callback: Deletion of "//node_state[@uname='server2']/transient_attributes": ok (rc=0)
Jun  5 17:08:09 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=4:11:7:6597b448-a806-4a0b-91bd-19a1c5c270f7 op=Clu-FS-DRBD:0_monitor_0 )
Jun  5 17:08:09 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 probe[2] (pid 6191)
Jun  5 17:08:09 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=5:11:7:6597b448-a806-4a0b-91bd-19a1c5c270f7 op=Clu-FS-Mount:0_monitor_0 )
Jun  5 17:08:09 server2 lrmd: [6181]: info: rsc:Clu-FS-Mount:0 probe[3] (pid 6192)
Jun  5 17:08:09 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=6:11:7:6597b448-a806-4a0b-91bd-19a1c5c270f7 op=xenwin7_monitor_0 )
Jun  5 17:08:09 server2 lrmd: [6181]: info: rsc:xenwin7 probe[4] (pid 6193)
Jun  5 17:08:09 server2 lrmd: [6181]: info: operation monitor[3] on Clu-FS-Mount:0 for client 6184: pid 6192 exited with return code 7
Jun  5 17:08:09 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-Mount:0_monitor_0 (call=3, rc=7, cib-update=7, confirmed=true) not running
Jun  5 17:08:09 server2 crm_attribute: [6292]: info: Invoked: crm_attribute -N server2 -n master-Clu-FS-DRBD:0 -l reboot -D 
Jun  5 17:08:09 server2 lrmd: [6181]: info: operation monitor[2] on Clu-FS-DRBD:0 for client 6184: pid 6191 exited with return code 7
Jun  5 17:08:09 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_monitor_0 (call=2, rc=7, cib-update=8, confirmed=true) not running
Jun  5 17:08:10 server2 lrmd: [6181]: info: operation monitor[4] on xenwin7 for client 6184: pid 6193 exited with return code 7
Jun  5 17:08:10 server2 crmd: [6184]: info: process_lrm_event: LRM operation xenwin7_monitor_0 (call=4, rc=7, cib-update=9, confirmed=true) not running
Jun  5 17:08:10 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: probe_complete (true)
Jun  5 17:08:10 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=8:11:0:6597b448-a806-4a0b-91bd-19a1c5c270f7 op=Clu-FS-DRBD:0_start_0 )
Jun  5 17:08:10 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 start[5] (pid 6310)
Jun  5 17:08:10 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:start:stdout)         allow-two-primaries;
Jun  5 17:08:10 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:start:stdout) 
Jun  5 17:08:10 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:start:stdout) 
Jun  5 17:08:10 server2 kernel: [ 1442.834165] block drbd0: Starting worker thread (from drbdsetup [6357])
Jun  5 17:08:10 server2 kernel: [ 1442.834332] block drbd0: disk( Diskless -> Attaching ) 
Jun  5 17:08:10 server2 kernel: [ 1442.926012] block drbd0: Found 15 transactions (631 active extents) in activity log.
Jun  5 17:08:10 server2 kernel: [ 1442.926018] block drbd0: Method to ensure write ordering: flush
Jun  5 17:08:10 server2 kernel: [ 1442.926026] block drbd0: drbd_bm_resize called with capacity == 117182904
Jun  5 17:08:10 server2 kernel: [ 1442.926697] block drbd0: resync bitmap: bits=14647863 words=228873 pages=448
Jun  5 17:08:10 server2 kernel: [ 1442.926703] block drbd0: size = 56 GB (58591452 KB)
Jun  5 17:08:10 server2 kernel: [ 1443.140441] block drbd0: bitmap READ of 448 pages took 53 jiffies
Jun  5 17:08:10 server2 kernel: [ 1443.140872] block drbd0: recounting of set bits took additional 1 jiffies
Jun  5 17:08:10 server2 kernel: [ 1443.140877] block drbd0: 0 KB (0 bits) marked out-of-sync by on disk bit-map.
Jun  5 17:08:10 server2 kernel: [ 1443.140885] block drbd0: disk( Attaching -> Outdated ) 
Jun  5 17:08:10 server2 kernel: [ 1443.140889] block drbd0: attached to UUIDs 662B73A915BFB850:0000000000000000:70D77213B2E5F1C7:70D67213B2E5F1C7
Jun  5 17:08:10 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:start:stdout) 
Jun  5 17:08:10 server2 kernel: [ 1443.179930] block drbd0: conn( StandAlone -> Unconnected ) 
Jun  5 17:08:10 server2 kernel: [ 1443.179954] block drbd0: Starting receiver thread (from drbd0_worker [6359])
Jun  5 17:08:10 server2 kernel: [ 1443.180042] block drbd0: receiver (re)started
Jun  5 17:08:10 server2 kernel: [ 1443.180051] block drbd0: conn( Unconnected -> WFConnection ) 
Jun  5 17:08:10 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:start:stdout) 
Jun  5 17:08:10 server2 crm_attribute: [6388]: info: Invoked: crm_attribute -N server2 -n master-Clu-FS-DRBD:0 -l reboot -D 
Jun  5 17:08:10 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:start:stdout) 
Jun  5 17:08:10 server2 lrmd: [6181]: info: operation start[5] on Clu-FS-DRBD:0 for client 6184: pid 6310 exited with return code 0
Jun  5 17:08:10 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_start_0 (call=5, rc=0, cib-update=10, confirmed=true) ok
Jun  5 17:08:10 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=48:11:0:6597b448-a806-4a0b-91bd-19a1c5c270f7 op=Clu-FS-DRBD:0_notify_0 )
Jun  5 17:08:10 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 notify[6] (pid 6396)
Jun  5 17:08:10 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:notify:stdout) 
Jun  5 17:08:10 server2 lrmd: [6181]: info: operation notify[6] on Clu-FS-DRBD:0 for client 6184: pid 6396 exited with return code 0
Jun  5 17:08:10 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_notify_0 from 48:11:0:6597b448-a806-4a0b-91bd-19a1c5c270f7: lrm_invoke-lrmd-1401968290-4
Jun  5 17:08:10 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_notify_0 (call=6, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:10 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=9:11:0:6597b448-a806-4a0b-91bd-19a1c5c270f7 op=Clu-FS-DRBD:0_monitor_41000 )
Jun  5 17:08:10 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 monitor[7] (pid 6426)
Jun  5 17:08:10 server2 crm_attribute: [6453]: info: Invoked: crm_attribute -N server2 -n master-Clu-FS-DRBD:0 -l reboot -D 
Jun  5 17:08:10 server2 lrmd: [6181]: info: operation monitor[7] on Clu-FS-DRBD:0 for client 6184: pid 6426 exited with return code 0
Jun  5 17:08:10 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_monitor_41000 (call=7, rc=0, cib-update=11, confirmed=false) ok
Jun  5 17:08:11 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: probe_complete (true)
Jun  5 17:08:11 server2 attrd: [6182]: notice: attrd_perform_update: Sent update 4: probe_complete=true
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: update_member: Node server1 now has process list: 00000000000000000000000000000002 (2)
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: send_member_notification: Sending membership update 924 to 2 children
Jun  5 17:08:16 server2 cib: [6180]: info: ais_dispatch_message: Membership 924: quorum retained
Jun  5 17:08:16 server2 cib: [6180]: info: crm_update_peer: Node server1: id=16777226 state=member addr=r(0) ip(10.0.0.1)  votes=1 born=116 seen=924 proc=00000000000000000000000000000002 (new)
Jun  5 17:08:16 server2 crmd: [6184]: info: ais_dispatch_message: Membership 924: quorum retained
Jun  5 17:08:16 server2 crmd: [6184]: notice: crmd_peer_update: Status update: Client server1/crmd now has status [offline] (DC=server1)
Jun  5 17:08:16 server2 crmd: [6184]: info: crmd_peer_update: Got client status callback - our DC is dead
Jun  5 17:08:16 server2 crmd: [6184]: info: crm_update_peer: Node server1: id=16777226 state=member addr=r(0) ip(10.0.0.1)  votes=1 born=116 seen=924 proc=00000000000000000000000000000002 (new)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: State transition S_NOT_DC -> S_ELECTION [ input=I_ELECTION cause=C_CRMD_STATUS_CALLBACK origin=crmd_peer_update ]
Jun  5 17:08:16 server2 crmd: [6184]: info: update_dc: Unset DC server1
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: State transition S_ELECTION -> S_INTEGRATION [ input=I_ELECTION_DC cause=C_FSA_INTERNAL origin=do_election_check ]
Jun  5 17:08:16 server2 crmd: [6184]: info: do_te_control: Registering TE UUID: e61e91a0-cc68-4093-9c39-b3be0421856b
Jun  5 17:08:16 server2 crmd: [6184]: info: set_graph_functions: Setting custom graph functions
Jun  5 17:08:16 server2 crmd: [6184]: info: unpack_graph: Unpacked transition -1: 0 actions in 0 synapses
Jun  5 17:08:16 server2 crmd: [6184]: info: do_dc_takeover: Taking over DC status for this partition
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_readwrite: We are now in R/W mode
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_master for section 'all' (origin=local/crmd/12, version=0.67.27): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section cib (origin=local/crmd/13, version=0.67.28): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/15, version=0.67.29): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: join_make_offer: Making join offers based on membership 924
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/17, version=0.67.30): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: join_make_offer: Peer process on server1 is not active (yet?): 00000002 2
Jun  5 17:08:16 server2 crmd: [6184]: info: do_dc_join_offer_all: join-1: Waiting on 1 outstanding join acks
Jun  5 17:08:16 server2 crmd: [6184]: info: ais_dispatch_message: Membership 924: quorum retained
Jun  5 17:08:16 server2 crmd: [6184]: info: crmd_ais_dispatch: Setting expected votes to 2
Jun  5 17:08:16 server2 crmd: [6184]: info: update_dc: Set DC to server2 (3.0.5)
Jun  5 17:08:16 server2 crmd: [6184]: info: config_query_callback: Shutdown escalation occurs after: 1200000ms
Jun  5 17:08:16 server2 crmd: [6184]: info: config_query_callback: Checking for expired actions every 900000ms
Jun  5 17:08:16 server2 crmd: [6184]: info: config_query_callback: Sending expected-votes=2 to corosync
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/20, version=0.67.31): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: ais_dispatch_message: Membership 924: quorum retained
Jun  5 17:08:16 server2 crmd: [6184]: info: crmd_ais_dispatch: Setting expected votes to 2
Jun  5 17:08:16 server2 crmd: [6184]: notice: ais_dispatch_message: Membership 928: quorum lost
Jun  5 17:08:16 server2 crmd: [6184]: info: ais_status_callback: status: server1 is now lost (was member)
Jun  5 17:08:16 server2 crmd: [6184]: info: crm_update_peer: Node server1: id=16777226 state=lost (new) addr=r(0) ip(10.0.0.1)  votes=1 born=116 seen=924 proc=00000000000000000000000000000002
Jun  5 17:08:16 server2 crmd: [6184]: info: check_join_state: check_dead_member: Membership changed since join started: 924 -> 928
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Transitional membership event on ring 928: memb=1, new=0, lost=1
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: memb: server2 33554442
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: lost: server1 16777226
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Stable membership event on ring 928: memb=1, new=0, lost=0
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: MEMB: server2 33554442
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: ais_mark_unseen_peer_dead: Node server1 was not seen in the previous transition
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: update_member: Node 16777226/server1 is now: lost
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: send_member_notification: Sending membership update 928 to 2 children
Jun  5 17:08:16 server2 corosync[6171]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
Jun  5 17:08:16 server2 corosync[6171]:   [CPG   ] chosen downlist: sender r(0) ip(10.0.0.2) ; members(old:2 left:1)
Jun  5 17:08:16 server2 corosync[6171]:   [MAIN  ] Completed service synchronization, ready to provide service.
Jun  5 17:08:16 server2 cib: [6180]: notice: ais_dispatch_message: Membership 928: quorum lost
Jun  5 17:08:16 server2 cib: [6180]: info: crm_update_peer: Node server1: id=16777226 state=lost (new) addr=r(0) ip(10.0.0.1)  votes=1 born=116 seen=924 proc=00000000000000000000000000000002
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/23, version=0.67.32): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: crm_update_quorum: Updating quorum status to false (call=26)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/24, version=0.67.33): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section cib (origin=local/crmd/26, version=0.67.35): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: crmd_ais_dispatch: Setting expected votes to 2
Jun  5 17:08:16 server2 crmd: [6184]: info: update_dc: Unset DC server2
Jun  5 17:08:16 server2 crmd: [6184]: info: join_make_offer: Making join offers based on membership 928
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/28, version=0.67.36): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_dc_join_offer_all: join-2: Waiting on 1 outstanding join acks
Jun  5 17:08:16 server2 crmd: [6184]: info: update_dc: Set DC to server2 (3.0.5)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: State transition S_INTEGRATION -> S_FINALIZE_JOIN [ input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state ]
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: All 1 cluster nodes responded to the join offer.
Jun  5 17:08:16 server2 crmd: [6184]: info: do_dc_join_finalize: join-2: Syncing the CIB from server2 to the rest of the cluster
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_sync for section 'all' (origin=local/crmd/30, version=0.67.36): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/31, version=0.67.37): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_dc_join_ack: join-2: Updating node state to member for server2
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_delete for section //node_state[@uname='server2']/lrm (origin=local/crmd/32, version=0.67.38): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: erase_xpath_callback: Deletion of "//node_state[@uname='server2']/lrm": ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE [ input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state ]
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: All 1 cluster nodes are eligible to run resources.
Jun  5 17:08:16 server2 crmd: [6184]: info: do_dc_join_final: Ensuring DC, quorum and node attributes are up-to-date
Jun  5 17:08:16 server2 crmd: [6184]: info: crm_update_quorum: Updating quorum status to false (call=36)
Jun  5 17:08:16 server2 crmd: [6184]: info: abort_transition_graph: do_te_invoke:167 - Triggered transition abort (complete=1) : Peer Cancelled
Jun  5 17:08:16 server2 crmd: [6184]: info: do_pe_invoke: Query 37: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:16 server2 attrd: [6182]: notice: attrd_local_callback: Sending full refresh (origin=crmd)
Jun  5 17:08:16 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: probe_complete (true)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/34, version=0.67.40): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: WARN: match_down_event: No match for shutdown action on server1
Jun  5 17:08:16 server2 crmd: [6184]: info: te_update_diff: Stonith/shutdown of server1 not matched
Jun  5 17:08:16 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:215 - Triggered transition abort (complete=1, tag=node_state, id=server1, magic=NA, cib=0.67.41) : Node failure
Jun  5 17:08:16 server2 crmd: [6184]: info: do_pe_invoke: Query 38: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section cib (origin=local/crmd/36, version=0.67.42): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_pe_invoke_callback: Invoking the PE: query=38, ref=pe_calc-dc-1401968296-14, seq=928, quorate=0
Jun  5 17:08:16 server2 pengine: [6183]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:16 server2 pengine: [6183]: notice: common_apply_stickiness: Clu-FS-DRBD-Master can fail 999999 more times on server1 before being forced off
Jun  5 17:08:16 server2 pengine: [6183]: notice: common_apply_stickiness: Clu-FS-DRBD-Master can fail 999999 more times on server1 before being forced off
Jun  5 17:08:16 server2 pengine: [6183]: WARN: common_apply_stickiness: Forcing xenwin7 away from server1 after 1000000 failures (max=1000000)
Jun  5 17:08:16 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:0 (Slave server2)
Jun  5 17:08:16 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:1 (Stopped)
Jun  5 17:08:16 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:0 (Stopped)
Jun  5 17:08:16 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:1 (Stopped)
Jun  5 17:08:16 server2 pengine: [6183]: notice: LogActions: Leave   xenwin7 (Stopped)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:16 server2 crmd: [6184]: info: unpack_graph: Unpacked transition 0: 1 actions in 1 synapses
Jun  5 17:08:16 server2 crmd: [6184]: info: do_te_invoke: Processing graph 0 (ref=pe_calc-dc-1401968296-14) derived from /var/lib/pengine/pe-input-48.bz2
Jun  5 17:08:16 server2 crmd: [6184]: info: te_rsc_command: Initiating action 4: probe_complete probe_complete on server2 (local) - no waiting
Jun  5 17:08:16 server2 crmd: [6184]: info: run_graph: ====================================================
Jun  5 17:08:16 server2 crmd: [6184]: notice: run_graph: Transition 0 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pengine/pe-input-48.bz2): Complete
Jun  5 17:08:16 server2 crmd: [6184]: info: te_graph_trigger: Transition 0 is now complete
Jun  5 17:08:16 server2 crmd: [6184]: info: notify_crmd: Transition 0 status: done - <null>
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: Starting PEngine Recheck Timer
Jun  5 17:08:16 server2 attrd: [6182]: notice: attrd_perform_update: Sent update 7: probe_complete=true
Jun  5 17:08:16 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: probe_complete (true)
Jun  5 17:08:16 server2 attrd: [6182]: notice: attrd_perform_update: Sent update 9: probe_complete=true
Jun  5 17:08:16 server2 pengine: [6183]: notice: process_pe_message: Transition 0: PEngine Input stored in: /var/lib/pengine/pe-input-48.bz2
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Transitional membership event on ring 932: memb=1, new=0, lost=0
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: memb: server2 33554442
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] notice: pcmk_peer_update: Stable membership event on ring 932: memb=2, new=1, lost=0
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: update_member: Node 16777226/server1 is now: member
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: NEW:  server1 16777226
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: MEMB: server1 16777226
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: pcmk_peer_update: MEMB: server2 33554442
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: send_member_notification: Sending membership update 932 to 2 children
Jun  5 17:08:16 server2 corosync[6171]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
Jun  5 17:08:16 server2 cib: [6180]: notice: ais_dispatch_message: Membership 932: quorum acquired
Jun  5 17:08:16 server2 cib: [6180]: info: crm_update_peer: Node server1: id=16777226 state=member (new) addr=r(0) ip(10.0.0.1)  votes=1 born=116 seen=932 proc=00000000000000000000000000000002
Jun  5 17:08:16 server2 crmd: [6184]: notice: ais_dispatch_message: Membership 932: quorum acquired
Jun  5 17:08:16 server2 crmd: [6184]: info: ais_status_callback: status: server1 is now member (was lost)
Jun  5 17:08:16 server2 crmd: [6184]: WARN: match_down_event: No match for shutdown action on server1
Jun  5 17:08:16 server2 crmd: [6184]: info: crm_update_peer: Node server1: id=16777226 state=member (new) addr=r(0) ip(10.0.0.1)  votes=1 born=116 seen=932 proc=00000000000000000000000000000002
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_delete for section //node_state[@uname='server1']/lrm (origin=local/crmd/39, version=0.67.45): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: crm_update_quorum: Updating quorum status to true (call=43)
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: update_member: 0x1c9dc20 Node 16777226 (server1) born on: 932
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: update_member: Node server1 now has process list: 00000000000000000000000000111312 (1118994)
Jun  5 17:08:16 server2 corosync[6171]:   [pcmk  ] info: send_member_notification: Sending membership update 932 to 2 children
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_delete for section //node_state[@uname='server1']/transient_attributes (origin=local/crmd/40, version=0.67.46): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/41, version=0.67.47): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section cib (origin=local/crmd/43, version=0.67.49): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: ais_dispatch_message: Membership 932: quorum retained
Jun  5 17:08:16 server2 cib: [6180]: info: crm_update_peer: Node server1: id=16777226 state=member addr=r(0) ip(10.0.0.1)  votes=1 born=932 seen=932 proc=00000000000000000000000000111312 (new)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_sync_one for section 'all' (origin=server1/server1/(null), version=0.67.49): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: crmd_ais_dispatch: Setting expected votes to 2
Jun  5 17:08:16 server2 crmd: [6184]: info: ais_dispatch_message: Membership 932: quorum retained
Jun  5 17:08:16 server2 crmd: [6184]: notice: crmd_peer_update: Status update: Client server1/crmd now has status [online] (DC=true)
Jun  5 17:08:16 server2 crmd: [6184]: info: crm_update_peer: Node server1: id=16777226 state=member addr=r(0) ip(10.0.0.1)  votes=1 born=932 seen=932 proc=00000000000000000000000000111312 (new)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/45, version=0.67.50): ok (rc=0)
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/47, version=0.67.52): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: crmd_ais_dispatch: Setting expected votes to 2
Jun  5 17:08:16 server2 crmd: [6184]: info: do_state_transition: State transition S_IDLE -> S_INTEGRATION [ input=I_NODE_JOIN cause=C_FSA_INTERNAL origin=crmd_peer_update ]
Jun  5 17:08:16 server2 crmd: [6184]: info: abort_transition_graph: do_te_invoke:175 - Triggered transition abort (complete=1) : Peer Halt
Jun  5 17:08:16 server2 crmd: [6184]: info: update_dc: Unset DC server2
Jun  5 17:08:16 server2 crmd: [6184]: info: join_make_offer: Making join offers based on membership 932
Jun  5 17:08:16 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section crm_config (origin=local/crmd/50, version=0.67.54): ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: do_dc_join_offer_all: join-3: Waiting on 2 outstanding join acks
Jun  5 17:08:16 server2 crmd: [6184]: info: erase_xpath_callback: Deletion of "//node_state[@uname='server1']/lrm": ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: erase_xpath_callback: Deletion of "//node_state[@uname='server1']/transient_attributes": ok (rc=0)
Jun  5 17:08:16 server2 crmd: [6184]: info: update_dc: Set DC to server2 (3.0.5)
Jun  5 17:08:16 server2 corosync[6171]:   [CPG   ] chosen downlist: sender r(0) ip(10.0.0.1) ; members(old:1 left:0)
Jun  5 17:08:16 server2 corosync[6171]:   [MAIN  ] Completed service synchronization, ready to provide service.
Jun  5 17:08:18 server2 crmd: [6184]: info: update_dc: Unset DC server2
Jun  5 17:08:18 server2 crmd: [6184]: info: do_dc_join_offer_all: A new node joined the cluster
Jun  5 17:08:18 server2 crmd: [6184]: info: do_dc_join_offer_all: join-4: Waiting on 2 outstanding join acks
Jun  5 17:08:18 server2 crmd: [6184]: info: update_dc: Set DC to server2 (3.0.5)
Jun  5 17:08:19 server2 crmd: [6184]: info: do_state_transition: State transition S_INTEGRATION -> S_FINALIZE_JOIN [ input=I_INTEGRATED cause=C_FSA_INTERNAL origin=check_join_state ]
Jun  5 17:08:19 server2 crmd: [6184]: info: do_state_transition: All 2 cluster nodes responded to the join offer.
Jun  5 17:08:19 server2 crmd: [6184]: info: do_dc_join_finalize: join-4: Syncing the CIB from server2 to the rest of the cluster
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_sync for section 'all' (origin=local/crmd/53, version=0.67.54): ok (rc=0)
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/54, version=0.67.55): ok (rc=0)
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/55, version=0.67.56): ok (rc=0)
Jun  5 17:08:19 server2 crmd: [6184]: info: do_dc_join_ack: join-4: Updating node state to member for server2
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_delete for section //node_state[@uname='server2']/lrm (origin=local/crmd/56, version=0.67.57): ok (rc=0)
Jun  5 17:08:19 server2 crmd: [6184]: info: erase_xpath_callback: Deletion of "//node_state[@uname='server2']/lrm": ok (rc=0)
Jun  5 17:08:19 server2 crmd: [6184]: info: do_dc_join_ack: join-4: Updating node state to member for server1
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_delete for section //node_state[@uname='server1']/transient_attributes (origin=server1/crmd/6, version=0.67.59): ok (rc=0)
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_delete for section //node_state[@uname='server1']/lrm (origin=local/crmd/58, version=0.67.60): ok (rc=0)
Jun  5 17:08:19 server2 crmd: [6184]: info: erase_xpath_callback: Deletion of "//node_state[@uname='server1']/lrm": ok (rc=0)
Jun  5 17:08:19 server2 crmd: [6184]: info: do_state_transition: State transition S_FINALIZE_JOIN -> S_POLICY_ENGINE [ input=I_FINALIZED cause=C_FSA_INTERNAL origin=check_join_state ]
Jun  5 17:08:19 server2 crmd: [6184]: info: do_state_transition: All 2 cluster nodes are eligible to run resources.
Jun  5 17:08:19 server2 crmd: [6184]: info: do_dc_join_final: Ensuring DC, quorum and node attributes are up-to-date
Jun  5 17:08:19 server2 crmd: [6184]: info: crm_update_quorum: Updating quorum status to true (call=62)
Jun  5 17:08:19 server2 crmd: [6184]: info: abort_transition_graph: do_te_invoke:167 - Triggered transition abort (complete=1) : Peer Cancelled
Jun  5 17:08:19 server2 crmd: [6184]: info: do_pe_invoke: Query 63: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:19 server2 attrd: [6182]: notice: attrd_local_callback: Sending full refresh (origin=crmd)
Jun  5 17:08:19 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: probe_complete (true)
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section nodes (origin=local/crmd/60, version=0.67.62): ok (rc=0)
Jun  5 17:08:19 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_modify for section cib (origin=local/crmd/62, version=0.67.64): ok (rc=0)
Jun  5 17:08:19 server2 crmd: [6184]: info: do_pe_invoke_callback: Invoking the PE: query=63, ref=pe_calc-dc-1401968299-27, seq=932, quorate=1
Jun  5 17:08:19 server2 pengine: [6183]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:19 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (41s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:19 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (41s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:19 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:0 (Slave server2)
Jun  5 17:08:19 server2 pengine: [6183]: notice: LogActions: Start   Clu-FS-DRBD:1 (server1)
Jun  5 17:08:19 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:0 (Stopped)
Jun  5 17:08:19 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:1 (Stopped)
Jun  5 17:08:19 server2 pengine: [6183]: notice: LogActions: Leave   xenwin7 (Stopped)
Jun  5 17:08:19 server2 crmd: [6184]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:19 server2 crmd: [6184]: info: unpack_graph: Unpacked transition 1: 16 actions in 16 synapses
Jun  5 17:08:19 server2 crmd: [6184]: info: do_te_invoke: Processing graph 1 (ref=pe_calc-dc-1401968299-27) derived from /var/lib/pengine/pe-input-49.bz2
Jun  5 17:08:19 server2 crmd: [6184]: info: te_rsc_command: Initiating action 6: monitor Clu-FS-DRBD:1_monitor_0 on server1
Jun  5 17:08:19 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 16 fired and confirmed
Jun  5 17:08:19 server2 crmd: [6184]: info: te_rsc_command: Initiating action 7: monitor Clu-FS-Mount:0_monitor_0 on server1
Jun  5 17:08:19 server2 crmd: [6184]: info: te_rsc_command: Initiating action 8: monitor xenwin7_monitor_0 on server1
Jun  5 17:08:19 server2 crmd: [6184]: info: te_rsc_command: Initiating action 49: notify Clu-FS-DRBD:0_pre_notify_start_0 on server2 (local)
Jun  5 17:08:19 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=49:1:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_notify_0 )
Jun  5 17:08:19 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 notify[8] (pid 6455)
Jun  5 17:08:19 server2 pengine: [6183]: notice: process_pe_message: Transition 1: PEngine Input stored in: /var/lib/pengine/pe-input-49.bz2
Jun  5 17:08:19 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-Mount:0_monitor_0 (7) confirmed on server1 (rc=0)
Jun  5 17:08:19 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:notify:stdout) 
Jun  5 17:08:19 server2 lrmd: [6181]: info: operation notify[8] on Clu-FS-DRBD:0 for client 6184: pid 6455 exited with return code 0
Jun  5 17:08:19 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_notify_0 from 49:1:0:e61e91a0-cc68-4093-9c39-b3be0421856b: lrm_invoke-lrmd-1401968299-32
Jun  5 17:08:19 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968299-32 from server2
Jun  5 17:08:19 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_notify_0 (49) confirmed on server2 (rc=0)
Jun  5 17:08:19 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_notify_0 (call=8, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:19 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 17 fired and confirmed
Jun  5 17:08:19 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 14 fired and confirmed
Jun  5 17:08:19 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_monitor_0 (6) confirmed on server1 (rc=0)
Jun  5 17:08:20 server2 crmd: [6184]: info: match_graph_event: Action xenwin7_monitor_0 (8) confirmed on server1 (rc=0)
Jun  5 17:08:20 server2 crmd: [6184]: info: te_rsc_command: Initiating action 5: probe_complete probe_complete on server1 - no waiting
Jun  5 17:08:20 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 3 fired and confirmed
Jun  5 17:08:20 server2 crmd: [6184]: info: te_rsc_command: Initiating action 12: start Clu-FS-DRBD:1_start_0 on server1
Jun  5 17:08:20 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_start_0 (12) confirmed on server1 (rc=0)
Jun  5 17:08:20 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 15 fired and confirmed
Jun  5 17:08:20 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 18 fired and confirmed
Jun  5 17:08:20 server2 crmd: [6184]: info: te_rsc_command: Initiating action 50: notify Clu-FS-DRBD:0_post_notify_start_0 on server2 (local)
Jun  5 17:08:20 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=50:1:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_notify_0 )
Jun  5 17:08:20 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 notify[9] (pid 6485)
Jun  5 17:08:20 server2 crmd: [6184]: info: te_rsc_command: Initiating action 51: notify Clu-FS-DRBD:1_post_notify_start_0 on server1
Jun  5 17:08:20 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968300-4 from server1
Jun  5 17:08:20 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (51) confirmed on server1 (rc=0)
Jun  5 17:08:20 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:notify:stdout) 
Jun  5 17:08:20 server2 lrmd: [6181]: info: operation notify[9] on Clu-FS-DRBD:0 for client 6184: pid 6485 exited with return code 0
Jun  5 17:08:20 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_notify_0 from 50:1:0:e61e91a0-cc68-4093-9c39-b3be0421856b: lrm_invoke-lrmd-1401968300-37
Jun  5 17:08:20 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968300-37 from server2
Jun  5 17:08:20 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_notify_0 (50) confirmed on server2 (rc=0)
Jun  5 17:08:20 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_notify_0 (call=9, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:20 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 19 fired and confirmed
Jun  5 17:08:20 server2 crmd: [6184]: info: te_rsc_command: Initiating action 13: monitor Clu-FS-DRBD:1_monitor_41000 on server1
Jun  5 17:08:20 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_monitor_41000 (13) confirmed on server1 (rc=0)
Jun  5 17:08:20 server2 crmd: [6184]: info: run_graph: ====================================================
Jun  5 17:08:20 server2 crmd: [6184]: notice: run_graph: Transition 1 (Complete=16, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pengine/pe-input-49.bz2): Complete
Jun  5 17:08:20 server2 crmd: [6184]: info: te_graph_trigger: Transition 1 is now complete
Jun  5 17:08:20 server2 crmd: [6184]: info: notify_crmd: Transition 1 status: done - <null>
Jun  5 17:08:20 server2 crmd: [6184]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:20 server2 crmd: [6184]: info: do_state_transition: Starting PEngine Recheck Timer
Jun  5 17:08:21 server2 kernel: [ 1453.592775] block drbd0: Handshake successful: Agreed network protocol version 96
Jun  5 17:08:21 server2 kernel: [ 1453.592956] block drbd0: Peer authenticated using 20 bytes of 'sha1' HMAC
Jun  5 17:08:21 server2 kernel: [ 1453.592967] block drbd0: conn( WFConnection -> WFReportParams ) 
Jun  5 17:08:21 server2 kernel: [ 1453.593018] block drbd0: Starting asender thread (from drbd0_receiver [6380])
Jun  5 17:08:21 server2 kernel: [ 1453.593211] block drbd0: data-integrity-alg: <not-used>
Jun  5 17:08:21 server2 kernel: [ 1453.593233] block drbd0: drbd_sync_handshake:
Jun  5 17:08:21 server2 kernel: [ 1453.593239] block drbd0: self 662B73A915BFB850:0000000000000000:70D77213B2E5F1C7:70D67213B2E5F1C7 bits:0 flags:0
Jun  5 17:08:21 server2 kernel: [ 1453.593244] block drbd0: peer 86EC1E704CE1074A:662B73A915BFB851:70D77213B2E5F1C6:70D67213B2E5F1C7 bits:1 flags:0
Jun  5 17:08:21 server2 kernel: [ 1453.593248] block drbd0: uuid_compare()=-1 by rule 50
Jun  5 17:08:21 server2 kernel: [ 1453.593256] block drbd0: peer( Unknown -> Secondary ) conn( WFReportParams -> WFBitMapT ) pdsk( DUnknown -> UpToDate ) 
Jun  5 17:08:21 server2 kernel: [ 1453.629455] block drbd0: conn( WFBitMapT -> WFSyncUUID ) 
Jun  5 17:08:21 server2 kernel: [ 1453.636616] block drbd0: updated sync uuid 662C73A915BFB850:0000000000000000:70D77213B2E5F1C7:70D67213B2E5F1C7
Jun  5 17:08:21 server2 kernel: [ 1453.641317] block drbd0: helper command: /sbin/drbdadm before-resync-target minor-0
Jun  5 17:08:21 server2 kernel: [ 1453.646581] block drbd0: helper command: /sbin/drbdadm before-resync-target minor-0 exit code 0 (0x0)
Jun  5 17:08:21 server2 kernel: [ 1453.646588] block drbd0: conn( WFSyncUUID -> SyncTarget ) disk( Outdated -> Inconsistent ) 
Jun  5 17:08:21 server2 kernel: [ 1453.646598] block drbd0: Began resync as SyncTarget (will sync 4 KB [1 bits set]).
Jun  5 17:08:21 server2 kernel: [ 1453.666358] block drbd0: Resync done (total 1 sec; paused 0 sec; 4 K/sec)
Jun  5 17:08:21 server2 kernel: [ 1453.666365] block drbd0: 0 % had equal checksums, eliminated: 0K; transferred 4K total 4K
Jun  5 17:08:21 server2 kernel: [ 1453.666371] block drbd0: updated UUIDs 86EC1E704CE1074A:0000000000000000:662C73A915BFB850:662B73A915BFB851
Jun  5 17:08:21 server2 kernel: [ 1453.666378] block drbd0: conn( SyncTarget -> Connected ) disk( Inconsistent -> UpToDate ) 
Jun  5 17:08:21 server2 kernel: [ 1453.671325] block drbd0: helper command: /sbin/drbdadm after-resync-target minor-0
Jun  5 17:08:21 server2 crm-unfence-peer.sh[6523]: invoked for r0
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: - <cib admin_epoch="0" epoch="67" num_updates="71" >
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -   <configuration >
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -     <constraints >
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -       <rsc_location rsc="Clu-FS-DRBD-Master" id="drbd-fence-by-handler-Clu-FS-DRBD-Master" __crm_diff_marker__="removed:top" >
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -         <rule role="Master" score="-INFINITY" id="drbd-fence-by-handler-rule-Clu-FS-DRBD-Master" >
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -           <expression attribute="#uname" operation="ne" value="server2" id="drbd-fence-by-handler-expr-Clu-FS-DRBD-Master" />
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -         </rule>
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -       </rsc_location>
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -     </constraints>
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: -   </configuration>
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: - </cib>
Jun  5 17:08:21 server2 cib: [6180]: info: cib:diff: + <cib epoch="68" num_updates="1" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.5" update-origin="server2" update-client="cibadmin" cib-last-written="Thu Jun  5 16:50:29 2014" have-quorum="1" dc-uuid="server2" />
Jun  5 17:08:21 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:124 - Triggered transition abort (complete=1, tag=diff, id=(null), magic=NA, cib=0.68.1) : Non-status change
Jun  5 17:08:21 server2 cib: [6180]: info: cib_process_request: Operation complete: op cib_delete for section 'all' (origin=local/cibadmin/2, version=0.68.1): ok (rc=0)
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: State transition S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph ]
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: All 2 cluster nodes are eligible to run resources.
Jun  5 17:08:21 server2 kernel: [ 1453.741765] block drbd0: helper command: /sbin/drbdadm after-resync-target minor-0 exit code 0 (0x0)
Jun  5 17:08:21 server2 crmd: [6184]: info: do_pe_invoke: Query 64: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:21 server2 crmd: [6184]: info: do_pe_invoke_callback: Invoking the PE: query=64, ref=pe_calc-dc-1401968301-39, seq=932, quorate=1
Jun  5 17:08:21 server2 pengine: [6183]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:0 (Slave server2)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:1 (Slave server1)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:0 (Stopped)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:1 (Stopped)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Leave   xenwin7 (Stopped)
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:21 server2 crmd: [6184]: info: unpack_graph: Unpacked transition 2: 1 actions in 1 synapses
Jun  5 17:08:21 server2 crmd: [6184]: info: do_te_invoke: Processing graph 2 (ref=pe_calc-dc-1401968301-39) derived from /var/lib/pengine/pe-input-50.bz2
Jun  5 17:08:21 server2 crmd: [6184]: info: te_rsc_command: Initiating action 6: probe_complete probe_complete on server1 - no waiting
Jun  5 17:08:21 server2 crmd: [6184]: info: run_graph: ====================================================
Jun  5 17:08:21 server2 crmd: [6184]: notice: run_graph: Transition 2 (Complete=1, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pengine/pe-input-50.bz2): Complete
Jun  5 17:08:21 server2 crmd: [6184]: info: te_graph_trigger: Transition 2 is now complete
Jun  5 17:08:21 server2 crmd: [6184]: info: notify_crmd: Transition 2 status: done - <null>
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: Starting PEngine Recheck Timer
Jun  5 17:08:21 server2 kernel: [ 1453.845114] block drbd0: bitmap WRITE of 447 pages took 26 jiffies
Jun  5 17:08:21 server2 kernel: [ 1453.845122] block drbd0: 0 KB (0 bits) marked out-of-sync by on disk bit-map.
Jun  5 17:08:21 server2 pengine: [6183]: notice: process_pe_message: Transition 2: PEngine Input stored in: /var/lib/pengine/pe-input-50.bz2
Jun  5 17:08:21 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=1, tag=nvpair, id=status-server1-master-Clu-FS-DRBD.1, name=master-Clu-FS-DRBD:1, value=10000, magic=NA, cib=0.68.3) : Transient attribute: update
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: State transition S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=abort_transition_graph ]
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: All 2 cluster nodes are eligible to run resources.
Jun  5 17:08:21 server2 crmd: [6184]: info: do_pe_invoke: Query 65: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:21 server2 crmd: [6184]: info: do_pe_invoke_callback: Invoking the PE: query=65, ref=pe_calc-dc-1401968301-41, seq=932, quorate=1
Jun  5 17:08:21 server2 pengine: [6183]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:21 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:21 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:21 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (120s) for Clu-FS-Mount:0 on server1
Jun  5 17:08:21 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for xenwin7 on server1
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:0 (Slave server2)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Promote Clu-FS-DRBD:1 (Slave -> Master server1)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Start   Clu-FS-Mount:0 (server1)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:1 (Stopped)
Jun  5 17:08:21 server2 pengine: [6183]: notice: LogActions: Start   xenwin7 (server1)
Jun  5 17:08:21 server2 crmd: [6184]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:21 server2 crmd: [6184]: info: unpack_graph: Unpacked transition 3: 20 actions in 20 synapses
Jun  5 17:08:21 server2 crmd: [6184]: info: do_te_invoke: Processing graph 3 (ref=pe_calc-dc-1401968301-41) derived from /var/lib/pengine/pe-input-51.bz2
Jun  5 17:08:21 server2 crmd: [6184]: info: te_rsc_command: Initiating action 2: cancel Clu-FS-DRBD:1_monitor_41000 on server1
Jun  5 17:08:21 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 29 fired and confirmed
Jun  5 17:08:21 server2 crmd: [6184]: info: te_rsc_command: Initiating action 45: start xenwin7_start_0 on server1
Jun  5 17:08:21 server2 crmd: [6184]: info: te_rsc_command: Initiating action 6: probe_complete probe_complete on server1 - no waiting
Jun  5 17:08:21 server2 crmd: [6184]: info: te_rsc_command: Initiating action 59: notify Clu-FS-DRBD:0_pre_notify_promote_0 on server2 (local)
Jun  5 17:08:21 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=59:3:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_notify_0 )
Jun  5 17:08:21 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 notify[10] (pid 6538)
Jun  5 17:08:21 server2 crmd: [6184]: info: te_rsc_command: Initiating action 61: notify Clu-FS-DRBD:1_pre_notify_promote_0 on server1
Jun  5 17:08:21 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968301-5 from server1
Jun  5 17:08:21 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_monitor_41000 (2) confirmed on server1 (rc=0)
Jun  5 17:08:21 server2 pengine: [6183]: notice: process_pe_message: Transition 3: PEngine Input stored in: /var/lib/pengine/pe-input-51.bz2
Jun  5 17:08:21 server2 lrmd: [6181]: info: operation notify[10] on Clu-FS-DRBD:0 for client 6184: pid 6538 exited with return code 0
Jun  5 17:08:21 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_notify_0 from 59:3:0:e61e91a0-cc68-4093-9c39-b3be0421856b: lrm_invoke-lrmd-1401968301-47
Jun  5 17:08:21 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968301-47 from server2
Jun  5 17:08:21 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_notify_0 (59) confirmed on server2 (rc=0)
Jun  5 17:08:21 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_notify_0 (call=10, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:21 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968301-6 from server1
Jun  5 17:08:21 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (61) confirmed on server1 (rc=0)
Jun  5 17:08:21 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 30 fired and confirmed
Jun  5 17:08:21 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 27 fired and confirmed
Jun  5 17:08:21 server2 crmd: [6184]: info: te_rsc_command: Initiating action 13: promote Clu-FS-DRBD:1_promote_0 on server1
Jun  5 17:08:22 server2 kernel: [ 1454.592876] block drbd0: peer( Secondary -> Primary ) 
Jun  5 17:08:22 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_promote_0 (13) confirmed on server1 (rc=0)
Jun  5 17:08:22 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 28 fired and confirmed
Jun  5 17:08:22 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 31 fired and confirmed
Jun  5 17:08:22 server2 crmd: [6184]: info: te_rsc_command: Initiating action 60: notify Clu-FS-DRBD:0_post_notify_promote_0 on server2 (local)
Jun  5 17:08:22 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=60:3:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_notify_0 )
Jun  5 17:08:22 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 notify[11] (pid 6561)
Jun  5 17:08:22 server2 crmd: [6184]: info: te_rsc_command: Initiating action 62: notify Clu-FS-DRBD:1_post_notify_promote_0 on server1
Jun  5 17:08:22 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: master-Clu-FS-DRBD:0 (10000)
Jun  5 17:08:22 server2 attrd: [6182]: notice: attrd_perform_update: Sent update 23: master-Clu-FS-DRBD:0=10000
Jun  5 17:08:22 server2 lrmd: [6181]: info: operation notify[11] on Clu-FS-DRBD:0 for client 6184: pid 6561 exited with return code 0
Jun  5 17:08:22 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:notify:stdout) 
Jun  5 17:08:22 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=0, tag=nvpair, id=status-server2-master-Clu-FS-DRBD.0, name=master-Clu-FS-DRBD:0, value=10000, magic=NA, cib=0.68.8) : Transient attribute: update
Jun  5 17:08:22 server2 crmd: [6184]: info: update_abort_priority: Abort priority upgraded from 0 to 1000000
Jun  5 17:08:22 server2 crmd: [6184]: info: update_abort_priority: Abort action done superceeded by restart
Jun  5 17:08:22 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_notify_0 from 60:3:0:e61e91a0-cc68-4093-9c39-b3be0421856b: lrm_invoke-lrmd-1401968302-51
Jun  5 17:08:22 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968302-51 from server2
Jun  5 17:08:22 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_notify_0 (60) confirmed on server2 (rc=0)
Jun  5 17:08:22 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_notify_0 (call=11, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:22 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968302-7 from server1
Jun  5 17:08:22 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (62) confirmed on server1 (rc=0)
Jun  5 17:08:22 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 32 fired and confirmed
Jun  5 17:08:22 server2 crmd: [6184]: WARN: status_from_rc: Action 45 (xenwin7_start_0) on server1 failed (target: 0 vs. rc: 1): Error
Jun  5 17:08:22 server2 crmd: [6184]: WARN: update_failcount: Updating failcount for xenwin7 on server1 after failed start: rc=1 (update=INFINITY, time=1401968302)
Jun  5 17:08:22 server2 crmd: [6184]: info: abort_transition_graph: match_graph_event:277 - Triggered transition abort (complete=0, tag=lrm_rsc_op, id=xenwin7_last_failure_0, magic=0:1;45:3:0:e61e91a0-cc68-4093-9c39-b3be0421856b, cib=0.68.9) : Event failed
Jun  5 17:08:22 server2 crmd: [6184]: info: match_graph_event: Action xenwin7_start_0 (45) confirmed on server1 (rc=4)
Jun  5 17:08:22 server2 crmd: [6184]: info: run_graph: ====================================================
Jun  5 17:08:22 server2 crmd: [6184]: notice: run_graph: Transition 3 (Complete=14, Pending=0, Fired=0, Skipped=5, Incomplete=1, Source=/var/lib/pengine/pe-input-51.bz2): Stopped
Jun  5 17:08:22 server2 crmd: [6184]: info: te_graph_trigger: Transition 3 is now complete
Jun  5 17:08:22 server2 crmd: [6184]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:22 server2 crmd: [6184]: info: do_state_transition: All 2 cluster nodes are eligible to run resources.
Jun  5 17:08:22 server2 crmd: [6184]: info: do_pe_invoke: Query 66: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:22 server2 crmd: [6184]: info: do_pe_invoke_callback: Invoking the PE: query=66, ref=pe_calc-dc-1401968302-52, seq=932, quorate=1
Jun  5 17:08:22 server2 pengine: [6183]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:22 server2 pengine: [6183]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server1: unknown error (1)
Jun  5 17:08:22 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:0 on server2
Jun  5 17:08:22 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:22 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:0 on server2
Jun  5 17:08:22 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:22 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (120s) for Clu-FS-Mount:0 on server1
Jun  5 17:08:22 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (120s) for Clu-FS-Mount:1 on server2
Jun  5 17:08:22 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for xenwin7 on server1
Jun  5 17:08:22 server2 pengine: [6183]: notice: LogActions: Promote Clu-FS-DRBD:0 (Slave -> Master server2)
Jun  5 17:08:22 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:1 (Master server1)
Jun  5 17:08:22 server2 pengine: [6183]: notice: LogActions: Start   Clu-FS-Mount:0 (server1)
Jun  5 17:08:22 server2 pengine: [6183]: notice: LogActions: Start   Clu-FS-Mount:1 (server2)
Jun  5 17:08:22 server2 pengine: [6183]: notice: LogActions: Recover xenwin7 (Started server1)
Jun  5 17:08:22 server2 crmd: [6184]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:22 server2 crmd: [6184]: info: unpack_graph: Unpacked transition 4: 24 actions in 24 synapses
Jun  5 17:08:22 server2 crmd: [6184]: info: do_te_invoke: Processing graph 4 (ref=pe_calc-dc-1401968302-52) derived from /var/lib/pengine/pe-input-52.bz2
Jun  5 17:08:22 server2 crmd: [6184]: info: te_rsc_command: Initiating action 1: cancel Clu-FS-DRBD:0_monitor_41000 on server2 (local)
Jun  5 17:08:22 server2 lrmd: [6181]: info: cancel_op: operation monitor[7] on Clu-FS-DRBD:0 for client 6184, its parameters: drbd_resource=[r0] CRM_meta_role=[Slave] CRM_meta_notify_stop_resource=[ ] CRM_meta_notify_start_resource=[Clu-FS-DRBD:0 ] CRM_meta_notify_inactive_resource=[Clu-FS-DRBD:0 Clu-FS-DRBD:1 ] CRM_meta_notify_master_uname=[ ] CRM_meta_timeout=[51000] CRM_meta_name=[monitor] CRM_meta_notify_demote_resource=[ ] CRM_meta_notify_promote_uname=[ ] crm_feature_set=[3.0.5] CRM_meta_notify=[true] CRM_meta_notify_start_uname=[server2 ] CRM_meta_clone cancelled
Jun  5 17:08:22 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_monitor_41000 from 1:4:0:e61e91a0-cc68-4093-9c39-b3be0421856b: lrm_invoke-lrmd-1401968302-54
Jun  5 17:08:22 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968302-54 from server2
Jun  5 17:08:22 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_monitor_41000 (1) confirmed on server2 (rc=0)
Jun  5 17:08:22 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 31 fired and confirmed
Jun  5 17:08:22 server2 crmd: [6184]: info: te_rsc_command: Initiating action 2: stop xenwin7_stop_0 on server1
Jun  5 17:08:22 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=0, tag=nvpair, id=status-server1-fail-count-xenwin7, name=fail-count-xenwin7, value=INFINITY, magic=NA, cib=0.68.10) : Transient attribute: update
Jun  5 17:08:22 server2 crmd: [6184]: info: update_abort_priority: Abort priority upgraded from 0 to 1000000
Jun  5 17:08:22 server2 crmd: [6184]: info: update_abort_priority: Abort action done superceeded by restart
Jun  5 17:08:22 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=0, tag=nvpair, id=status-server1-last-failure-xenwin7, name=last-failure-xenwin7, value=1401968302, magic=NA, cib=0.68.11) : Transient attribute: update
Jun  5 17:08:22 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_monitor_41000 (call=7, status=1, cib-update=0, confirmed=true) Cancelled
Jun  5 17:08:22 server2 pengine: [6183]: notice: process_pe_message: Transition 4: PEngine Input stored in: /var/lib/pengine/pe-input-52.bz2
Jun  5 17:08:23 server2 crmd: [6184]: info: match_graph_event: Action xenwin7_stop_0 (2) confirmed on server1 (rc=0)
Jun  5 17:08:23 server2 crmd: [6184]: info: run_graph: ====================================================
Jun  5 17:08:23 server2 crmd: [6184]: notice: run_graph: Transition 4 (Complete=3, Pending=0, Fired=0, Skipped=15, Incomplete=6, Source=/var/lib/pengine/pe-input-52.bz2): Stopped
Jun  5 17:08:23 server2 crmd: [6184]: info: te_graph_trigger: Transition 4 is now complete
Jun  5 17:08:23 server2 crmd: [6184]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:23 server2 crmd: [6184]: info: do_state_transition: All 2 cluster nodes are eligible to run resources.
Jun  5 17:08:23 server2 crmd: [6184]: info: do_pe_invoke: Query 68: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:23 server2 crmd: [6184]: info: do_pe_invoke_callback: Invoking the PE: query=68, ref=pe_calc-dc-1401968303-56, seq=932, quorate=1
Jun  5 17:08:23 server2 pengine: [6183]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:23 server2 pengine: [6183]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server1: unknown error (1)
Jun  5 17:08:23 server2 pengine: [6183]: WARN: common_apply_stickiness: Forcing xenwin7 away from server1 after 1000000 failures (max=1000000)
Jun  5 17:08:23 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:0 on server2
Jun  5 17:08:23 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:23 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:0 on server2
Jun  5 17:08:23 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for Clu-FS-DRBD:1 on server1
Jun  5 17:08:23 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (120s) for Clu-FS-Mount:0 on server1
Jun  5 17:08:23 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (120s) for Clu-FS-Mount:1 on server2
Jun  5 17:08:23 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (40s) for xenwin7 on server2
Jun  5 17:08:23 server2 pengine: [6183]: notice: LogActions: Promote Clu-FS-DRBD:0 (Slave -> Master server2)
Jun  5 17:08:23 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:1 (Master server1)
Jun  5 17:08:23 server2 pengine: [6183]: notice: LogActions: Start   Clu-FS-Mount:0 (server1)
Jun  5 17:08:23 server2 pengine: [6183]: notice: LogActions: Start   Clu-FS-Mount:1 (server2)
Jun  5 17:08:23 server2 pengine: [6183]: notice: LogActions: Start   xenwin7 (server2)
Jun  5 17:08:23 server2 crmd: [6184]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:23 server2 crmd: [6184]: WARN: destroy_action: Cancelling timer for action 1 (src=83)
Jun  5 17:08:23 server2 crmd: [6184]: info: unpack_graph: Unpacked transition 5: 21 actions in 21 synapses
Jun  5 17:08:23 server2 crmd: [6184]: info: do_te_invoke: Processing graph 5 (ref=pe_calc-dc-1401968303-56) derived from /var/lib/pengine/pe-input-53.bz2
Jun  5 17:08:23 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 29 fired and confirmed
Jun  5 17:08:23 server2 crmd: [6184]: info: te_rsc_command: Initiating action 47: start xenwin7_start_0 on server2 (local)
Jun  5 17:08:23 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=47:5:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=xenwin7_start_0 )
Jun  5 17:08:23 server2 lrmd: [6181]: info: rsc:xenwin7 start[12] (pid 6589)
Jun  5 17:08:23 server2 crmd: [6184]: info: te_rsc_command: Initiating action 59: notify Clu-FS-DRBD:0_pre_notify_promote_0 on server2 (local)
Jun  5 17:08:23 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=59:5:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_notify_0 )
Jun  5 17:08:23 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 notify[13] (pid 6590)
Jun  5 17:08:23 server2 crmd: [6184]: info: te_rsc_command: Initiating action 61: notify Clu-FS-DRBD:1_pre_notify_promote_0 on server1
Jun  5 17:08:23 server2 pengine: [6183]: notice: process_pe_message: Transition 5: PEngine Input stored in: /var/lib/pengine/pe-input-53.bz2
Jun  5 17:08:23 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968303-8 from server1
Jun  5 17:08:23 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (61) confirmed on server1 (rc=0)
Jun  5 17:08:23 server2 lrmd: [6181]: info: operation notify[13] on Clu-FS-DRBD:0 for client 6184: pid 6590 exited with return code 0
Jun  5 17:08:23 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_notify_0 from 59:5:0:e61e91a0-cc68-4093-9c39-b3be0421856b: lrm_invoke-lrmd-1401968303-60
Jun  5 17:08:23 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968303-60 from server2
Jun  5 17:08:23 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_notify_0 (59) confirmed on server2 (rc=0)
Jun  5 17:08:23 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_notify_0 (call=13, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:23 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 30 fired and confirmed
Jun  5 17:08:23 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 27 fired and confirmed
Jun  5 17:08:23 server2 crmd: [6184]: info: te_rsc_command: Initiating action 8: promote Clu-FS-DRBD:0_promote_0 on server2 (local)
Jun  5 17:08:23 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=8:5:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_promote_0 )
Jun  5 17:08:23 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 promote[14] (pid 6653)
Jun  5 17:08:23 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:promote:stdout)         allow-two-primaries;
Jun  5 17:08:23 server2 kernel: [ 1456.480060] block drbd0: role( Secondary -> Primary ) 
Jun  5 17:08:23 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:promote:stdout) 
Jun  5 17:08:23 server2 lrmd: [6181]: info: operation promote[14] on Clu-FS-DRBD:0 for client 6184: pid 6653 exited with return code 0
Jun  5 17:08:23 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_promote_0 (call=14, rc=0, cib-update=69, confirmed=true) ok
Jun  5 17:08:23 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_promote_0 (8) confirmed on server2 (rc=0)
Jun  5 17:08:23 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 28 fired and confirmed
Jun  5 17:08:23 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 31 fired and confirmed
Jun  5 17:08:23 server2 crmd: [6184]: info: te_rsc_command: Initiating action 60: notify Clu-FS-DRBD:0_post_notify_promote_0 on server2 (local)
Jun  5 17:08:23 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=60:5:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_notify_0 )
Jun  5 17:08:23 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 notify[15] (pid 6684)
Jun  5 17:08:23 server2 crmd: [6184]: info: te_rsc_command: Initiating action 62: notify Clu-FS-DRBD:1_post_notify_promote_0 on server1
Jun  5 17:08:24 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968304-9 from server1
Jun  5 17:08:24 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_notify_0 (62) confirmed on server1 (rc=0)
Jun  5 17:08:24 server2 lrmd: [6181]: info: RA output: (Clu-FS-DRBD:0:notify:stdout) 
Jun  5 17:08:24 server2 lrmd: [6181]: info: operation notify[15] on Clu-FS-DRBD:0 for client 6184: pid 6684 exited with return code 0
Jun  5 17:08:24 server2 crmd: [6184]: info: send_direct_ack: ACK'ing resource op Clu-FS-DRBD:0_notify_0 from 60:5:0:e61e91a0-cc68-4093-9c39-b3be0421856b: lrm_invoke-lrmd-1401968304-64
Jun  5 17:08:24 server2 crmd: [6184]: info: process_te_message: Processing (N)ACK lrm_invoke-lrmd-1401968304-64 from server2
Jun  5 17:08:24 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_notify_0 (60) confirmed on server2 (rc=0)
Jun  5 17:08:24 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_notify_0 (call=15, rc=0, cib-update=0, confirmed=true) ok
Jun  5 17:08:24 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 32 fired and confirmed
Jun  5 17:08:24 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 43 fired and confirmed
Jun  5 17:08:24 server2 crmd: [6184]: info: te_rsc_command: Initiating action 9: monitor Clu-FS-DRBD:0_monitor_40000 on server2 (local)
Jun  5 17:08:24 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=9:5:8:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-DRBD:0_monitor_40000 )
Jun  5 17:08:24 server2 lrmd: [6181]: info: rsc:Clu-FS-DRBD:0 monitor[16] (pid 6733)
Jun  5 17:08:24 server2 crmd: [6184]: info: te_rsc_command: Initiating action 14: monitor Clu-FS-DRBD:1_monitor_40000 on server1
Jun  5 17:08:24 server2 crmd: [6184]: info: te_rsc_command: Initiating action 39: start Clu-FS-Mount:0_start_0 on server1
Jun  5 17:08:24 server2 lrmd: [6181]: info: operation monitor[16] on Clu-FS-DRBD:0 for client 6184: pid 6733 exited with return code 8
Jun  5 17:08:24 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-DRBD:0_monitor_40000 (call=16, rc=8, cib-update=70, confirmed=false) master
Jun  5 17:08:24 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:0_monitor_40000 (9) confirmed on server2 (rc=0)
Jun  5 17:08:24 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-DRBD:1_monitor_40000 (14) confirmed on server1 (rc=0)
Jun  5 17:08:24 server2 lrmd: [6181]: info: RA output: (xenwin7:start:stderr) Error: Disk image does not exist: /cluster/xenwin7.img
Jun  5 17:08:24 server2 lrmd: [6181]: info: RA output: (xenwin7:start:stdout) Using config file "/home/cluster/xen/win7.cfg".
Jun  5 17:08:24 server2 lrmd: [6181]: info: operation start[12] on xenwin7 for client 6184: pid 6589 exited with return code 1
Jun  5 17:08:24 server2 crmd: [6184]: info: process_lrm_event: LRM operation xenwin7_start_0 (call=12, rc=1, cib-update=71, confirmed=true) unknown error
Jun  5 17:08:24 server2 crmd: [6184]: WARN: status_from_rc: Action 47 (xenwin7_start_0) on server2 failed (target: 0 vs. rc: 1): Error
Jun  5 17:08:24 server2 crmd: [6184]: WARN: update_failcount: Updating failcount for xenwin7 on server2 after failed start: rc=1 (update=INFINITY, time=1401968304)
Jun  5 17:08:24 server2 crmd: [6184]: info: abort_transition_graph: match_graph_event:277 - Triggered transition abort (complete=0, tag=lrm_rsc_op, id=xenwin7_last_failure_0, magic=0:1;47:5:0:e61e91a0-cc68-4093-9c39-b3be0421856b, cib=0.68.17) : Event failed
Jun  5 17:08:24 server2 crmd: [6184]: info: update_abort_priority: Abort priority upgraded from 0 to 1
Jun  5 17:08:24 server2 crmd: [6184]: info: update_abort_priority: Abort action done superceeded by restart
Jun  5 17:08:24 server2 crmd: [6184]: info: match_graph_event: Action xenwin7_start_0 (47) confirmed on server2 (rc=4)
Jun  5 17:08:24 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: fail-count-xenwin7 (INFINITY)
Jun  5 17:08:24 server2 attrd: [6182]: notice: attrd_perform_update: Sent update 28: fail-count-xenwin7=INFINITY
Jun  5 17:08:24 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=0, tag=nvpair, id=status-server2-fail-count-xenwin7, name=fail-count-xenwin7, value=INFINITY, magic=NA, cib=0.68.18) : Transient attribute: update
Jun  5 17:08:24 server2 crmd: [6184]: info: update_abort_priority: Abort priority upgraded from 1 to 1000000
Jun  5 17:08:24 server2 crmd: [6184]: info: update_abort_priority: 'Event failed' abort superceeded
Jun  5 17:08:24 server2 attrd: [6182]: notice: attrd_trigger_update: Sending flush op to all hosts for: last-failure-xenwin7 (1401968304)
Jun  5 17:08:24 server2 attrd: [6182]: notice: attrd_perform_update: Sent update 31: last-failure-xenwin7=1401968304
Jun  5 17:08:24 server2 crmd: [6184]: info: abort_transition_graph: te_update_diff:164 - Triggered transition abort (complete=0, tag=nvpair, id=status-server2-last-failure-xenwin7, name=last-failure-xenwin7, value=1401968304, magic=NA, cib=0.68.20) : Transient attribute: update
Jun  5 17:08:30 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-Mount:0_start_0 (39) confirmed on server1 (rc=0)
Jun  5 17:08:30 server2 crmd: [6184]: info: run_graph: ====================================================
Jun  5 17:08:30 server2 crmd: [6184]: notice: run_graph: Transition 5 (Complete=16, Pending=0, Fired=0, Skipped=4, Incomplete=1, Source=/var/lib/pengine/pe-input-53.bz2): Stopped
Jun  5 17:08:30 server2 crmd: [6184]: info: te_graph_trigger: Transition 5 is now complete
Jun  5 17:08:30 server2 crmd: [6184]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_FSA_INTERNAL origin=notify_crmd ]
Jun  5 17:08:30 server2 crmd: [6184]: info: do_state_transition: All 2 cluster nodes are eligible to run resources.
Jun  5 17:08:30 server2 crmd: [6184]: info: do_pe_invoke: Query 72: Requesting the current CIB: S_POLICY_ENGINE
Jun  5 17:08:30 server2 crmd: [6184]: info: do_pe_invoke_callback: Invoking the PE: query=72, ref=pe_calc-dc-1401968310-68, seq=932, quorate=1
Jun  5 17:08:30 server2 pengine: [6183]: notice: unpack_config: On loss of CCM Quorum: Ignore
Jun  5 17:08:30 server2 pengine: [6183]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server2: unknown error (1)
Jun  5 17:08:30 server2 pengine: [6183]: WARN: unpack_rsc_op: Processing failed op xenwin7_last_failure_0 on server1: unknown error (1)
Jun  5 17:08:30 server2 pengine: [6183]: WARN: common_apply_stickiness: Forcing xenwin7 away from server2 after 1000000 failures (max=1000000)
Jun  5 17:08:30 server2 pengine: [6183]: WARN: common_apply_stickiness: Forcing xenwin7 away from server1 after 1000000 failures (max=1000000)
Jun  5 17:08:30 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (120s) for Clu-FS-Mount:0 on server1
Jun  5 17:08:30 server2 pengine: [6183]: notice: RecurringOp:  Start recurring monitor (120s) for Clu-FS-Mount:1 on server2
Jun  5 17:08:30 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:0 (Master server2)
Jun  5 17:08:30 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-DRBD:1 (Master server1)
Jun  5 17:08:30 server2 pengine: [6183]: notice: LogActions: Leave   Clu-FS-Mount:0 (Started server1)
Jun  5 17:08:30 server2 pengine: [6183]: notice: LogActions: Start   Clu-FS-Mount:1 (server2)
Jun  5 17:08:30 server2 pengine: [6183]: notice: LogActions: Stop    xenwin7 (server2)
Jun  5 17:08:30 server2 crmd: [6184]: info: do_state_transition: State transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE origin=handle_response ]
Jun  5 17:08:30 server2 crmd: [6184]: info: unpack_graph: Unpacked transition 6: 7 actions in 7 synapses
Jun  5 17:08:30 server2 crmd: [6184]: info: do_te_invoke: Processing graph 6 (ref=pe_calc-dc-1401968310-68) derived from /var/lib/pengine/pe-input-54.bz2
Jun  5 17:08:30 server2 crmd: [6184]: info: te_rsc_command: Initiating action 42: monitor Clu-FS-Mount:0_monitor_120000 on server1
Jun  5 17:08:30 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 45 fired and confirmed
Jun  5 17:08:30 server2 crmd: [6184]: info: te_rsc_command: Initiating action 2: stop xenwin7_stop_0 on server2 (local)
Jun  5 17:08:30 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=2:6:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=xenwin7_stop_0 )
Jun  5 17:08:30 server2 lrmd: [6181]: info: rsc:xenwin7 stop[17] (pid 6781)
Jun  5 17:08:30 server2 crmd: [6184]: info: te_rsc_command: Initiating action 43: start Clu-FS-Mount:1_start_0 on server2 (local)
Jun  5 17:08:30 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=43:6:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-Mount:1_start_0 )
Jun  5 17:08:30 server2 lrmd: [6181]: info: rsc:Clu-FS-Mount:1 start[18] (pid 6782)
Jun  5 17:08:30 server2 pengine: [6183]: notice: process_pe_message: Transition 6: PEngine Input stored in: /var/lib/pengine/pe-input-54.bz2
Jun  5 17:08:30 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-Mount:0_monitor_120000 (42) confirmed on server1 (rc=0)
Jun  5 17:08:30 server2 Filesystem[6782]: INFO: Running start for /dev/drbd/by-res/r0 on /cluster
Jun  5 17:08:30 server2 lrmd: [6181]: info: RA output: (Clu-FS-Mount:1:start:stderr) FATAL: Module scsi_hostadapter not found.
Jun  5 17:08:31 server2 Xen[6781]: INFO: Xen domain xenwin7 already stopped.
Jun  5 17:08:31 server2 lrmd: [6181]: info: RA output: (xenwin7:stop:stderr) Error: Domain 'xenwin7' does not exist.
Jun  5 17:08:31 server2 lrmd: [6181]: info: operation stop[17] on xenwin7 for client 6184: pid 6781 exited with return code 0
Jun  5 17:08:31 server2 crmd: [6184]: info: process_lrm_event: LRM operation xenwin7_stop_0 (call=17, rc=0, cib-update=73, confirmed=true) ok
Jun  5 17:08:31 server2 crmd: [6184]: info: match_graph_event: Action xenwin7_stop_0 (2) confirmed on server2 (rc=0)
Jun  5 17:08:31 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 4 fired and confirmed
Jun  5 17:08:34 server2 kernel: [ 1467.060415] o2net: Connected to node server1 (num 0) at 10.0.0.1:7777
Jun  5 17:08:38 server2 kernel: [ 1471.078239] o2dlm: Joining domain F18CB82626444DD0913312B7AE741C5B ( 0 1 ) 2 nodes
Jun  5 17:08:38 server2 kernel: [ 1471.116107] ocfs2: Mounting device (147,0) on (node 1, slot 1) with ordered data mode.
Jun  5 17:08:38 server2 lrmd: [6181]: info: operation start[18] on Clu-FS-Mount:1 for client 6184: pid 6782 exited with return code 0
Jun  5 17:08:38 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-Mount:1_start_0 (call=18, rc=0, cib-update=74, confirmed=true) ok
Jun  5 17:08:38 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-Mount:1_start_0 (43) confirmed on server2 (rc=0)
Jun  5 17:08:38 server2 crmd: [6184]: info: te_rsc_command: Initiating action 44: monitor Clu-FS-Mount:1_monitor_120000 on server2 (local)
Jun  5 17:08:38 server2 crmd: [6184]: info: do_lrm_rsc_op: Performing key=44:6:0:e61e91a0-cc68-4093-9c39-b3be0421856b op=Clu-FS-Mount:1_monitor_120000 )
Jun  5 17:08:38 server2 lrmd: [6181]: info: rsc:Clu-FS-Mount:1 monitor[19] (pid 6960)
Jun  5 17:08:38 server2 crmd: [6184]: info: te_pseudo_action: Pseudo action 46 fired and confirmed
Jun  5 17:08:38 server2 lrmd: [6181]: info: operation monitor[19] on Clu-FS-Mount:1 for client 6184: pid 6960 exited with return code 0
Jun  5 17:08:38 server2 crmd: [6184]: info: process_lrm_event: LRM operation Clu-FS-Mount:1_monitor_120000 (call=19, rc=0, cib-update=75, confirmed=false) ok
Jun  5 17:08:38 server2 crmd: [6184]: info: match_graph_event: Action Clu-FS-Mount:1_monitor_120000 (44) confirmed on server2 (rc=0)
Jun  5 17:08:38 server2 crmd: [6184]: info: run_graph: ====================================================
Jun  5 17:08:38 server2 crmd: [6184]: notice: run_graph: Transition 6 (Complete=7, Pending=0, Fired=0, Skipped=0, Incomplete=0, Source=/var/lib/pengine/pe-input-54.bz2): Complete
Jun  5 17:08:38 server2 crmd: [6184]: info: te_graph_trigger: Transition 6 is now complete
Jun  5 17:08:38 server2 crmd: [6184]: info: notify_crmd: Transition 6 status: done - <null>
Jun  5 17:08:38 server2 crmd: [6184]: info: do_state_transition: State transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_FSA_INTERNAL origin=notify_crmd ]
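
Note on issue 2: the pengine warnings above ("Forcing xenwin7 away from server2/server1 after 1000000 failures") show that the fail-count for xenwin7 has reached INFINITY on both nodes, so the policy engine will not place it anywhere until that failure state is cleared. A minimal sketch of inspecting and clearing it with the standard Pacemaker tools (assuming crmsh as used in the configuration above, and that the DRBD/OCFS2 layers are already up):

# Show recorded fail-counts for all resources (one-shot)
crm_mon -1 --failcounts

# Clear the failed state so the policy engine can start xenwin7 again
crm resource cleanup xenwin7

# Equivalent low-level call
crm_resource --resource xenwin7 --cleanup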
-------------- next part --------------
kernel = "/usr/lib/xen/boot/hvmloader"
builder='hvm'
memory = '2048'
# Should be at least 2KB per MB of domain memory, plus a few MB per vcpu.
#shadow_memory = 8
name = "xenwin7"
vif = [ 'bridge=xenbr1' ]
on_poweroff = 'destroy'
on_reboot = 'restart'
on_crash = 'restart'
acpi = 1
apic = 1
disk = [ 'file:/cluster/xenwin7.img,hda,w', 'file:/home/cluster/vm/windows764bitnew.iso,hdc:cdrom,r' ]
#disk = [ 'phy:/dev/drbd/by-res/r0,ioemu:hda,w', 'file:/home/cluster/vm/windows764bitnew.iso,hdc:cdrom,r' ]
device_model = '/usr/lib/xen/bin/qemu-dm'
#device_model = '/usr/' + arch_libdir + '/xen/bin/qemu-dm'
#-----------------------------------------------------------------------------
# boot on floppy (a), hard disk (c) or CD-ROM (d)
# default: hard disk, cd-rom, floppy
boot="c"
sdl=0
vnc=1
# User-defined VNC port for vncviewer: 5900 + vncdisplay (10) = 5910
vncdisplay=10
vncunused=0
vncconsole=1
vncpasswd=''
serial='pty'
usbdevice='tablet'
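
With vncunused=0 and vncdisplay=10 above, qemu-dm binds the guest console to a fixed VNC display, i.e. TCP port 5900 + 10 = 5910 on the dom0 that currently runs the VM. A minimal sketch of reaching that console (the hostname is an assumption; any VNC client will do, and no password is set per vncpasswd=''):

# Display 10 = TCP port 5910 on the active dom0
vncviewer server2:10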

