[Pacemaker] Best Practice Flush Pacemaker Config / Troubleshooting unknown error
Koch, Sebastian
Sebastian.Koch at netzwerk.de
Fri Jun 11 10:59:43 UTC 2010
Hi,
currently I am trying to deploy my already running 2-node active/passive
LAMP cluster to physical machines. I ran into several problems while
importing the config, so I often need to fully flush the Pacemaker
config and paste in my edited version. Pacemaker always complains that
the resource is running, which is safe and correct behaviour. I would
like to know if there is a fast way to flush the config and import a new
one without stopping all resources and so on.
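
One approach I was considering, as a rough and untested sketch: put the
cluster into maintenance mode so Pacemaker leaves the running resources
alone, load the edited config as a full replacement, and then re-enable
management (the file name is just an example):

# Untested sketch: with maintenance-mode on, Pacemaker stops acting
# on resources, so replacing the config should not stop anything.
crm configure property maintenance-mode=true

# Load my edited config from a file, replacing the current one.
crm configure load replace /root/pilot01-cluster.crm

# Hand control back to Pacemaker.
crm configure property maintenance-mode=false

Is that the intended way, or is there something better?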
And my second question is how I can troubleshoot unknown errors. When I
try to start my DRBD resource it just says "unknown error". I am able to
mount it by hand; it is an XFS filesystem on top of LVM.
Normally I troubleshoot like this:
1. crm_mon
2. tail -fn 100 /var/log/syslog
3. crm_verify -LV
Does somebody have advice on how to troubleshoot these tricky errors?
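
One idea I had, as a rough and untested sketch: invoke the OCF resource
agent by hand with the same parameters Pacemaker passes, so the real
exit code and error output become visible. The agent path and the
environment variables are my assumptions about how the LRM calls it:

# Untested sketch: run the linbit drbd agent directly; OCF agents
# take their parameters as OCF_RESKEY_* environment variables.
export OCF_ROOT=/usr/lib/ocf
export OCF_RESOURCE_INSTANCE=drbd_pilot0
export OCF_RESKEY_drbd_resource=pilot0
/usr/lib/ocf/resource.d/linbit/drbd start
echo "agent exit code: $?"

Is that a sensible way to get past the generic "unknown error"?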
Here is my config:

crm configure show
node pilot01-node1 \
attributes standby="off"
node pilot01-node2 \
attributes standby="off"
primitive drbd_pilot0 ocf:linbit:drbd \
params drbd_resource="pilot0" \
operations $id="drbd_pilot0-operations" \
op monitor interval="60"
primitive pinggw ocf:pacemaker:pingd \
params host_list="10.1.1.162" multiplier="200" \
op monitor interval="10"
primitive res_Apache lsb:apache2 \
operations $id="res_Apache-operations" \
op monitor interval="15" timeout="20" start-delay="15" \
meta target-role="Started" is-managed="true"
primitive res_ClusterIP ocf:heartbeat:IPaddr2 \
params iflabel="ClusterIP" ip="10.1.1.12" \
operations $id="res_ClusterIP_1-operations" \
op monitor interval="10" start-delay="0"
primitive res_Filesystem ocf:heartbeat:Filesystem \
params fstype="xfs" directory="/mnt/cluster" device="/dev/drbd0" \
operations $id="res_Filesystem-operations" \
op monitor interval="20" start-delay="0"
primitive res_MySQL lsb:mysql \
operations $id="res_MySQL-operations" \
op monitor interval="15" timeout="60" start-delay="15"
group grp_MySQL res_Filesystem res_ClusterIP res_MySQL res_Apache \
meta target-role="Started"
ms ms_drbd_mysql0 drbd_pilot0 \
meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
clone cl-pinggw pinggw \
meta globally-unique="false"
location cli-prefer-grp_MySQL grp_MySQL \
rule $id="cli-prefer-rule-grp_MySQL" inf: #uname eq
pilot01-node2
location drbd-fence-by-handler-ms_drbd_mysql0 ms_drbd_mysql0 \
rule $id="drbd-fence-by-handler-rule-ms_drbd_mysql0"
$role="Master" -inf: #uname ne pilot01-node1
location grp_MySQL-with-pinggw grp_MySQL \
rule $id="grp_MySQL-with-pinggw-rule-1" -inf: not_defined pingd
or pingd lte 0
colocation col_drbd_on_mysql inf: grp_MySQL ms_drbd_mysql0:Master
order mysql_after_drbd inf: ms_drbd_mysql0:promote grp_MySQL:start
property $id="cib-bootstrap-options" \
expected-quorum-votes="3" \
stonith-enabled="false" \
no-quorum-policy="ignore" \
dc-version="1.0.8-2c98138c2f070fcb6ddeab1084154cffbf44ba75" \
cluster-infrastructure="openais" \
last-lrm-refresh="1274352045" \
symmetric-cluster="true" \
default-action-timeout="120s"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
cat /proc/drbd
version: 8.3.7 (api:88/proto:86-91)
GIT-hash: ea9e28dbff98e331a62bcbcc63a6135808fe2917 build by root at prolog01-pilot1, 2010-06-07 17:34:47
0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----
ns:0 nr:0 dw:0 dr:72 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:0
cat /etc/drbd.d/pilot.res
resource pilot0 {
    syncer {
        rate 125M;
    }
    device /dev/drbd0;
    disk /dev/cluster/block0;
    meta-disk internal;
    on pilot01-node1 {
        address 192.168.100.1:7789;
    }
    on pilot01-node2 {
        address 192.168.100.2:7789;
    }
    disk {
        fencing resource-only;
    }
    handlers {
        fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
    }
}
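
While pasting this I noticed that crm-fence-peer.sh has left the
drbd-fence-by-handler-ms_drbd_mysql0 constraint in the config above; it
pins the Master role to pilot01-node1 while my cli-prefer constraint
points at pilot01-node2. Could that be related to the start failure? I
was wondering whether it is safe to simply delete the leftover
constraint, e.g. (my guess, untested):

# Guess, untested: drop the leftover fencing constraint once both
# disks are UpToDate, so the master can be promoted again.
crm configure delete drbd-fence-by-handler-ms_drbd_mysql0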
Thanks in advance.
Best Regards
Sebastian Koch