[Pacemaker] master/slave problems
Sam Pinar
sampinar at gmail.com
Wed Mar 16 01:27:22 UTC 2011
Hi,
I've set up a two-node cluster for testing using the "Clusters from Scratch -
Apache, DRBD and GFS2" guide. Failover works like a charm, but I want one of
the nodes to act as the master and have resources fail back to it when it
comes back up. At the moment, the resource just stays on the node it failed
over to (a sketch of the constraint I think I'm missing is included after the
crm config below).

configs:
node ULTPLN30.DMZ
node ULTPLN31.DMZ \
        attributes standby="off"
primitive ClusterIP ocf:heartbeat:IPaddr2 \
        params ip="10.110.4.123" cidr_netmask="32" \
        op monitor interval="30s"
property $id="cib-bootstrap-options" \
        dc-version="1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="2" \
        stonith-enabled="false" \
        no-quorum-policy="ignore"
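(In case it helps frame the question: my understanding is that fail-back happens
when the score preferring the original node is higher than the resource-stickiness
of whichever node currently runs the resource. Something like the following
location constraint is what I think I'm missing - the constraint id and the score
of 50 are just values I made up, and I'm assuming ULTPLN30.DMZ is the node I want
as master:

crm configure location prefer-ULTPLN30 ClusterIP 50: ULTPLN30.DMZ
crm configure rsc_defaults resource-stickiness=0

Setting resource-stickiness to 0 explicitly should be redundant if that is already
the default, but it makes the intent obvious.)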
xml:
<?xml version="1.0" ?>
<cib admin_epoch="0" cib-last-written="Wed Mar 16 12:00:35 2011" crm_feature_set="3.0.5" dc-uuid="ULTPLN30.DMZ" epoch="42" have-quorum="0" num_updates="84" validate-with="pacemaker-1.2">
  <configuration>
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options">
        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f"/>
        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="openais"/>
        <nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="2"/>
        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
        <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
      </cluster_property_set>
    </crm_config>
    <nodes>
      <node id="ULTPLN30.DMZ" type="normal" uname="ULTPLN30.DMZ"/>
      <node id="ULTPLN31.DMZ" type="normal" uname="ULTPLN31.DMZ">
        <instance_attributes id="nodes-ULTPLN31.DMZ">
          <nvpair id="nodes-ULTPLN31.DMZ-standby" name="standby" value="off"/>
        </instance_attributes>
      </node>
    </nodes>
    <resources>
      <primitive class="ocf" id="ClusterIP" provider="heartbeat" type="IPaddr2">
        <instance_attributes id="ClusterIP-instance_attributes">
          <nvpair id="ClusterIP-instance_attributes-ip" name="ip" value="10.110.4.123"/>
          <nvpair id="ClusterIP-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
        </instance_attributes>
        <operations>
          <op id="ClusterIP-monitor-30s" interval="30s" name="monitor"/>
        </operations>
      </primitive>
    </resources>
    <constraints/>
  </configuration>
</cib>
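(The <constraints/> element above is empty; if I understand the schema correctly,
the same node preference expressed directly in XML would look roughly like the
following - again the id and score are placeholders I made up:

<constraints>
  <rsc_location id="prefer-ULTPLN30" rsc="ClusterIP" node="ULTPLN30.DMZ" score="50"/>
</constraints>
)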
corosync.conf:
# Please read the corosync.conf.5 manual page
compatibility: whitetank
totem {
        version: 2
        secauth: off
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 10.110.4.120
                mcastaddr: 226.94.1.1
                mcastport: 4000
        }
}

logging {
        fileline: off
        to_stderr: no
        to_logfile: yes
        to_syslog: yes
        logfile: /var/log/cluster/corosync.log
        debug: off
        timestamp: on
        logger_subsys {
                subsys: AMF
                debug: off
        }
}

amf {
        mode: disabled
}

aisexec {
        user: root
        group: root
}

service {
        # Load the Pacemaker Cluster Resource Manager
        name: pacemaker
        ver: 0
}
Thanks in advance.
--
Cheers,
Sam Pinar
m 0401 245 390
e sampinar at gmail.com