[Pacemaker] How to tell pacemaker to start exportfs after filesystem resource
Александр Малаев
amalaev at alt-lan.ru
Mon Jun 20 19:40:04 UTC 2011
Hello,
I have configured a Pacemaker + OCFS2 cluster with shared storage connected over FC.
Now I need to set up an NFS export in Active/Active mode, so I added all the needed
resources and defined the start order.
But when a node starts after a reboot, I get a race condition between the
Filesystem resource and exportfs: exportfs cannot start because the OCFS2
mountpoint is not mounted yet.
How can I tell the exportfs resource to start only after the Filesystem resource is
ready?
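What I think is missing is a mandatory ordering between the filesystem clone and the
exportfs clone. As a rough, untested sketch (the constraint name ord-fs-share is just
something I made up; clone-fs and clone-share are the clones from my config below):

order ord-fs-share inf: clone-fs clone-share

As far as I understand, an inf: score makes the ordering mandatory, while the 0: scores
I am using now are only advisory, but I am not sure this is the right fix.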
The crm config is the following:
node msk-nfs-gw01
node msk-nfs-gw02
primitive nfs-kernel-server lsb:nfs-kernel-server \
op monitor interval="10s" timeout="30s"
primitive ping ocf:pacemaker:ping \
params host_list="10.236.22.35" multiplier="100" name="ping" \
op monitor interval="20s" timeout="60s" \
op start interval="0" timeout="60s"
primitive portmap upstart:portmap \
op monitor interval="10s" timeout="30s"
primitive res-dlm ocf:pacemaker:controld \
op monitor interval="120s"
primitive res-fs ocf:heartbeat:Filesystem \
params device="/dev/mapper/mpath0" directory="/media/media0"
fstype="ocfs2" \
op monitor interval="120s"
primitive res-nfs1-ip ocf:heartbeat:IPaddr2 \
params ip="10.236.22.38" cidr_netmask="27" nic="bond0" \
op monitor interval="30s"
primitive res-nfs2-ip ocf:heartbeat:IPaddr2 \
params ip="10.236.22.39" cidr_netmask="27" nic="bond0" \
op monitor interval="30s"
primitive res-o2cb ocf:pacemaker:o2cb \
op monitor interval="120s"
primitive res-share ocf:heartbeat:exportfs \
params directory="/media/media0/nfsroot/export1" clientspec="
10.236.22.0/24" options="rw,async,no_subtree_check,no_root_squash" fsid="1"
\
op monitor interval="10s" timeout="30s" \
op start interval="10" timeout="40s" \
op stop interval="0" timeout="40s"
primitive st-null stonith:null \
params hostlist="msk-nfs-gw01 msk-nfs-gw02"
group nfs portmap nfs-kernel-server
clone clone-dlm res-dlm \
meta globally-unique="false" interleave="true"
clone clone-fs res-fs \
meta globally-unique="false" interleave="true"
clone clone-nfs nfs \
meta globally-unique="false" interleace="true"
clone clone-o2cb res-o2cb \
meta globally-unique="false" interleave="true"
clone clone-share res-share \
meta globally-unique="false" interleave="true"
clone fencing st-null
clone ping_clone ping \
meta globally-unique="false"
location nfs1-ip-on-nfs1 res-nfs1-ip 50: msk-nfs-gw01
location nfs2-ip-on-nfs2 res-nfs2-ip 50: msk-nfs-gw02
colocation col-fs-o2cb inf: clone-fs clone-o2cb
colocation col-nfs-fs inf: clone-nfs clone-fs
colocation col-o2cb-dlm inf: clone-o2cb clone-dlm
colocation col-share-nfs inf: clone-share clone-nfs
order ord-dlm-o2cb 0: clone-dlm clone-o2cb
order ord-nfs-share 0: clone-nfs clone-share
order ord-o2cb-fs 0: clone-o2cb clone-fs
order ord-o2cb-nfs 0: clone-fs clone-nfs
order ord-share-nfs1 0: clone-share res-nfs1-ip
order ord-share-nfs2 0: clone-share res-nfs2-ip
property $id="cib-bootstrap-options" \
dc-version="1.0.9-da7075976b5ff0bee71074385f8fd02f296ec8a3" \
cluster-infrastructure="openais" \
expected-quorum-votes="2" \
stonith-enabled="true" \
no-quorum-policy="ignore" \
last-lrm-refresh="1308040111"
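If making the existing advisory orders mandatory is the right direction, I assume I
could change, for example, ord-nfs-share on the live cluster roughly like this
(untested sketch, using the constraint id from the config above):

crm configure delete ord-nfs-share
crm configure order ord-nfs-share inf: clone-nfs clone-share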
--
Best Regards
Alexander Malaev