[Pacemaker] CMAN and Pacemaker

Fil lists at internyc.net
Sat Dec 24 03:13:38 EST 2011


Hi everyone,

Happy holidays!

I need some help with adding CMAN to my current cluster config.
Currently I have a two node Corosync/Pacemaker (Active/Passive) cluster.
It works as expected. Now I need to add a distributed filesystem to my
setup. I would like to test GFS2. As much as I understand I need to
setup CMAN to manage dlm/gfs_controld, am I correct? I have followed the
Clusters_from_Scratch document but I am having issues starting
pacemakerd once the cman is up and running. Is it possible to use
dlm/gfs_controld without cman, directly from pacemaker? How do I start
pacemaker when CMAN is running — and do I even need to? If not, how do
I manage my resources? Currently I am using:

Fedora 16
corosync-1.4.2-1.fc16.x86_64
pacemaker-1.1.6-4.fc16.x86_64
cman-3.1.7-1.fc16.x86_64

Thanks
filip

cluster.conf
-------------
<?xml version="1.0"?>
<cluster config_version="3" name="adriatic">
  <logging debug="on"/>
  <clusternodes>
    <clusternode name="server01" nodeid="1">
      <altname name="server01.stor" port="5405" mcast="226.94.1.1"/>
      <fence>
        <method name="pcmk-redirect">
          <device name="pcmk" port="server01"/>
        </method>
      </fence>
    </clusternode>
    <clusternode name="server02" nodeid="2">
      <altname name="server02.stor" port="5405" mcast="226.94.1.1"/>
      <fence>
        <method name="pcmk-redirect">
          <device name="pcmk" port="server02"/>
        </method>
      </fence>
    </clusternode>
  </clusternodes>
  <fencedevices>
    <fencedevice name="pcmk" agent="fence_pcmk"/>
  </fencedevices>
  <cman two_node="1" expected_votes="1">
    <multicast addr="226.94.1.2"/>
  </cman>
</cluster>


corosync.conf
--------------
compatibility: whitetank

totem {
        version: 2
        secauth: off
        threads: 0
        rrp_mode: passive

        interface {
                ringnumber: 0
                bindnetaddr: 192.168.10.0
                mcastaddr: 226.94.1.1
                mcastport: 5405
        }
}

logging {
        fileline: off
        to_stderr: no
        to_logfile: yes
        to_syslog: yes
        logfile: /var/log/cluster/corosync.log
        debug: off
        timestamp: on
}

amf {
        mode: disabled
}

pacemaker conf
--------------
node server01 \
	attributes standby="off"
node server02 \
	attributes standby="off"
primitive scsi_reservation ocf:adriatic:sg_persist \
	params sg_persist_resource="scsi_reservation0" \
	devs="/dev/disk/by-path/ip-192.168.10.5:3260-iscsi-iqn.2004-04.com.qnap:ts-459proii:iscsi.test.cb4d16-lun-0" \
	required_devs_nof="1" reservation_type="1" \
	op start interval="0" timeout="30s" \
	op stop interval="0" timeout="30s"
primitive vm_test ocf:adriatic:VirtualDomain \
	params config="/etc/libvirt/qemu/test.xml" hypervisor="qemu:///system" \
	migration_transport="tcp" \
	meta allow-migrate="true" is-managed="true" target-role="Stopped" \
	op start interval="0" timeout="120s" \
	op stop interval="0" timeout="120s" \
	op migrate_from interval="0" timeout="120s" \
	op migrate_to interval="0" timeout="120s" \
	op monitor interval="10" timeout="30" depth="0" \
	utilization cpu="1" hv_memory="1024"
ms ms_scsi_reservation scsi_reservation \
	meta master-max="1" master-node-max="1" clone-max="2" \
	clone-node-max="1" notify="true" migration-threshold="1" \
	allow-migrate="true" globally-unique="false" target-role="Stopped"
location cli-prefer-vm_test vm_test \
	rule $id="cli-prefer-rule-vm_test" inf: #uname eq server02
colocation service_on_scsi_reservation inf: vm_test ms_scsi_reservation:Master
order service_after_scsi_reservation inf: ms_scsi_reservation:promote vm_test:start
property $id="cib-bootstrap-options" \
	dc-version="1.1.6-4.fc16-89678d4947c5bd466e2f31acd58ea4e1edb854d5" \
	cluster-infrastructure="openais" \
	expected-quorum-votes="2" \
	stonith-enabled="false" \
	no-quorum-policy="ignore" \
	default-resource-stickiness="0" \
	last-lrm-refresh="1324069959"




More information about the Pacemaker mailing list