[Pacemaker] [pacemaker] DRBD + corosync + pacemaker + postgresql
Thomaz Luiz Santos
thomaz.santos@gmail.com
Fri Oct 11 16:04:53 UTC 2013
Dear all!
I'm trying to build a sample cluster in virtual machines first and migrate it
to physical machines afterwards, but I'm having trouble configuring Pacemaker
(crm) so that it starts the resources and fails them over.
I can mount the /dev/drbd0 device on the primary node and start PostgreSQL
manually, but when the same things are managed as crm resources, the device is
never mounted and PostgreSQL is never started.
DRBD Version: 8.3.11 (api:88)
Corosync Cluster Engine, version '1.4.2'
Pacemaker 1.1.6
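Done by hand on ha-master, roughly this sequence works every time (same device,
mount point and init script that the crm resources are supposed to use):

drbdadm primary postgresql    # promote the DRBD resource on this node
mount /dev/drbd0 /mnt         # mount the replicated filesystem
service postgresql start      # start PostgreSQL through its init script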
**** this is my crm configuration ****
crm(live)# configure
crm(live)configure# show
node ha-master
node ha-slave
primitive drbd_postgresql ocf:heartbeat:drbd \
params drbd_resource="postgresql"
primitive fs_postgresql ocf:heartbeat:Filesystem \
params device="/dev/drbd0" directory="/mnt" fstype="ext4" \
meta target-role="Started"
primitive postgresql lsb:postgresql \
meta target-role="Started"
primitive vip_cluster ocf:heartbeat:IPaddr2 \
params ip="172.70.65.200" nic="eth0:1" \
meta target-role="Started"
group postgresql_cluster fs_postgresql vip_cluster postgresql
ms ms_drbd_postgresql drbd_postgresql \
meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" target-role="Stopped"
colocation col_postgresql inf: postgresql_cluster ms_drbd_postgresql
order or_postgresql inf: ms_drbd_postgresql postgresql_cluster
property $id="cib-bootstrap-options" \
dc-version="1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c" \
cluster-infrastructure="openais" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
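One thing I am not sure about: should the colocation/order constraints name the
Master role of the DRBD multi-state resource explicitly? From the DRBD/Pacemaker
examples I have found, it would look roughly like this (just a sketch using my
resource names, with the ms resource set back to target-role="Started"):

colocation col_postgresql inf: postgresql_cluster ms_drbd_postgresql:Master
order or_postgresql inf: ms_drbd_postgresql:promote postgresql_cluster:start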
**** this is my DRBD global_common.conf ****
global {
usage-count yes;
# minor-count dialog-refresh disable-ip-verification
}
common {
protocol C;
handlers {
pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
# split-brain "/usr/lib/drbd/notify-split-brain.sh root";
# out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
# before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
# after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
}
startup {
# wfc-timeout 15;
# degr-wfc-timeout 60;
# outdated-wfc-timeout wait-after-sb
}
disk {
# on-io-error fencing use-bmbv no-disk-barrier no-disk-flushes
# no-disk-drain no-md-flushes max-bio-bvecs
}
net {
# cram-hmac-alg sha1;
# shared-secret "secret";
# sndbuf-size rcvbuf-size timeout connect-int ping-int ping-timeout max-buffers
# max-epoch-size ko-count allow-two-primaries cram-hmac-alg shared-secret
# after-sb-0pri after-sb-1pri after-sb-2pri data-integrity-alg no-tcp-cork
}
syncer {
# rate 150M;
# rate after al-extents use-rle cpu-mask verify-alg csums-alg
}
}
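After changing the DRBD configuration I only check that it still parses and push
the changes to the running resources, more or less like this:

drbdadm dump all      # parse and print the effective configuration
drbdadm adjust all    # apply configuration changes to the running resources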
**** this is my postgresql.res ****
resource postgresql {
startup {
wfc-timeout 15;
degr-wfc-timeout 60;
}
syncer {
rate 150M;
verify-alg md5;
}
disk {
on-io-error detach;
no-disk-barrier;
no-disk-flushes;
no-disk-drain;
fencing resource-only;
}
on ha-master {
device /dev/drbd0;
disk /dev/sdb1;
address 172.70.65.210:7788;
meta-disk internal;
}
on ha-slave {
device /dev/drbd0;
disk /dev/sdb1;
address 172.70.65.220:7788;
meta-disk internal;
}
}
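Outside the cluster the resource itself looks healthy; to double-check its state
I run something like:

cat /proc/drbd              # connection state, roles and disk states
drbdadm role postgresql     # Primary/Secondary as seen from this node
drbdadm dstate postgresql   # should report UpToDate/UpToDate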
**** this is my corosync.conf ****
compatibility: whitetank
totem {
version: 2
secauth: off
threads: 0
interface {
ringnumber: 0
bindnetaddr: 172.70.65.200
mcastaddr: 226.94.1.1
mcastport: 5405
ttl: 1
}
}
logging {
fileline: off
to_stderr: yes
to_logfile: yes
to_syslog: yes
logfile: /var/log/cluster/corosync.log
debug: on
timestamp: on
logger_subsys {
subsys: AMF
debug: off
}
}
amf {
mode: disabled
}
aisexec{
user : root
group : root
}
service{
# Load the Pacemaker Cluster Resource Manager
name : pacemaker
ver : 0
}
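One doubt here: as far as I understand, bindnetaddr should normally be the
network address of the interface (172.70.65.0 in my case), not the service IP
that IPaddr2 manages. To check that both nodes actually join the ring I run
something like:

corosync-cfgtool -s    # ring status on the local node
crm_mon -1             # one-shot view of membership and resource state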
DRBD and PostgreSQL started manually:
On ha-master:
version: 8.3.13 (api:88/proto:86-96)
srcversion: 697DE8B1973B1D8914F04DB
0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----
ns:0 nr:0 dw:0 dr:664 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:n oos:0
On ha-slave:
version: 8.3.13 (api:88/proto:86-96)
srcversion: 697DE8B1973B1D8914F04DB
0: cs:Connected ro:Secondary/Primary ds:UpToDate/UpToDate C r-----
ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:n oos:0
root@ha-master:/mnt# df -hT
Filesystem     Type      Size  Used Avail Use% Mounted on
/dev/sda1 ext4 4,0G 1,8G 2,1G 47% /
udev devtmpfs 473M 4,0K 473M 1% /dev
tmpfs tmpfs 193M 264K 193M 1% /run
none tmpfs 5,0M 4,0K 5,0M 1% /run/lock
none tmpfs 482M 17M 466M 4% /run/shm
/dev/drbd0 ext4 2,0G 69M 1,9G 4% /mnt
root@ha-master:/mnt# service postgresql status
Running clusters: 9.1/main
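Before testing with Pacemaker again I undo the manual steps and clean up the
resources, roughly:

service postgresql stop
umount /mnt
drbdadm secondary postgresql
crm resource cleanup postgresql_cluster    # clear any old failcounts/errors
crm_mon -1                                 # watch whether the group comes up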
--
------------------------------
Thomaz Luiz Santos
Linux User: #359356