[Pacemaker] Networking and routing issues with Active-Active
Arturo Borrero Gonzalez
cer.inet at linuxmail.org
Wed Dec 21 10:24:54 CET 2011
Hi there!
I'm working on two possible solutions for this.
The first:
In the primitive corresponding to the IPv4 address assigned to the loopback
interface on the node that is not doing the balancing, change the
cidr_netmask parameter to "32". This way, the local routing table of each
node is modified and the machine will not try to route traffic for the
VIP's subnet through the loopback interface.
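
A minimal sketch of the change, taking the IPV-lo_4 primitive from the
configuration below and only adjusting the netmask (vip_v4 stands for your
real virtual IP, as in your original post):

primitive IPV-lo_4 ocf:heartbeat:IPaddr \
        params ip="vip_v4" cidr_netmask="32" nic="lo" \
        op monitor interval="5s"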
This comes down to basic Linux networking: when the system has an IP
address assigned to an interface, the routing tables are updated so that
packets for that subnet may go out through that interface. In this case,
however, we want packets to reach the clients via eth0, not via loopback.
You may want to see what's happening on each node:
main table:
ip route   (or: route -n)
local table:
ip route show table local
cache table (what is being applied right now, for each packet):
ip route show cache
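
For example (just a sketch; the exact output depends on your kernel and
addresses), with cidr_netmask="24" on lo the kernel typically installs a
scope-link subnet route through loopback that captures traffic for the
whole subnet:

# ip route
150.214.5.0/24 dev lo  proto kernel  scope link  src vip_v4

With cidr_netmask="32" only the host entry in the local table remains, so
traffic to other hosts in the subnet keeps going out through eth0.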
The second:
The first option seems to work, but if it doesn't, the IPaddr2 RA has a
lot of advanced options and parameters worth investigating.
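
If you end up going down that road, the full parameter list can be pulled
from the resource agent metadata, for example:

crm ra info ocf:heartbeat:IPaddr2

(assuming the crm shell; "crm ra meta" behaves the same on some versions).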
Best regards.
2011/12/19 Arturo Borrero Gonzalez <cer.inet at linuxmail.org>:
> Hi there!
>
> I'm working on an active-active LDAP cluster based on
> pacemaker+corosync+openldap+debian.
>
> I have two nodes, one running ldirectord and openldap and the other
> just openldap (until failover). The layout is like this:
> http://2.bp.blogspot.com/-lDDiKhnwBhM/TqLOqDup4KI/AAAAAAAABdU/yTk29zlpmn8/s320/esqueleto+ipv4.png
>
>
> As you all know, with pacemaker I assigned the VIP to the main
> interface of the main node (well, the node running ldirectord) and to
> the loopback device on the other node.
>
> Surprisingly, the load balancing over IPv6 works extremely well, but
> not over IPv4.
>
> The "slave" node (the node not running ldirectord), who has the VIP in
> loopback, has a lot of weird behaviours with ARP and ROUTING
> services.
>
> When pinging some host in the same subnet, I see that the machine
> sends packets to the loopback interface, with the src IP and dst IP
> both set to the IP of the original destination host.
> In addition, sometimes the node doesn't reply to ARP requests for its
> real IP, and I have to set a static ARP entry on any other host that
> wants to contact the node.
>
> Here is my configuration:
>
> root at ldap1:~# sysctl -A | grep arp
> net.ipv4.conf.all.proxy_arp = 0
> net.ipv4.conf.all.arp_filter = 0
> net.ipv4.conf.all.arp_announce = 2
> net.ipv4.conf.all.arp_ignore = 1
> net.ipv4.conf.all.arp_accept = 0
> net.ipv4.conf.all.arp_notify = 0
> net.ipv4.conf.default.proxy_arp = 0
> net.ipv4.conf.default.arp_filter = 0
> net.ipv4.conf.default.arp_announce = 0
> net.ipv4.conf.default.arp_ignore = 0
> net.ipv4.conf.default.arp_accept = 0
> net.ipv4.conf.default.arp_notify = 0
> net.ipv4.conf.lo.proxy_arp = 0
> net.ipv4.conf.lo.arp_filter = 0
> net.ipv4.conf.lo.arp_announce = 0
> net.ipv4.conf.lo.arp_ignore = 0
> net.ipv4.conf.lo.arp_accept = 0
> net.ipv4.conf.lo.arp_notify = 0
> net.ipv4.conf.eth0.proxy_arp = 0
> net.ipv4.conf.eth0.arp_filter = 0
> net.ipv4.conf.eth0.arp_announce = 2
> net.ipv4.conf.eth0.arp_ignore = 1
> net.ipv4.conf.eth0.arp_accept = 0
> net.ipv4.conf.eth0.arp_notify = 0
> net.ipv4.conf.eth1.proxy_arp = 0
> net.ipv4.conf.eth1.arp_filter = 0
> net.ipv4.conf.eth1.arp_announce = 0
> net.ipv4.conf.eth1.arp_ignore = 0
> net.ipv4.conf.eth1.arp_accept = 0
> net.ipv4.conf.eth1.arp_notify = 0
>
>
>
>
> root at ldap1:~# crm configure show
> node ldap1 \
> attributes standby="off"
> node ldap2 \
> attributes standby="off"
> primitive IPV-lo_4 ocf:heartbeat:IPaddr \
> params ip="vip_v4" cidr_netmask="24" nic="lo" \
> op monitor interval="5s"
> primitive IPV-lo_6 ocf:heartbeat:IPv6addrLO \
> params ipv6addr="vip_v6" cidr_netmask="64" \
> op monitor interval="5s"
> primitive IPV_4 ocf:heartbeat:IPaddr2 \
> params ip="vip_v4" nic="eth0" cidr_netmask="24" lvs_support="true" \
> op monitor interval="5s"
> primitive IPV_6 ocf:heartbeat:IPv6addr \
> params ipv6addr="vip_v6" nic="eth0" cidr_netmask="64" \
> op monitor interval="5s"
> primitive lvs ocf:heartbeat:ldirectord \
> op monitor interval="20" timeout="10"
> group IPV_LVS IPV_4 IPV_6 lvs
> group IPV_lo IPV-lo_6 IPV-lo_4
> clone clon_IPV_lo IPV_lo \
> meta interleave="true" target-role="Started"
> colocation LVS_no_IPV_lo -inf: clon_IPV_lo IPV_LVS
> property $id="cib-bootstrap-options" \
> dc-version="1.0.9-74392a28b7f31d7ddc86689598bd23114f58978b" \
> cluster-infrastructure="openais" \
> expected-quorum-votes="2" \
> no-quorum-policy="ignore" \
> stonith-enabled="false" \
> last-lrm-refresh="1320860387"
> rsc_defaults $id="rsc-options" \
> resource-stickiness="1000"
>
>
> root at ldap1:~# ifconfig
> eth0 Link encap:Ethernet HWaddr 00:xx:xx:xx:xx
> inet addr: real_ipv4 Bcast:150.214.5.255 Mask:255.255.255.0
> inet6 addr: real_ipv6 Scope:Global
> inet6 addr: fe80::217:31ff:fe22:2b64/64 Scope:Link
> UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
> RX packets:4613984 errors:0 dropped:0 overruns:0 frame:0
> TX packets:3704110 errors:0 dropped:0 overruns:0 carrier:0
> collisions:0 txqueuelen:1000
> RX bytes:979844318 (934.4 MiB) TX bytes:1572082949 (1.4 GiB)
> Interrupt:26
>
> eth1 Link encap:Ethernet HWaddr 00:xx:31:22:xx:xx
> inet addr:192.168.21.21 Bcast:192.168.21.255 Mask:255.255.255.0
> inet6 addr: fxxxxxxxxx/64 Scope:Link
> UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
> RX packets:8985987 errors:0 dropped:0 overruns:0 frame:0
> TX packets:9187433 errors:0 dropped:0 overruns:0 carrier:0
> collisions:0 txqueuelen:1000
> RX bytes:1676364355 (1.5 GiB) TX bytes:2465255044 (2.2 GiB)
> Interrupt:27
>
> lo Link encap:Local Loopback
> inet addr:127.0.0.1 Mask:255.0.0.0
> inet6 addr: ::1/128 Scope:Host
> inet6 addr: vip_v6 Scope:Global
> UP LOOPBACK RUNNING MTU:16436 Metric:1
> RX packets:6996 errors:0 dropped:0 overruns:0 frame:0
> TX packets:6996 errors:0 dropped:0 overruns:0 carrier:0
> collisions:0 txqueuelen:0
> RX bytes:2236681 (2.1 MiB) TX bytes:2236681 (2.1 MiB)
>
> lo:0 Link encap:Local Loopback
> inet addr:vip_v4 Mask:255.255.255.0
> UP LOOPBACK RUNNING MTU:16436 Metric:1
>
> root at ldap2:~# ifconfig
> eth0 Link encap:Ethernet HWaddr 00:xxxxxxxxxx
> inet addr:real_ipv4 Bcast:150.214.5.255 Mask:255.255.255.0
> inet6 addr: real_ipv6 Scope:Global
> inet6 addr: vip_v6 Scope:Global
> inet6 addr: xxxxxxxxxxxx Scope:Link
> UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
> RX packets:12879341 errors:0 dropped:0 overruns:0 frame:0
> TX packets:11568053 errors:0 dropped:0 overruns:0 carrier:0
> collisions:0 txqueuelen:1000
> RX bytes:2798011078 (2.6 GiB) TX bytes:4604167850 (4.2 GiB)
> Interrupt:26
>
> eth1 Link encap:Ethernet HWaddr 00xxxxxxxxxxxxxx
> inet addr:192.168.21.22 Bcast:192.168.21.255 Mask:255.255.255.0
> inet6 addr: xxxxxxxxxxxxxx/64 Scope:Link
> UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
> RX packets:26024464 errors:0 dropped:0 overruns:0 frame:0
> TX packets:17389962 errors:0 dropped:0 overruns:0 carrier:0
> collisions:0 txqueuelen:1000
> RX bytes:5983267668 (5.5 GiB) TX bytes:3446728676 (3.2 GiB)
> Interrupt:27
>
> lo Link encap:Local Loopback
> inet addr:127.0.0.1 Mask:255.0.0.0
> inet6 addr: ::1/128 Scope:Host
> UP LOOPBACK RUNNING MTU:16436 Metric:1
> RX packets:9075609 errors:0 dropped:0 overruns:0 frame:0
> TX packets:9075609 errors:0 dropped:0 overruns:0 carrier:0
> collisions:0 txqueuelen:0
> RX bytes:2726151268 (2.5 GiB) TX bytes:2726151268 (2.5 GiB)
>
>
> Any ideas?
>
> Best regards.
>
>
> --
> /* Arturo Borrero Gonzalez || cer.inet at linuxmail.org */
> /* Use debian gnu/linux! Best OS ever! */
--
/* Arturo Borrero Gonzalez || cer.inet at linuxmail.org */
/* Use debian gnu/linux! Best OS ever! */