Install the elrepo and epel repositories on both PBX in a Flash nodes.
# wget http://epel.mirror.freedomvoice.com/6/i386/epel-release-6-8.noarch.rpm
# yum install epel-release-6-8.noarch.rpm
# wget http://www.elrepo.org/elrepo-release-6-5.el6.elrepo.noarch.rpm
# yum install elrepo-release-6-5.el6.elrepo.noarch.rpm
Install DRBD
# yum install kmod-drbd83 drbd83-utils
Install heartbeat
# yum install heartbeat
Assuming /dev/sdb is the disk to be mirrored on both nodes, create the file /etc/drbd.d/disk1.res on both nodes with the following contents:
resource disk1
{
startup {
wfc-timeout 30;
outdated-wfc-timeout 20;
degr-wfc-timeout 30;
}
net {
cram-hmac-alg sha1;
shared-secret sync_disk;
}
syncer {
rate 100M;
verify-alg sha1;
}
on node1 {
device /dev/drbd0;
disk /dev/sdb;
address ip-of-node1:7789;
meta-disk internal;
}
on node2 {
device /dev/drbd0;
disk /dev/sdb;
address ip-of-node2:7789;
meta-disk internal;
}
}
Create the DRBD metadata and start DRBD on both nodes:
# drbdadm create-md disk1
# service drbd start
On the node you want to make primary (in this case node1), run:
# drbdadm -- --overwrite-data-of-peer primary disk1
To monitor the sync you can use
# watch "cat /proc/drbd"
Once the disks are synced you should see something like this in /proc/drbd:
version: 8.3.16 (api:88/proto:86-97)
GIT-hash: a798fa7e274428a357657fb52f0ecf40192c1985 build by phil@Build32R6, 2013-09-27 15:59:12
0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----
ns:144600 nr:428 dw:145028 dr:32986 al:47 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0
Now create the filesystem (on the primary node only):
# mkfs.ext4 /dev/drbd0
For some reason heartbeat did not work with ucast in my setup, so I used mcast. Create the heartbeat configuration in /etc/ha.d/ha.cf on both nodes as follows:
debugfile /var/log/ha-debug
logfile /var/log/ha-log
logfacility local0
keepalive 300ms
deadtime 4
warntime 2
initdead 10
udpport 694
mcast eth0 239.1.2.3 694 1 0
#bcast eth0
auto_failback on
node node1 node2
Create the haresources file (it must be identical on both nodes):
# cat /etc/ha.d/haresources
node1 drbddisk::disk1 Filesystem::/dev/drbd0::/disk1::ext4 mysqld
node1 cluster-ip/24/eth0/cluster-ip-broadcast-ip IPsrcaddr::cluster-ip asterisk httpd
Explanation
The first line promotes the DRBD disk to primary, mounts it on /disk1 on the active node, and starts MySQL.
The second line brings up the cluster IP, routes outbound traffic through it (IPsrcaddr), and starts asterisk (using amportal) and httpd.
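For example, with a cluster IP of 192.168.1.50 on a /24 network (placeholder addresses, substitute your own), the second line would look like this:
node1 192.168.1.50/24/eth0/192.168.1.255 IPsrcaddr::192.168.1.50 asterisk httpd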
Create the authkeys file with permission 600 on both nodes (it must be identical on each). Generate a random key:
# dd if=/dev/urandom count=4 2>/dev/null | md5sum | cut -c1-32
Then put it in /etc/ha.d/authkeys, appended to the "1 sha1" line:
auth 1
1 sha1 <generated-key>
# chmod 600 /etc/ha.d/authkeys
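Alternatively, the whole file can be written in one step, for example (a sketch, assuming a bash shell):
# key=$(dd if=/dev/urandom count=4 2>/dev/null | md5sum | cut -c1-32)
# printf "auth 1\n1 sha1 %s\n" "$key" > /etc/ha.d/authkeys
# chmod 600 /etc/ha.d/authkeys
Copy the resulting file to the other node so both copies match.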
Create symlinks to mysqld and asterisk in /etc/ha.d/resource.d
# cd /etc/ha.d/resource.d
# ln -s /etc/init.d/mysqld
# ln -s /usr/local/sbin/amportal asterisk
Create the mount point /disk1 on both nodes so that heartbeat can mount the DRBD disk, then start heartbeat on node1:
# mkdir /disk1
# service heartbeat start
The above command will mount the DRBD disk on /disk1, bring up the cluster IP, and start mysqld, asterisk, and httpd.
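To confirm the resources came up on node1, you can check, for example:
# cat /proc/drbd
# df -h /disk1
# service mysqld status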
On node1, stop mysqld and asterisk, then move the data directories onto the DRBD disk (a sketch of the commands follows this list):
Move /var/lib/mysql to /disk1/var/mysql
Move /var/lib/asterisk to /disk1/var/asterisk
Move /etc/asterisk to /disk1/etc/asterisk
Move /usr/lib/asterisk to /disk1/usr/asterisk
Move /tftpboot to /disk1/tftpboot
Move /var/spool/asterisk to /disk1/var/spool/asterisk
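A minimal sketch of those moves (run on node1 with mysqld and asterisk stopped and /disk1 mounted; the layout under /disk1 is just the one used in this guide):
# mkdir -p /disk1/var/spool /disk1/etc /disk1/usr
# mv /var/lib/mysql /disk1/var/mysql
# mv /var/lib/asterisk /disk1/var/asterisk
# mv /etc/asterisk /disk1/etc/asterisk
# mv /usr/lib/asterisk /disk1/usr/asterisk
# mv /tftpboot /disk1/tftpboot
# mv /var/spool/asterisk /disk1/var/spool/asterisk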
On node2, stop mysqld and asterisk, then delete /var/lib/mysql, /var/lib/asterisk, /etc/asterisk, /usr/lib/asterisk, /tftpboot and /var/spool/asterisk (a sketch follows).
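A sketch of that cleanup on node2 (double-check the paths first, rm -rf is destructive):
# service mysqld stop
# amportal stop
# rm -rf /var/lib/mysql /var/lib/asterisk /etc/asterisk /usr/lib/asterisk /tftpboot /var/spool/asterisk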
Now create symlinks to the corresponding folders on /disk1 (do this on both nodes). For example:
# cd /var/lib
# ln -s /disk1/var/mysql
# ln -s /disk1/var/asterisk
# cd /usr/lib
# ln -s /disk1/usr/asterisk
# cd /etc
# ln -s /disk1/etc/asterisk
# cd /
# ln -s /disk1/tftpboot
# cd /var/spool
# ln -s /disk1/var/spool/asterisk
If you have any other folders that need to be kept in sync between the two nodes, just move them to /disk1 and create symlinks on both nodes pointing to them.
Disable mysqld and asterisk from starting on boot on both nodes (heartbeat will start them):
# chkconfig mysqld off
# chkconfig asterisk off
Enable drbd to be started on boot on both nodes
# chkconfig drbd on
PIAF uses the /etc/rc.local file to start asterisk, so on both nodes comment out the line:
/usr/local/sbin/amportal
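One way to comment it out (a sketch; check /etc/rc.local first to see exactly how the line is written on your system):
# sed -i 's|^/usr/local/sbin/amportal|#/usr/local/sbin/amportal|' /etc/rc.local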
Start heartbeat on the second node. You now have a two-node PBX in a Flash cluster that works off the same database and the same asterisk configuration. All you have to do is use the cluster IP to manage the instance.
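To verify failover, you can, for example, stop heartbeat on node1 and watch the resources (DRBD primary, /disk1 mount, cluster IP, mysqld, asterisk, httpd) move to node2:
# service heartbeat stop      (on node1)
# watch "cat /proc/drbd"      (on node2, it should become Primary)
With auto_failback on, the resources move back to node1 once heartbeat is started there again.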