linux-keepalived-04 keepalived + Nginx + Apache deployment

Lab requirements:

node1 and node2 act as nginx proxy servers.

node3 and node4 act as web servers; setting up an Apache service on them is enough.

In the nginx configuration, modify the log format to include the client's source IP so the web servers can tell who actually accessed them.

Initial preparation

Lab environment

node1 (nginx1 + keepalived master): 192.168.141.53
node2 (nginx2 + keepalived backup): 192.168.141.69
node3 (web1): 192.168.141.12
node4 (web2): 192.168.141.132
VIP: 192.168.141.100

node1, node2

  Note: the firewall on node1 and node2 must be shut down completely; it is not enough to merely add rules opening the relevant ports the way we do on node3 and node4. If the firewall on node1 or node2 is left running, you will hit the problem of both the master and the backup of the Nginx + keepalived pair holding the VIP at the same time. The same symptom also appears if the virtual_router_id values in the two keepalived configuration files do not match.
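
If you would rather keep firewalld running on the proxy nodes, an alternative sketch (not used in this lab) is to explicitly allow the VRRP protocol so the two keepalived instances can see each other's advertisements:

firewall-cmd --permanent --add-rich-rule='rule protocol value="vrrp" accept'
firewall-cmd --reload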

[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# vim /etc/selinux/config
[root@node1 ~]# cat /etc/selinux/config |grep SELINUX
# SELINUX= can take one of these three values:
SELINUX=disabled
# SELINUXTYPE= can take one of three two values:
SELINUXTYPE=targeted
[root@node1 ~]#
[root@node1 ~]# setenforce 0
[root@node1 ~]# getenforce
Permissive
[root@node1 ~]#

node3, node4

[root@node3 ~]# firewall-cmd --add-port={80,8080}/tcp --permanent
success
[root@node3 ~]# firewall-cmd --reload
success
[root@node3 ~]#
[root@node3 ~]# vim /etc/selinux/config
[root@node3 ~]# cat /etc/selinux/config |grep SELINUX
# SELINUX= can take one of these three values:
SELINUX=disabled
# SELINUXTYPE= can take one of three two values:
SELINUXTYPE=targeted
[root@node3 ~]#
[root@node3 ~]# setenforce 0
[root@node3 ~]# getenforce
Permissive
[root@node3 ~]#

nginx deployment

node1, node2

[root@node2 ~]# yum install epel-release -y
[root@node2 ~]# yum install nginx -y
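
It is also worth making nginx start at boot on both proxy nodes (the original steps only restart it manually later); for example:

systemctl enable nginx      # run on both node1 and node2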

node1

[root@node1 ~]# cd /etc/nginx/conf.d/
[root@node1 conf.d]# vim proxy.conf
[root@node1 conf.d]# cat proxy.conf
upstream websers {
    server 192.168.141.12;
    server 192.168.141.132;
}
server {
    listen 8080;
    server_name 192.168.141.53;
    location / {
        proxy_pass http://websers;
    }
}
[root@node1 conf.d]#
[root@node1 conf.d]# systemctl restart nginx
[root@node1 conf.d]# nginx -s reload
[root@node1 conf.d]#

node2

[root@node2 ~]# cd /etc/nginx/conf.d/
[root@node2 conf.d]# vim proxy.conf
[root@node2 conf.d]# cat proxy.conf
upstream websers {
    server 192.168.141.12;
    server 192.168.141.132;
}
server {
    listen 8080;
    server_name 192.168.141.69;
    location / {
        proxy_pass http://websers;
    }
}
[root@node2 conf.d]#
[root@node2 conf.d]# systemctl restart nginx
[root@node2 conf.d]# nginx -s reload
[root@node2 conf.d]#

Note: weights and port numbers can also be set here, and a server can be temporarily taken out of rotation with down:

upstream websers {
    server 192.168.141.12:8080 weight=3;    # weight=N sets this server's weight
    server 192.168.141.132:8080 down;
}
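
The lab requirements also ask the proxy to pass the client's source IP on to the web servers so their access logs show who really made the request; the configuration above does not do that yet. One common way to satisfy it, sketched here with the illustrative header name X-Real-IP, is to add proxy_set_header directives to the proxy's location block and then reference that header in Apache's LogFormat:

# node1/node2: inside the "location /" block of /etc/nginx/conf.d/proxy.conf
location / {
    proxy_pass http://websers;
    proxy_set_header X-Real-IP $remote_addr;                       # the client's real IP
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;   # append to any existing chain
}

# node3/node4: in /etc/httpd/conf/httpd.conf, redefine the "combined" format
# to log the forwarded header instead of the proxy's address (%h)
LogFormat "%{X-Real-IP}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined

Reload nginx and httpd afterwards for the change to take effect.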

node3

[root@node3 ~]# yum install httpd -y
[root@node3 ~]# echo "i am web1" > /var/www/html/index.html
[root@node3 ~]# systemctl restart httpd
[root@node3 ~]# systemctl enable httpd

node4

[root@node4 ~]# yum install httpd -y
[root@node4 ~]# echo "i am web2" > /var/www/html/index.html
[root@node4 ~]# systemctl restart httpd
[root@node4 ~]# systemctl enable httpd

Check whether the Apache service on node3 and node4 is working.

(screenshot: the web1 and web2 test pages open normally)
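
The same check can be done from the command line; given the index pages created above, the two test strings should come back:

curl http://192.168.141.12       # expected: i am web1
curl http://192.168.141.132      # expected: i am web2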

Check whether the nginx service on node1 and node2 is working.

node1, node2

(screenshot: both nginx proxies return the backend pages)
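
A command-line spot check of the proxies also works; with the default round-robin upstream, repeated requests to port 8080 should return the two backend pages in turn:

curl http://192.168.141.53:8080
curl http://192.168.141.69:8080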

As shown above, both the Apache and the nginx services are working normally.

Keepalived deployment

node1

[root@node1 conf.d]# cd
[root@node1 ~]# vim keepalive.sh
[root@node1 ~]# cat keepalive.sh
#!/bin/bash
yum install keepalived -y
mv /etc/keepalived/keepalived.conf{,.bak}
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id node1              # use node2 on node2
}
vrrp_instance VI_1 {
    state MASTER                 # BACKUP on node2
    interface ens33
    virtual_router_id 10
    priority 100                 # must be lower than 100 on node2
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.141.100
    }
}
EOF
[root@node1 ~]# chmod +x ./keepalive.sh
[root@node1 ~]# ./keepalive.sh

node2

[root@node2 conf.d]# cd
[root@node2 ~]# vim keepalive.sh
[root@node2 ~]# cat keepalive.sh
#!/bin/bash
yum install keepalived -y
mv /etc/keepalived/keepalived.conf{,.bak}
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id node2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 10
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.141.100
    }
}
EOF
[root@node2 ~]# chmod +x ./keepalive.sh
[root@node2 ~]# ./keepalive.sh
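
Note that the script only installs keepalived and writes its configuration; the service itself still has to be started (later steps restart it explicitly). To start it on both nodes and have it come up after a reboot, for example:

systemctl start keepalived      # run on node1 and node2
systemctl enable keepalived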

Characteristics of keepalived + nginx high availability

(diagram: keepalived + nginx high-availability failover)

As shown in the diagram above, we need a custom script to check whether the nginx service itself is healthy.

node1

Add the vrrp_script definition to the keepalived configuration file:

[root@node1 ~]# vim /etc/keepalived/keepalived.conf
[root@node1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id node1              # use node2 on node2
}
# define the check script
vrrp_script chk_http_port {
    script "/usr/local/src/check_nginx_pid.sh"
    interval 1
    weight -20                   # subtract 20 from the priority on failure
}
vrrp_instance VI_1 {
    state MASTER                 # BACKUP on node2
    interface ens33
    virtual_router_id 10
    priority 100                 # must be lower than 100 on node2
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # reference the check script
    track_script {
        chk_http_port
    }
    virtual_ipaddress {
        192.168.141.100
    }
}
[root@node1 ~]#

Explanation of the vrrp_script block:

# define the check script
vrrp_script chk_http_port {
    script "/usr/local/src/check_nginx_pid.sh"
    # check_nginx_pid.sh decides whether nginx is healthy: if it exits with 1,
    # the weight -20 is applied; otherwise the priority is left unchanged.
    interval 1      # run the script (and evaluate its exit code) once per second
    weight -20      # subtract 20 from the priority
}

  Note: does the priority have to be added back manually after it has been reduced? No. Once nginx is running again the effective priority automatically returns to its initial value; that is, after nginx is stopped and then restarted, the priority goes back to 100 on its own, with no manual change needed.
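
One way to watch this happen (optional, not part of the lab) is to capture the VRRP advertisements on the master; the advertised priority should drop to 80 while nginx is stopped and return to 100 once it is running again:

tcpdump -nn -i ens33 vrrp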

On node1, create the script that decides whether nginx is healthy: it exits 1 when nginx is down and 0 when it is running normally.

[root@node1 ~]# vim /usr/local/src/check_nginx_pid.sh
[root@node1 ~]# cat /usr/local/src/check_nginx_pid.sh
#!/bin/bash
A=`ps -C nginx --no-header |wc -l`
if [ $A == 0 ];then
    exit 1
else
    exit 0
fi
[root@node1 ~]# cd /usr/local/src/
[root@node1 src]# chmod +x ./check_nginx_pid.sh
[root@node1 src]# cd
[root@node1 ~]# systemctl restart keepalived
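
Before relying on keepalived to call it, the script can also be checked by hand; its exit status should flip between 0 and 1 as nginx is started and stopped:

/usr/local/src/check_nginx_pid.sh; echo $?      # 0 while nginx is running, 1 after it is stopped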

  node2 does not need the changes above: there are only two nginx servers in total, so it is enough for one machine, the current master, to run the nginx health check.

[root@node2 ~]# systemctl restart keepalived

Test whether the keepalived VIP sits on the master server

node1

[root@node1 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:8b:6f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.141.53/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.141.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::45c1:b728:e8e7:a1fa/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node1 ~]#

node2

[root@node2 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:56:58:3c brd ff:ff:ff:ff:ff:ff
inet 192.168.141.69/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::1edf:37bf:62b9:68e2/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node2 ~]#

Check whether the VIP works

  Explanation: we curl vip:8080 here. The port has to be included because keepalived only maps the VIP onto the real addresses 192.168.141.53 and 192.168.141.69, while the nginx reverse proxy we configured earlier listens on port 8080, so the request must target that port. This is not a port on the web servers themselves: accessing 192.168.141.12:8080 directly, for example, will not open any page. The reverse proxy we built exposes 192.168.141.12:80 and 192.168.141.132:80 behind 192.168.141.53:8080 and 192.168.141.69:8080.
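
In terms of concrete commands (illustrative, using the addresses above):

curl http://192.168.141.100:8080     # goes through the nginx proxy and returns a backend page
curl http://192.168.141.12:8080      # fails: apache on the web servers only listens on port 80
curl http://192.168.141.12           # reaches web1 directly on port 80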

Stop the keepalived service on node1, watch whether the VIP floats over, and confirm access still works.

[root@node1 ~]# systemctl stop keepalived
[root@node1 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:8b:6f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.141.53/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::45c1:b728:e8e7:a1fa/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node1 ~]#

You can see that the VIP has now moved over to node2:

[root@node2 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:56:58:3c brd ff:ff:ff:ff:ff:ff
inet 192.168.141.69/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.141.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::1edf:37bf:62b9:68e2/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node2 ~]#

Test that the VIP still works after failing over

  Because keepalived elects the master preemptively by default, once we restart keepalived on node1 the VIP floats from node2 back to node1.
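
If you would rather have the VIP stay on node2 after a failover, keepalived also supports non-preemptive operation (not used in this lab); a sketch of the relevant options:

vrrp_instance VI_1 {
    state BACKUP        # nopreempt is only honoured when the initial state is BACKUP
    nopreempt
    # the rest of the instance (interface, virtual_router_id, priority,
    # authentication, track_script, virtual_ipaddress) stays as configured above
}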

[root@node1 ~]# systemctl restart keepalived
[root@node1 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:8b:6f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.141.53/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.141.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::45c1:b728:e8e7:a1fa/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node1 ~]#
[root@node2 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:56:58:3c brd ff:ff:ff:ff:ff:ff
inet 192.168.141.69/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::1edf:37bf:62b9:68e2/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node2 ~]#

You can see that the VIP has floated back to node1.

Let's take a closer look at the script used earlier to tell whether nginx has stopped.

[root@node1 ~]# cat /usr/local/src/check_nginx_pid.sh
#!/bin/bash
A=`ps -C nginx --no-header |wc -l`
if [ $A == 0 ];then
    exit 1
else
    exit 0
fi
[root@node1 ~]#

The script builds its decision around the variable A, which is set from the output of ps -C nginx --no-header | wc -l.

Stop the nginx service on node1 and look at the value of A: with nginx stopped, A = 0.

[root@node1 ~]# systemctl stop nginx
[root@node1 ~]# ps -C nginx --no-header |wc -l
0
[root@node1 ~]#

For comparison, here is the value of A on node2, where nginx has not been stopped: A != 0 (the count of 2 is the nginx master process plus one worker).

[root@node2 ~]# ps -C nginx --no-header |wc -l
2
[root@node2 ~]#

Stop the nginx service on node1, watch whether the VIP floats over, and confirm access still works.

[root@node1 ~]# systemctl stop nginx
[root@node1 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:8b:6f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.141.53/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::45c1:b728:e8e7:a1fa/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node1 ~]#

You can see that the VIP has floated over to node2:

[root@node2 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:56:58:3c brd ff:ff:ff:ff:ff:ff
inet 192.168.141.69/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.141.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::1edf:37bf:62b9:68e2/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node2 ~]#

Test that the VIP still works after failing over

(screenshot: the page is still reachable through the VIP)

Now restart the nginx service on node1 and see whether the VIP can float back.

[root@node1 ~]# systemctl restart nginx
[root@node1 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:8b:6f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.141.53/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.141.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::45c1:b728:e8e7:a1fa/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node1 ~]#

node2

[root@node2 ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:56:58:3c brd ff:ff:ff:ff:ff:ff
inet 192.168.141.69/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::1edf:37bf:62b9:68e2/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@node2 ~]#

You can see that the VIP has floated back to node1.

Finally, test that the VIP works again now that it has floated back.

(screenshot: the page is reachable through the VIP again)

Stop the httpd service on node3

[root@node3 ~]# systemctl stop httpd
[root@node3 ~]#

Access the VIP

(screenshot: accessing the VIP with web1 down)
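
With httpd stopped on node3, nginx should mark 192.168.141.12 as failed and every request through the VIP should be answered by web2; a quick way to confirm:

for i in 1 2 3 4; do curl -s http://192.168.141.100:8080; done      # expected: "i am web2" each time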

Start the httpd service on node3 again

[root@node3 ~]# systemctl restart httpd
[root@node3 ~]#

Access the VIP again

(screenshot: accessing the VIP with web1 back online)

Testing complete.
