Keepalive实战之LVS-DR

实验目的:构建LVS-DR架构。为了达到LVS的高可用目的,在LVS-DR的Director端做Keepalive集群:在Director-A上做keepalive-A,在Director-B上做keepalive-B;LVS-RS1、LVS-RS2为后端的两台web服务器,通过在Director上做keepalive集群实现高可用的目的。

网络拓扑图

实验环境(keepalive节点同时作为LVS的director节点)

keepalive-A(Director-A) 172.16.253.108

keepalive-B(Director-B) 172.16.253.105

LVS-RS1                 172.16.250.127

LVS-RS2                 172.16.253.193

VIP                     172.16.253.150

client                  172.16.253.177

LVS-RS web集群

为了更好地观察实验结果,故在此将RS1、RS2的web页面内容设置为不一致,以便可以更清晰地区分RS1服务端和RS2服务端

LVS-RS1

[root@LVS-RS1 ~]# systemctl restart chronyd  \\多台服务器时间同步

[root@LVS-RS1 ~]# iptables -F

[root@LVS-RS1 ~]# setenforce 0

[root@LVS-RS1 ~]# yum -y install nginx

[root@LVS-RS1 ~]# vim /usr/share/nginx/html/index.html

<h1> Web RS1 </h1>

[root@LVS-RS1 ~]# systemctl start nginx

 

修改内核参数并添加VIP地址

[root@LVS-RS1 ~]# vim lvs_dr.sh

#!/bin/bash
#
# lvs_dr.sh - prepare this LVS-DR real server.
#   start: suppress ARP replies/announcements for the VIP, then bind the
#          VIP to lo:0 and add a host route for it
#   stop : remove the VIP from lo:0 and restore default ARP behaviour
vip=172.16.253.150
mask=255.255.255.255
iface="lo:0"

case "$1" in
start)
    # Set arp_ignore/arp_announce BEFORE configuring the VIP.  If the VIP
    # is brought up first, there is a window in which the kernel may answer
    # ARP queries for the VIP and steal traffic from the director.
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ifconfig "$iface" "$vip" netmask "$mask" broadcast "$vip" up
    route add -host "$vip" dev "$iface"
    ;;
stop)
    # Take the VIP down first so the restored ARP settings never apply
    # while the VIP is still configured.
    ifconfig "$iface" down
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ;;
*)
    echo "Usage: $(basename "$0") start|stop"
    exit 1
    ;;
esac

[root@LVS-RS1 ~]# bash lvs_dr.sh start

[root@LVS-RS1 ~]# ifconfig

lo:0: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

    inet 172.16.253.150  netmask 255.255.255.255

    loop  txqueuelen 1  (Local Loopback)

LVS-RS2

[root@LVS-RS2 ~]# systemctl restart chronyd  \\多台服务器时间同步

[root@LVS-RS2 ~]# iptables -F

[root@LVS-RS2 ~]# setenforce 0

[root@LVS-RS2 ~]# yum -y install nginx

[root@LVS-RS2 ~]# vim /usr/share/nginx/html/index.html

<h1> Web RS2 </h1>

[root@LVS-RS2 ~]# systemctl start nginx

 

修改内核参数并添加VIP地址

[root@LVS-RS2 ~]# vim lvs_dr.sh

#!/bin/bash
#
# lvs_dr.sh - prepare this LVS-DR real server.
#   start: suppress ARP replies/announcements for the VIP, then bind the
#          VIP to lo:0 and add a host route for it
#   stop : remove the VIP from lo:0 and restore default ARP behaviour
vip=172.16.253.150
mask=255.255.255.255
iface="lo:0"

case "$1" in
start)
    # Set arp_ignore/arp_announce BEFORE configuring the VIP.  If the VIP
    # is brought up first, there is a window in which the kernel may answer
    # ARP queries for the VIP and steal traffic from the director.
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ifconfig "$iface" "$vip" netmask "$mask" broadcast "$vip" up
    route add -host "$vip" dev "$iface"
    ;;
stop)
    # Take the VIP down first so the restored ARP settings never apply
    # while the VIP is still configured.
    ifconfig "$iface" down
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ;;
*)
    echo "Usage: $(basename "$0") start|stop"
    exit 1
    ;;
esac

[root@LVS-RS2 ~]# bash lvs_dr.sh start

[root@LVS-RS2 ~]# ifconfig

lo:0: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

    inet 172.16.253.150  netmask 255.255.255.255

    loop  txqueuelen 1  (Local Loopback)

Keepalive集群

Director节点搭建

keepalive-A

[root@keepaliveA ~]# systemctl restart chronyd  \\多台服务器时间同步

[root@keepaliveA ~]# yum -y install ipvsadm

keepalive-B

[root@keepaliveB ~]# systemctl restart chronyd  \\多台服务器时间同步

[root@keepaliveB ~]# yum -y install ipvsadm

keepalive上配置web sorry server

keepalive-A

[root@keepaliveA ~]# yum -y install nginx

[root@keepaliveA ~]# vim /usr/share/nginx/html/index.html

</h1> sorry from Director-A(keepalive-A) </h1>

[root@keepaliveA ~]# systemctl start nginx

keepalive-B

[root@keepalive-B ~]# yum -y install nginx

[root@keepalive-B ~]# vim /usr/share/nginx/html/index.html

</h1> sorry from Director-B(keepalive-B) </h1>

[root@keepaliveB ~]# systemctl start nginx

keepalive-A配置keepalive

keepalive-A

[root@keepalive-A ~]# iptables -F

[root@keepalive-A ~]# yum -y install keepalived

[root@keepaliveA ~]# vim /etc/keepalived/keepalived.conf

global_defs {
    notification_email {                    # alert mail recipients
        jevon@danran.com
    }
    notification_email_from ka_admin@danran.com   # mail sender
    smtp_server 127.0.0.1                   # mail relay server
    smtp_connect_timeout 30                 # SMTP connect timeout (seconds)
    router_id keepaliveA                    # node identifier, unique per host
    vrrp_mcast_group4 224.103.5.5           # VRRP multicast group (default 224.0.0.18)
}

vrrp_instance VI_A {
    state MASTER                            # this node starts as master
    interface ens33
    virtual_router_id 51                    # must match on both nodes
    priority 100                            # higher than backup (95)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass qr8hQHuL
    }
    virtual_ipaddress {
        172.16.253.150/32 dev ens33
    }
}

virtual_server 172.16.253.150 80 {
    delay_loop 2                            # health-check polling interval (seconds)
    lb_algo rr                              # scheduling algorithm
    lb_kind DR                              # LVS forwarding mode
    protocol TCP                            # service protocol (TCP only)
    sorry_server 127.0.0.1 80               # local nginx page served when all RS are down

    real_server 172.16.250.127 80 {
        weight 1
        # Backends serve plain HTTP on port 80, so the application-layer
        # check must be HTTP_GET (SSL_GET would attempt a TLS handshake
        # and always fail against an HTTP listener).
        HTTP_GET {
            url {
                path /                      # URL to probe
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc   # expected content checksum
                status_code 200             # expected healthy response code
            }
            connect_timeout 3               # connect timeout (seconds)
            nb_get_retry 3                  # retries before marking RS down
            delay_before_retry 1            # delay between retries (seconds)
        }
    }
    real_server 172.16.253.193 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 1
        }
    }
}

[root@keepaliveA ~]# systemctl start keepalived

[root@keepaliveA ~]# ip a l

2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:0c:29:75:dc:3c brd ff:ff:ff:ff:ff:ff

     inet 172.16.253.150/32 scope global ens33

   valid_lft forever preferred_lft forever

[root@keepaliveA ~]# ipvsadm -Ln

IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

    -> RemoteAddress:Port           Forward Weight ActiveConn InActConn

TCP  172.16.253.150:80 rr

    -> 172.16.250.127:80            Route   1      0          0        

-> 172.16.253.193:80            Route   1      0          0  

keepalive-B配置keepalive

keepalive-B

[root@keepalive-B ~]# iptables -F

[root@keepalive-B ~]# yum -y install keepalived

[root@keepaliveB ~]# vim /etc/keepalived/keepalived.conf

global_defs {
    notification_email {                    # alert mail recipients
        jevon@danran.com
    }
    notification_email_from ka_admin@danran.com   # mail sender
    smtp_server 127.0.0.1                   # mail relay server
    smtp_connect_timeout 30                 # SMTP connect timeout (seconds)
    router_id keepaliveB                    # node identifier, unique per host
    vrrp_mcast_group4 224.103.5.5           # VRRP multicast group (default 224.0.0.18)
}

vrrp_instance VI_A {
    state BACKUP                            # this node starts as backup
    interface ens33
    virtual_router_id 51                    # must match on both nodes
    priority 95                             # lower than master (100)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass qr8hQHuL
    }
    virtual_ipaddress {
        172.16.253.150/32 dev ens33
    }
}

virtual_server 172.16.253.150 80 {
    delay_loop 2                            # health-check polling interval (seconds)
    lb_algo rr                              # scheduling algorithm
    lb_kind DR                              # LVS forwarding mode
    protocol TCP                            # service protocol (TCP only)
    sorry_server 127.0.0.1 80               # local nginx page served when all RS are down

    real_server 172.16.250.127 80 {
        weight 1
        # Backends serve plain HTTP on port 80, so the application-layer
        # check must be HTTP_GET (SSL_GET would attempt a TLS handshake
        # and always fail against an HTTP listener).
        HTTP_GET {
            url {
                path /                      # URL to probe
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc   # expected content checksum
                status_code 200             # expected healthy response code
            }
            connect_timeout 3               # connect timeout (seconds)
            nb_get_retry 3                  # retries before marking RS down
            delay_before_retry 1            # delay between retries (seconds)
        }
    }
    real_server 172.16.253.193 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 1
        }
    }
}

[root@keepaliveB ~]# systemctl start keepalived

[root@keepalive-B ~]# ipvsadm

IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

    -> RemoteAddress:Port           Forward Weight ActiveConn InActConn

TCP  172.16.253.150:http rr

    -> 172.16.250.127:http          Route   1      0          0        

    -> 172.16.253.193:http          Route   1      0          0    

访问测试

client测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done

<h1> Web RS1 </h1>

<h1> Web RS2 </h1>

<h1> Web RS1 </h1>

<h1> Web RS2 </h1>

<h1> Web RS1 </h1>

keepalive-A故障时

[root@keepaliveA ~]# systemctl stop keepalived 

keepalive-B自动成为MASTER主节点,LVS的director调度服务器切换为keepalive-B,LVS-RS1、LVS-RS2的web服务正常使用

client访问测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done

<h1> Web RS2 </h1>

<h1> Web RS1 </h1>

<h1> Web RS2 </h1>

<h1> Web RS1 </h1>

<h1> Web RS2 </h1>

keepalive-A修复正常时,keepalive-A再次成为MASTER主节点

[root@keepaliveA ~]# systemctl start keepalived

[root@keepaliveA ~]# ip a l

2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:0c:29:75:dc:3c brd ff:ff:ff:ff:ff:ff

    inet 172.16.253.150/32 scope global ens33

   valid_lft forever preferred_lft forever

LVS-RS1web服务故障时

[root@LVS-RS1 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT

client访问

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done

<h1> Web RS2 </h1>

<h1> Web RS2 </h1>

<h1> Web RS2 </h1>

<h1> Web RS2 </h1>

LVS-RS1、LVS-RS2的web服务全部故障时

[root@LVS-RS1 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT

[root@LVS-RS2 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT

client访问到的是sorry server服务器,且sorry server服务器为keepalive-A

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done

</h1> sorry from Director-A(keepalive-A) </h1>

</h1> sorry from Director-A(keepalive-A) </h1>

</h1> sorry from Director-A(keepalive-A) </h1>

</h1> sorry from Director-A(keepalive-A) </h1>

</h1> sorry from Director-A(keepalive-A) </h1>

keepalive-A故障时

[root@keepaliveA ~]# systemctl stop keepalived.service

client访问sorry server服务页面,且sorry server服务器为keepalive-B

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done

</h1> sorry from Director-B(keepalive-B) </h1>

</h1> sorry from Director-B(keepalive-B) </h1>

</h1> sorry from Director-B(keepalive-B) </h1>

</h1> sorry from Director-B(keepalive-B) </h1>

</h1> sorry from Director-B(keepalive-B) </h1>

LVS-RS1web服务恢复正常后

[root@LVS-RS1 ~]# iptables -F

client访问测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done

<h1> Web RS1 </h1>

<h1> Web RS1 </h1>

<h1> Web RS1 </h1>

<h1> Web RS1 </h1>

<h1> Web RS1 </h1>

LVS-RS1、LVS-RS2的web服务全部恢复正常后

[root@LVS-RS1 ~]# iptables -F  [root@LVS-RS2 ~]# iptables -F 

client访问测试

[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done

<h1> Web RS2 </h1>

<h1> Web RS1 </h1>

<h1> Web RS2 </h1>

<h1> Web RS1 </h1>

<h1> Web RS2 </h1>   

保存及重载规则

保存:建议保存至/etc/sysconfig/ipvsadm

ipvsadm-save > /PATH/TO/IPVSADM_FILE

ipvsadm -S > /PATH/TO/IPVSADM_FILE

systemctl stop ipvsadm.service

重载:

ipvsadm-restore < /PATH/FROM/IPVSADM_FILE

ipvsadm -R < /PATH/FROM/IPVSADM_FILE

systemctl restart ipvsadm.service

keepalive节点通过DNS域名解析指向实现

获取web主页面内容的校验码

[root@keepaliveA ~]# genhash -s 172.16.250.127 -p 80 -u /

danran

 

已标记关键词 清除标记
©️2020 CSDN 皮肤主题: 游动-白 设计师:白松林 返回首页