haproxy + keepalived practice

Small site structure

Note: If you are deploying in the cloud (e.g. Alibaba Cloud), you do not need to deploy keepalived yourself — simply purchase Alibaba Cloud SLB and let SLB distribute traffic to the two haproxy machines.

First, the first deployment of two web servers

Compile and install nginx (script very rough)

[root@007-web2 ~]# cat nginx.sh 


#!/bin/bash
# Build and install nginx 1.17.5 from source under /apps/nginx, create the
# runtime account, install a systemd unit and a base configuration, then
# enable and start the service.
set -e

yum install -y vim lrzsz tree screen psmisc lsof tcpdump wget ntpdate gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel systemd-devel net-tools iotop bc zip unzip zlib-devel bash-completion nfs-utils automake libxml2 libxml2-devel libxslt libxslt-devel perl perl-ExtUtils-Embed

wget https://mirrors.huaweicloud.com/nginx/nginx-1.17.5.tar.gz
tar xf nginx-1.17.5.tar.gz
cd nginx-1.17.5/
./configure --prefix=/apps/nginx --user=nginx --group=nginx --with-http_ssl_module --with-http_v2_module --with-http_realip_module --with-http_stub_status_module --with-http_gzip_static_module --with-pcre --with-stream --with-stream_ssl_module --with-stream_realip_module --with-debug

make -j 4
make install

# Runtime account (configure only records the name; the account must exist
# before nginx starts). "|| true" keeps set -e happy on re-runs.
/usr/sbin/groupadd -g 995 -r nginx 2> /dev/null || true
/usr/sbin/useradd -c "Nginx web server" -u 997 -g nginx -s /sbin/nologin -r -d /var/lib/nginx nginx 2> /dev/null || true

cat > /usr/lib/systemd/system/nginx.service <<'EOF'
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
PIDFile=/run/nginx.pid
ExecStartPre=/usr/bin/rm -f /run/nginx.pid
ExecStartPre=/apps/nginx/sbin/nginx -t
ExecStart=/apps/nginx/sbin/nginx
ExecReload=/bin/kill -s HUP $MAINPID
KillSignal=SIGQUIT
TimeoutStopSec=5
KillMode=process
PrivateTmp=true
[Install]
WantedBy=multi-user.target
EOF

# nginx.conf below logs to /var/log/nginx; the directory must exist or the
# `nginx -t` in ExecStartPre fails on a fresh host (fixed: was missing).
mkdir -p /var/log/nginx
mkdir -p /apps/nginx/conf.d
cat > /apps/nginx/conf/nginx.conf <<'EOF'
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
events {
    worker_connections 1024;
}
http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;
    include             /apps/nginx/conf/mime.types;
    default_type        application/octet-stream;
    include /apps/nginx/conf.d/*.conf;
    server {
        server_name  _;
        root         /apps/nginx/html;
        location / {
        }
        error_page 500 502 503 504 /50x.html;
            location = /50x.html {
        }
    }
}
EOF

systemctl daemon-reload  && systemctl enable nginx
systemctl start nginx

Compile php script

[root@007-web2 ~]# cat php.sh 
#!/bin/bash
# Compile and install PHP 7.3.10 with php-fpm running as the nginx user on a
# unix socket (/var/run/php-fpm.sock).
# Usage: php.sh            -> install
#        php.sh uninstall  -> remove a previous install
RED="\033[0;31m"
GREEN="\033[0;32m"
NO_COLOR="\033[0m"
PREFIX=/usr/local/php    # do not change: many paths below depend on it
SYSCONFDIR=
SRC=/usr/src
FLAG=$1
# Parallel make jobs = physical sockets * cores per socket.
CPUS=$(grep "physical id" /proc/cpuinfo | sort | uniq | wc -l)
CORE=$(grep "cpu cores" /proc/cpuinfo | uniq | awk '{print $4}')
J=$(( ${CPUS:-1} * ${CORE:-1} ))
[ "$J" -ge 1 ] 2>/dev/null || J=1   # fall back when /proc/cpuinfo lacks those fields (some VMs)
FILEURL='http://ftp.ntu.edu.tw/pub/php/distributions/php-7.3.10.tar.gz'

# Abort unless running as root.
judge_root() {
    [ "$(id -u)" = "0" ] || { echo -e "${RED}Error:${NO_COLOR} You must be root to run this script."; exit 1; }
}

# Download the source tarball into $HOME (skipped if already present).
download_source() {
    cd
    [ -f php-7.3.10.tar.gz ] && return 0
    yum install wget -y
    if ! wget "${FILEURL}"; then
        echo "download failed!"
        exit 1
    fi
}

# Build PHP, install the fpm init script and adjust www.conf.
# (renamed from `install` to avoid shadowing /usr/bin/install)
install_php(){
    # php-fpm runs as the same nginx account the web servers use; change to
    # apache here if your web server runs as apache.
    /usr/sbin/groupadd -g 995 -r nginx 2> /dev/null
    /usr/sbin/useradd -c "Nginx web server" -u 997 -g nginx -s /sbin/nologin -r -d /var/lib/nginx nginx 2> /dev/null
    # EPEL repo for build dependencies.
    wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
    yum install gcc openssl-devel pcre-devel libnghttp2-devel ncurses-devel  lbzip2  bzip2 expat-devel libxml2-devel libxml2  autoconf libtool -y
    tar xf php-7.3.10.tar.gz -C "${SRC}/"
    cd "${SRC}/php-7.3.10/"
    # Fixed: --with-config-file-path previously pointed at /usr/loca/php (typo),
    # so php.ini would never have been read from the intended directory.
    ./configure --prefix=/usr/local/php --enable-mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --with-openssl --with-freetype-dir --with-jpeg-dir --with-png-dir --with-zlib --with-config-file-path=/usr/local/php/etc --with-config-file-scan-dir=/usr/local/php/etc/php.d --enable-mbstring --enable-xml --enable-sockets --enable-fpm --enable-maintainer-zts --disable-fileinfo
    make -j "${J}"
    make install
    # Default configuration files.
    cd "${PREFIX}/etc/"
    cp php-fpm.conf.default php-fpm.conf
    cd "${PREFIX}/etc/php-fpm.d"
    cp www.conf.default www.conf
    # SysV init script shipped with the source tree (systemd wraps it).
    cp "${SRC}/php-7.3.10/sapi/fpm/init.d.php-fpm" /etc/init.d/php-fpm
    chmod +x /etc/init.d/php-fpm
    # Run as nginx and listen on a unix socket instead of 127.0.0.1:9000.
    sed -i 's@user = nobody@user = nginx@' "${PREFIX}/etc/php-fpm.d/www.conf"
    sed -i 's@group = nobody@group = nginx@' "${PREFIX}/etc/php-fpm.d/www.conf"
    sed -i 's@listen = 127.0.0.1:9000@listen = /var/run/php-fpm.sock@' "${PREFIX}/etc/php-fpm.d/www.conf"
    # NOTE(review): this expression was garbled in the original copy
    # ("[email protected]"); reconstructed as uncommenting listen.mode = 0666 —
    # verify against your www.conf.default.
    sed -i 's@;listen.mode = 0660@listen.mode = 0666@' "${PREFIX}/etc/php-fpm.d/www.conf"
    systemctl daemon-reload && systemctl enable php-fpm
}

# Start php-fpm and report whether a process is visible.
test_php(){
    systemctl start php-fpm
    sleep 3
    if pgrep -l php-fpm; then
        echo -e "${GREEN}Maybe php installed successfully...${NO_COLOR}"
    else
        echo -e "${RED}Maybe php installed failed...${NO_COLOR}"
    fi
}

# Stop php-fpm and delete everything the installer created.
remove_php() {
    systemctl stop php-fpm
    sleep 3
    if pgrep -l php-fpm; then
        echo -e "${RED}Php uninstall failed...Killing php process failed...${NO_COLOR}"
        exit 2
    fi
    rm -rf "${PREFIX}" "${SRC}/php-7.3.10" /etc/init.d/php-fpm
}

# Dispatch to remove_php when invoked as: php.sh uninstall
judge_uninstall(){
    if [ "$FLAG" = "uninstall" ];then
        remove_php
        exit 0
    fi
}

main() {
    judge_root        # fixed: root check now runs first — uninstall needs root too
    judge_uninstall
    download_source
    install_php
    test_php
}
main

nginx configuration wordpress

[root@007-web1 ~]# mkdir /var/www/wordpress -p
#下载wordpress
[root@007-web1 ~]# unzip wordpress-5.2.3-zh_CN.zip -d /var/www/
[root@007-web1 ~]# chown nginx.nginx -R /var/www/wordpress/
[root@007-web1 ~]# cat /apps/nginx/conf.d/blog.jd.com.conf 
server {
    listen       80;
    server_name  blog.jd.com;

    charset utf-8;
    access_log  /var/log/nginx/blog.jd.com-access.log  main;
    error_log  /var/log/nginx/blog.jd.com-error.log;
    root /var/www/wordpress;
    location / {
        root /var/www/wordpress;
        index  index.php index.html index.htm;
    }
    
    location ~ \.php$ {
        #fastcgi_pass             127.0.0.1:9000;
        fastcgi_pass             unix:/var/run/php-fpm.sock;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_index index.php;
        fastcgi_buffer_size       128k;
        fastcgi_buffers           64 128k;
        fastcgi_intercept_errors  on;
        fastcgi_param   PATH_INFO $fastcgi_path_info;
        fastcgi_param             HTTP_AUTHORIZATION  $http_authorization;
        fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
        include                   fastcgi_params;
        fastcgi_connect_timeout 300;
        fastcgi_read_timeout 300;
        fastcgi_send_timeout 300;
        client_max_body_size 100m;
     }
     location ~ /.git
     {
         deny all;
     }
}

Test current server is normal

[root@007-web1 ~]# /apps/nginx/sbin/nginx -t
nginx: the configuration file /apps/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /apps/nginx/conf/nginx.conf test is successful
[root@007-web1 ~]# systemctl restart nginx
[root@007-web1 ~]# curl -vLH 'blog.jd.com' 127.0.0.1
* About to connect() to 127.0.0.1 port 80 (#0)
*   Trying 127.0.0.1...
* Connected to 127.0.0.1 (127.0.0.1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 127.0.0.1
> Accept: */*
> 
< HTTP/1.1 302 Found
< Server: nginx/1.17.5
... 显示了中文的配置wordpress即为ok

Synchronization code to web2

[root@007-web1 ~]# scp -p /apps/nginx/conf.d/blog.jd.com.conf 192.168.38.136:/apps/nginx/conf.d/ 
[root@007-web1 ~]# ssh 192.168.38.136 'mkdir -p /var/www/wordpress/'
[root@007-web1 ~]# scp -p -r /var/www/wordpress 192.168.38.136:/var/www/
[root@007-web1 ~]# ssh 192.168.38.136 'chown nginx.nginx -R /var/www/wordpress'
[root@007-web1 ~]# ssh 192.168.38.136 'systemctl restart nginx'
#测试
[root@007-web1 ~]# curl -vLH 'blog.jd.com' 192.168.38.136

Second, from the master database deployment

Binary installation script

[root@007-mysql-master mysql-5.7-onekey-install]# cat mysql-install.sh 
#!/bin/bash
# One-key binary install of MySQL 5.7.28: sanity checks, download, unpack to
# /usr/local, initialize the datadir and register the SysV service.
CURRENTDIR=$(pwd)
NAME="mysql-5.7.28-linux-glibc2.12-x86_64.tar.gz"
FULL_NAME=${CURRENTDIR}/${NAME}
DATA_DIR="/data/mysql/"

# Green informational message.
StandardOutput(){
    echo -e "\033[1;32m$1 ... \033[0m"
}
# Red error message.
ErrorOutput(){
    echo -e "\033[1;31m$1 ... \033[0m"
}

# Return 0 when $1 is an empty (or missing) directory, non-zero otherwise.
# Fixed: the original returned the file count as an exit status, which
# silently wraps past 255 entries.
is_empty_dir(){
    [ -z "$(ls -A "$1" 2>/dev/null)" ]
}

# Refuse to install over an existing MySQL.
judge_mysql(){
    if [ -e /usr/local/mysql ];then
        ErrorOutput "/usr/local/mysql目录存在...Mysql 似乎已经安装...终止安装..."
        exit 10
    fi
    if command -v mysqld &>/dev/null;then
        ErrorOutput "mysqld命令存在...Mysql 似乎已经安装...终止安装..."
        exit 20
    fi
    # -w: match port 3306 only, not 13306 / 33060.
    if ss -ltn | grep -qw 3306;then
        ErrorOutput "3306端口被占用...Mysql 似乎已经安装...终止安装..."
        exit 30
    fi
    if ! is_empty_dir "$DATA_DIR";then
        ErrorOutput "$DATA_DIR 不为空...终止安装..."
        exit 40
    fi
}

# Fetch the tarball unless it is already in the current directory.
download_mysql(){
    if [ -f "${FULL_NAME}" ];then
        StandardOutput "Mysql安装文件存在,开始安装..."
    else
        StandardOutput "Mysql安装文件不存在...开始下载..."
        if ! curl -o "${NAME}" "https://mirrors.huaweicloud.com/mysql/Downloads/MySQL-5.7/${NAME}"; then
            ErrorOutput "下载失败...终止安装..."
            exit 50
        fi
    fi
}

# Unpack, create the mysql user/datadir, initialize and register the service.
mysql_install(){
    yum install -y libaio-devel
    if id mysql &>/dev/null;then
        StandardOutput "mysql 用户已经存在,跳过创建用户过程..."
    else
        useradd -r -s /sbin/nologin mysql
    fi
    tar xf "${FULL_NAME}" -C /usr/local/
    ln -sv /usr/local/mysql-5* /usr/local/mysql
    chown -R mysql.mysql /usr/local/mysql/
    if [ ! -d "$DATA_DIR" ];then
        # Fixed: the original passed -R to chown twice.
        mkdir -pv "$DATA_DIR" && chown -R mysql.mysql "$DATA_DIR"
    fi
    # The generated temporary root password ends up in mysql_install.log.
    /usr/local/mysql/bin/mysqld --initialize --datadir="$DATA_DIR" --user=mysql &>mysql_install.log
    cp /usr/local/mysql-5.*-x86_64/support-files/mysql.server /etc/init.d/mysqld
    chmod a+x /etc/init.d/mysqld
    cp "${CURRENTDIR}/my.cnf" /etc/my.cnf
    echo 'export PATH=/usr/local/mysql/bin/:$PATH' > /etc/profile.d/mysql.sh
    source /etc/profile.d/mysql.sh

    # Socket and pid directories expected by my.cnf.
    mkdir -p /var/lib/mysql /var/run/mysqld
    chown mysql.mysql /var/lib/mysql/ /var/run/mysqld
    systemctl daemon-reload
}

# Start the service, confirm the port is listening and print the temporary password.
mysql_check(){
    systemctl start mysqld
    if ss -ltn | grep -qw 3306;then
        StandardOutput "VERY GOOD..."
        rm -f "${CURRENTDIR}/${NAME}"
    fi
    echo -e "请手动执行:\033[1;32msource /etc/profile\033[0m"
    echo -e "当前mysql数据库密码为:\033[1;32m$( grep "temporary password" "${CURRENTDIR}/mysql_install.log" | awk '{print $NF}')\033[0m"
}


main(){
    judge_mysql
    download_mysql
    mysql_install
    mysql_check
}
main

Database Password

[root@007-mysql-master mysql-5.7-onekey-install]# 
当前mysql数据库密码为:fO:Qd.jsl9GU
[root@007-mysql-master mysql-5.7-onekey-install]# mysqladmin -uroot -p$(grep " temporary password" mysql_install.log | awk '{print $NF}') password 123456
[root@007-slave mysql-5.7-onekey-install]#
当前mysql数据库密码为:le*gu=x4Emoi
[root@007-slave mysql-5.7-onekey-install]# mysqladmin -uroot -p$(grep " temporary password" mysql_install.log | awk '{print $NF}') password 123456

Master database configuration:

[root@007-mysql-master mysql-5.7-onekey-install]# cat /etc/my.cnf
# For advice on how to change settings please see
# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html

[mysqld]
datadir=/data/mysql
socket=/var/lib/mysql/mysql.sock
log-bin
server-id=1
expire-logs-days=15
binlog-format=row
gtid_mode=on
enforce_gtid_consistency=on
skip_name_resolve

# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0

log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
[client]
socket      = /var/lib/mysql/mysql.sock
port        = 3306
[root@007-mysql-master ~]# systemctl restart mysqld && systemctl enable mysqld
mysqld.service is not a native service, redirecting to /sbin/chkconfig.
Executing /sbin/chkconfig mysqld on
[root@007-mysql-master ~]# /sbin/chkconfig mysqld on
mysql>  grant replication slave on *.* to 'repluser'@'192.168.38.147' identified by 'WWQD88OYrqSxLg';
mysql> create database wordpress character set utf8 collate utf8_bin;
mysql> grant all privileges on wordpress.* to 'wordpress'@'192.168.38.%' identified by 'j9DtO2Bf1DvRANQr';

From the database configuration

[root@007-slave mysql-5.7-onekey-install]# cat /etc/my.cnf
# For advice on how to change settings please see
# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html

[mysqld]
datadir=/data/mysql
socket=/var/lib/mysql/mysql.sock
server-id=2
gtid_mode=on
enforce_gtid_consistency=on
skip_name_resolve

# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0

log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
[client]
socket      = /var/lib/mysql/mysql.sock
port        = 3306
[root@007-slave mysql-5.7-onekey-install]# systemctl restart mysqld && systemctl enable mysqld
[root@007-slave mysql-5.7-onekey-install]# /sbin/chkconfig mysqld on
mysql> CHANGE MASTER TO MASTER_HOST='192.168.38.138',MASTER_USER='repluser',MASTER_PASSWORD='WWQD88OYrqSxLg',MASTER_PORT=3306,MASTER_AUTO_POSITION=1;
mysql> start slave;
mysql> show slave status\G
...两个yes就OK...

Three, mysql server to act as an NFS server

Configure the primary NFS

[root@007-mysql-master ~]# yum install nfs-utils -y
[root@007-mysql-master ~]# mkdir /data/wordpress -p
[root@007-mysql-master ~]# cat /etc/exports
/data/wordpress 192.168.38.*(rw,sync,no_subtree_check,no_root_squash)
[root@007-mysql-master ~]# systemctl restart nfs
[root@007-mysql-master ~]# systemctl enable nfs

007-web1 mount test

[root@007-web1 ~]# yum install nfs-utils autofs -y
[root@007-web1 ~]# showmount -e 192.168.38.138
Export list for 192.168.38.138:
/data/wordpress 192.168.38.*
[root@007-web1 ~]# grep wordpress /etc/auto.master
/- /etc/auto.wordpress
[root@007-web1 ~]# cat /etc/auto.wordpress
/var/www/wordpress/wp-content/uploads  192.168.38.138:/data/wordpress
[root@007-web1 ~]# systemctl restart autofs

No problem to sync test web2

[root@007-web1 ~]# ssh 192.168.38.136 ' yum install nfs-utils autofs -y'
[root@007-web1 ~]# scp /etc/auto.{master,wordpress} 192.168.38.136:/etc/
[root@007-web1 ~]# ssh 192.168.38.136 'systemctl restart autofs'
#测试没问题

Configure automatic syncing

[root@007-mysql-master ~]# yum install rsync inotify-tools -y
[root@007-mysql-master ~]# ssh 192.168.38.147 'yum install rsync -y'
[root@007-mysql-master ~]# ssh 192.168.38.147 'mkdir /data/wordpress -p'
#密钥分发
[root@007-mysql-master ~]# ssh-keygen -f ~/.ssh/id_rsa -P ''
[root@007-mysql-master ~]# ssh-copy-id 192.168.38.147
#同步脚本
[root@007-mysql-master ~]# mkdir /scripts/
[root@007-mysql-master ~]# cat /scripts/inotify-data2.sh 
[root@007-mysql-master wordpress]# cat /scripts/inotify-data2.sh 
#!/bin/bash
# Watch $SRC with inotify and mirror every change to $DEST via rsync.
# Logs each sync to /var/log/changelist.log.

SRC=/data/wordpress
# NOTE(review): the destination was redacted in this copy; given the earlier
# ssh-copy-id / mkdir steps it is presumably root@192.168.38.147:/data — confirm.
DEST='[email protected]:/data'
# BACKDIR=/tmp/backup/   # optional pre-delete backup dir (delete handling removed)

# Fixed: "isdir" is an output flag, not a watchable event — inotifywait
# rejects it in -e; "create" is used instead to catch new files/directories.
/usr/bin/inotifywait -mrq --timefmt '%Y-%m-%d %H:%M' --format '%T %w %f %,e' \
    -e delete,moved_to,moved_from,close_write,create,attrib "${SRC}" | \
    while read -r DATE TIME DIR FILE EVENT;do
        FILEPATH=${DIR}${FILE}
        rsync -avz --delete "${SRC}" "${DEST}" \
            && echo "At ${TIME} on ${DATE}, file $FILEPATH was backed up via rsync" >> /var/log/changelist.log
    done
    
#screen运行
[root@007-mysql-master ~]# yum install screen -y
[root@007-mysql-master ~]# screen -S rsync
[root@007-mysql-master ~]# bash /scripts/inotify-data2.sh 

Fourth, configure haproxy

Compile and install HAProxy (rough script)

[root@007-lb1 ~]# cat haproxy.sh 
#!/bin/bash
# Build lua 5.3.5 + HAProxy 2.0.8 from source, install a systemd unit and a
# minimal stats-only configuration, then enable and start the service.

yum install -y gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel systemd-devel net-tools vim iotop bc zip unzip zlib-devel lrzsz tree screen lsof tcpdump wget chrony readline-devel

# Fixed: the haproxy group must exist before `useradd -g haproxy` can succeed.
getent group haproxy >/dev/null || groupadd -r haproxy
if ! getent passwd haproxy >/dev/null ; then
    if ! getent passwd 188 >/dev/null ; then
        useradd -r -u 188 -g haproxy -d /var/lib/haproxy -s /sbin/nologin -c "haproxy" haproxy
    else
        useradd -r -g haproxy -d /var/lib/haproxy -s /sbin/nologin -c "haproxy" haproxy
    fi
fi

# lua >= 5.3 is required for USE_LUA=1 below; built in-tree, not installed.
wget https://ftp.osuosl.org/pub/blfs/conglomeration/lua/lua-5.3.5.tar.gz
tar xf lua-5.3.5.tar.gz
cd lua-5.3.5/
make linux test
src/lua -v

cd
wget https://mirrors.huaweicloud.com/haproxy/2.0/src/haproxy-2.0.8.tar.gz
tar xf haproxy-2.0.8.tar.gz
cd haproxy-2.0.8/
make ARCH=x86_64 TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_CPU_AFFINITY=1 USE_LUA=1 LUA_INC=/root/lua-5.3.5/src/   LUA_LIB=/root/lua-5.3.5/src/ PREFIX=/usr/local/haproxy
make install PREFIX=/usr/local/haproxy
/usr/local/haproxy/sbin/haproxy -v
cat >  /usr/lib/systemd/system/haproxy.service <<'EOF'
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target
[Service]
ExecStartPre=/usr/local/haproxy/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/local/haproxy/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
ExecReload=/bin/kill -USR2 $MAINPID
[Install]
WantedBy=multi-user.target
EOF

mkdir -p /etc/haproxy
# Fixed: the original wrote "uid haproxy" twice; the keywords for dropping
# privileges by name are "user" and "group" ("uid"/"gid" expect numeric ids).
cat > /etc/haproxy/haproxy.cfg <<'EOF'
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user haproxy
    group haproxy
    daemon
    stats socket /var/lib/haproxy/stats
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
listen web_port
    bind *:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth haadmin:2NweRqCs
EOF

# chroot / pidfile / stats-socket directory must exist before the first start.
mkdir -p /var/lib/haproxy/
systemctl daemon-reload
systemctl enable haproxy
systemctl start haproxy

Writing configuration file (do not write it exactly the same stat)

[root@007-lb1 ~]# cat /etc/haproxy/haproxy.cfg 
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user haproxy
    group haproxy
    daemon
    stats socket /var/lib/haproxy/stats
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
listen web_port 
    bind 192.168.38.131:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth haadmin:2NweRqCs
frontend 007-web
    bind *:80
    mode http
    default_backend bk_web
backend bk_web
    mode http
    balance source
    cookie  SERVERID insert indirect nocache
    option forwardfor    #转发客户IP到后端服务器
    server 007-web1 192.168.38.133:80 cookie 007-web1 check inter 10000 rise 2 fall 5 weight 20
    server 007-web2 192.168.38.136:80 cookie 007-web2 check inter 10000 rise 2 fall 5 weight 20

5, configuration keepalived

VIP with 192.168.38.188

Mail configuration

# Configure the mailx client to send alert mail through 163.com's SMTPS endpoint.
# NOTE(review): the from/user values appear redacted to "[email protected]" in this
# copy — substitute a real 163.com account (and its SMTP auth code) before use.
yum install mailx -y
cat >> /etc/mail.rc <<'EOF'
set [email protected]
set smtp=smtps://smtp.163.com:465  
set [email protected]
set smtp-auth-password=zhj6b266
set smtp-auth=login
set nss-config-dir=/root/.certs
set ssl-verify=ignore
EOF
# Fetch the 163.com server certificate chain and import it into an NSS
# database so mailx accepts the TLS connection.
mkdir -p /root/.certs/
echo -n | openssl s_client -connect smtp.163.com:465 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > ~/.certs/163.crt
certutil -A -n "GeoTrust SSL CA" -t "C,," -d ~/.certs -i ~/.certs/163.crt
certutil -A -n "GeoTrust Global CA" -t "C,," -d ~/.certs -i ~/.certs/163.crt
certutil -L -d /root/.certs
cd /root/.certs/
certutil -A -n "GeoTrust SSL CA - G3" -t "Pu,Pu,Pu" -d ./ -i 163.crt

Compile and install keepalived

[root@007-lb1 ~]# cat keepalived.sh 
#!/bin/bash
# Compile and install keepalived 2.0.19 from source, install a systemd unit
# and a minimal unicast VRRP MASTER configuration, then start the service.
yum install -y libnfnetlink-devel libnfnetlink ipvsadm  libnl libnl-devel  libnl3 libnl3-devel   lm_sensors-libs net-snmp-agent-libs net-snmp-libs  openssh-server openssh-clients  openssl openssl-devel automake iproute gcc pcre pcre-devel openssl  openssl-devel

wget https://mirrors.sjtug.sjtu.edu.cn/archlinux/sources/community/keepalived-2.0.19-1.src.tar.gz
tar xf keepalived-2.0.19-1.src.tar.gz
# The Arch "src" tarball wraps the upstream tarball inside a keepalived/ dir.
tar xf keepalived/keepalived-2.0.19.tar.gz -C /usr/local/src/
cd /usr/local/src/keepalived-2.0.19/
./configure --prefix=/usr/local/keepalived --disable-fwmark
make && make install

cat > /usr/lib/systemd/system/keepalived.service <<'EOF'
[Unit]
Description=LVS and VRRP High Availability Monitor
After=network-online.target syslog.target 
Wants=network-online.target 
[Service]
Type=forking
PIDFile=/run/keepalived.pid
KillMode=process
EnvironmentFile=-/usr/local/keepalived/etc/sysconfig/keepalived
ExecStart=/usr/local/keepalived/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target
EOF


# Copy the freshly built binary out of the build tree (cwd is still the
# keepalived source directory here).
cp bin/keepalived /usr/sbin/

mkdir -p /etc/keepalived

cat > /etc/keepalived/keepalived.conf <<'EOF'
global_defs {
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 80
    priority 100
    advert_int 1
    unicast_src_ip 192.168.38.131
    unicast_peer {
        192.168.38.130
    }

    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.38.172 dev eth0 label eth0:0
    }
}
EOF

# Fixed: was "systemctl deamon-reload" (typo), so the new unit was never loaded.
systemctl daemon-reload
systemctl start keepalived

ps -ef | grep keepalived

Mail script

[root@007-lb1 keepalived]# cat notify.sh 
#!/bin/bash
# keepalived notify hook: mails $contact whenever this node transitions state.
# Usage (wired into keepalived's notify_master/notify_backup/notify_fault):
#   notify.sh {master|backup|fault}
#contact='root@localhost'
# NOTE(review): recipient was redacted in this copy — set a real address.
contact='[email protected]'

# Compose and send the transition mail; $1 is the new VRRP state.
notify() {
    local mailsubject="$(hostname) to be $1, vip floating"
    local mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be $1"
    # Quoting fixed: unquoted $contact/$mailsubject would word-split.
    echo "$mailbody" | mail -s "$mailsubject" "$contact"
}

case "$1" in
master)
    notify master
    ;;
backup)
    notify backup
    ;;
fault)
    notify fault
    ;;
*)
    echo "Usage: $(basename "$0") {master|backup|fault}"
    exit 1
    ;;
esac

nginx examination script

[root@007-lb1 keepalived]# cat check_nginx.sh 
#!/bin/bash
# keepalived vrrp_script health check: exit 0 while HAProxy is running,
# exit 1 otherwise (keepalived then fails over the VIP).
# NOTE(review): the file is named check_nginx.sh but it checks haproxy.
GREP_OPTIONS=""

# pgrep replaces the fragile `netstat | grep -v grep | grep | wc -l` pipeline.
if ! pgrep -x haproxy >/dev/null; then
    exit 1
fi

exit 0

lb1的keepalived.conf

[root@007-lb1 keepalived]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id node1
   vrrp_mcast_group4 224.123.0.18
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_nginx.sh" 
    interval 2 
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1dadcvsd
    }
    virtual_ipaddress {
        192.168.38.188/16 dev eth0
    }
    track_script {
       chk_nginx 
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}

lb2的keepalived.conf

[root@007-lb2 keepalived]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id node1
   vrrp_mcast_group4 224.123.0.18
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_nginx.sh" 
    interval 2 
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1dadcvsd
    }
    virtual_ipaddress {
        192.168.38.188/16 dev eth0
    }
    track_script {
       chk_nginx 
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}

VIP transfer test

[root@007-lb1 ~]# pkill haproxy
#vip转移
[root@007-lb2 keepalived]# ip addr show dev eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:5c:b9:ce brd ff:ff:ff:ff:ff:ff
    inet 192.168.38.132/24 brd 192.168.38.255 scope global noprefixroute dynamic eth0
       valid_lft 2586860sec preferred_lft 2586860sec
    inet6 fe80::20c:29ff:fe5c:b9ce/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

Six, DNS after it, resolve to vip

You can install wordpress

Seven, the haproxy + keepalived replaced keepalived + LVS

lvs1:192.168.38.134

lvs2:192.168.38.135

VIP:192.168.38.189

Note: If you are deploying in the cloud (e.g. Alibaba Cloud), you do not need to deploy LVS yourself — you can simply purchase Alibaba Cloud SLB and have SLB distribute web traffic to the two machines. However, that plain setup is not recommended; the recommended approach is SLB in front of your own haproxy, acting as a layer-7 reverse proxy.

Compile and install keepalived ibid.

Mail service configuration as above

notify script above

check script

#!/bin/bash
# Health check referenced by keepalived's vrrp_script on the LVS nodes:
# exit 0 while keepalived is running, exit 1 otherwise.
# NOTE(review): checking keepalived from inside keepalived is largely a
# no-op — if keepalived dies the script never runs. Kept for parity with
# the original; consider checking ipvsadm state instead.
GREP_OPTIONS=""

# pgrep replaces the fragile `ps -ef | grep | grep -v grep | wc -l` pipeline.
if ! pgrep -x keepalived >/dev/null; then
    exit 1
fi

exit 0

lvs1的keepalived.conf

[root@lvs1 keepalived]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id node1
   vrrp_mcast_group4 224.123.0.19
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_keepalived.sh" 
    interval 2 
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 52
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1dadcvsd2
    }
    virtual_ipaddress {
        192.168.38.189/16 dev eth0
    }
    track_script {
       chk_nginx 
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}

virtual_server 192.168.38.189 80 {
    delay_loop 3
    lb_algo sh
    lb_kind DR
    protocol TCP

    real_server 192.168.38.133 80 {
    weight 1
    HTTP_GET {
    url {
        path /
        status_code 200
    }
    connect_timeout 1
    nb_get_retry 3
    delay_before_retry 1
    }
    }
    real_server 192.168.38.136 80 {
    weight 1
    HTTP_GET {
    url {
        path /
        status_code 200
    }
    connect_timeout 1
    nb_get_retry 3
    delay_before_retry 1
    }
    }
}

lvs2的keepalivbed.conf

[root@lvs2 keepalived]# cat keepalived.conf 
[root@lvs1 keepalived]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id node1
   vrrp_mcast_group4 224.123.0.19
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_keepalived.sh" 
    interval 2 
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 52
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1dadcvsd2
    }
    virtual_ipaddress {
        192.168.38.189/16 dev eth0
    }
    track_script {
       chk_nginx 
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}

virtual_server 192.168.38.189 80 {
    delay_loop 3
    lb_algo sh
    lb_kind DR
    protocol TCP

    real_server 192.168.38.133 80 {
    weight 1
    HTTP_GET {
    url {
        path /
        status_code 200
    }
    connect_timeout 1
    nb_get_retry 3
    delay_before_retry 1
    }
    }
    real_server 192.168.38.136 80 {
    weight 1
    HTTP_GET {
    url {
        path /
        status_code 200
    }
    connect_timeout 1
    nb_get_retry 3
    delay_before_retry 1
    }
    }
}

VIP transfer test

[root@lvs1 keepalived]# systemctl stop keepalived
[root@lvs2 keepalived]# ip a show dev eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:d3:0f:22 brd ff:ff:ff:ff:ff:ff
    inet 192.168.38.135/24 brd 192.168.38.255 scope global noprefixroute dynamic eth0
       valid_lft 2591783sec preferred_lft 2591783sec
    inet 192.168.38.189/16 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fed3:f22/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

web1 and web2 binding VIP

[root@007-web1 ~]# cat lvs-dr.sh 
#!/bin/sh   
# LVS DR-mode real-server setup: bind the VIP on lo:0 and tune ARP sysctls so
# this host accepts VIP traffic without answering ARP requests for the VIP
# (required for direct routing).
# Author note (translated): LVS DR mode init script, Zhang Shijie 2017-08-18.
# NOTE(review): `source` is a bashism under #!/bin/sh — works where /bin/sh
# is bash; use `.` for strict POSIX shells.
LVS_VIP=192.168.38.189
source /etc/rc.d/init.d/functions  
case "$1" in  
start)  
       # /32 netmask keeps the VIP from adding a subnet route on lo.
       /sbin/ifconfig lo:0 $LVS_VIP netmask 255.255.255.255 broadcast $LVS_VIP  
       /sbin/route add -host $LVS_VIP dev lo:0  
       # arp_ignore=1: reply only when the target IP is configured on the
       # receiving interface; arp_announce=2: use the best local source
       # address — the standard DR real-server ARP suppression settings.
       echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore  
       echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce  
       echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore  
       echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce  
       echo "RealServer Start OK"  
       ;;  
stop)  
       # Remove the VIP and restore default ARP behaviour.
       /sbin/ifconfig lo:0 down  
       /sbin/route del $LVS_VIP >/dev/null 2>&1  
       echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore  
       echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce  
       echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore  
       echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce  
       echo "RealServer Stoped"  
       ;;  
*)  
       echo "Usage: $0 {start|stop}"  
       exit 1  
esac  
exit 0
[root@007-web1 ~]# bash lvs-dr.sh start
RealServer Start OK
#web2同理

Eight, investigation, lvs back-end servers are not displayed

[root@lvs1 keepalived]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.38.189:80 sh

Found 302 returns manually curl

[root@lvs1 keepalived]# curl 192.168.38.133 -I
HTTP/1.1 302 Found
Server: nginx/1.17.5
Date: Mon, 11 Nov 2019 03:39:12 GMT
Content-Type: text/html; charset=UTF-8
Connection: keep-alive
X-Powered-By: PHP/7.3.10
Location: http://192.168.38.133/wp-admin/setup-config.php

Solve: web server to create a test page test.html health check

[root@007-web1 wordpress]# echo test > test.html
[root@007-web1 wordpress]# scp test.html 192.168.38.136:/var/www/wordpress/

Two lvs modify health check

[root@lvs2 keepalived]# vim /etc/keepalived/keepalived.conf
.......
    real_server 192.168.38.133 80 {
        weight 1
        HTTP_GET {
        url {
                path /test.html
                status_code 200
        }
        connect_timeout 1
        nb_get_retry 3
        delay_before_retry 1
        }
    }
    real_server 192.168.38.136 80 {
        weight 1
        HTTP_GET {
        url {
                path /test.html
                status_code 200
        }
        connect_timeout 1
        nb_get_retry 3
        delay_before_retry 1
        }
    }
......

随后停止并启动keepalived,就好了

[root@lvs2 keepalived]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.38.189:80 sh
  -> 192.168.38.133:80            Route   1      0          0         
  -> 192.168.38.136:80            Route   1      0          0     

另外wordpress的图片目录为uploads不是upload

九、优化

各个服务优化
NFS设置好权限控制,不能777
wordpress测试好友一些东西可以放到NFS,或者同步web1的网站数据到web2,主要是一些主题什么的

NFS权限限制

权限改成755还是成功了

[root@007-mysql-master ~]# chmod 755 -R /data/wordpress/
[root@007-mysql-master ~]# ll /data/wordpress/2019/11/
total 292
-rwxr-xr-x 1 mysql mysql  98728 Nov 11 20:18 89a9f78ed9a0e3a9d0ad9a4836a79c7f.png
-rw-r--r-- 1 mysql mysql   5101 Nov 11 20:20 9ad3c6f0d10514c88930974484e7a143.png
-rwxr-xr-x 1 mysql mysql 186355 Nov 11 20:17 da7999e9b9174995d894cab24e20565a.png

问什么呢?为啥能上传成功,且用户为mysql

原因:web客户端的nginx用户uid为997(gid为995)

[root@007-web2 ~]# id nginx
uid=997(nginx) gid=995(nginx) groups=995(nginx)

NFS这边mysql用户的uid也是997(gid也是995)

[root@007-mysql-master ~]# id mysql
uid=997(mysql) gid=995(mysql) groups=995(mysql)

/data/wordpress/2019是已经生成的,属主为mysql(uid 997)

[root@007-mysql-master ~]# ls -l /data/wordpress/
total 0
drwxr-xr-x 3 mysql mysql 16 Nov 11 20:17 2019

但是wordpress无法在/data/wordpress目录下生成新目录,可以设置acl权限给uid为997的用户(最好手动创建一个uid为997的用户)

[root@007-mysql-master ~]# setfacl -m u:mysql:rwx /data/wordpress/
[root@007-mysql-master ~]# getfacl /data/wordpress/
getfacl: Removing leading '/' from absolute path names
# file: data/wordpress/
# owner: root
# group: root
user::rwx
user:mysql:rwx
group::r-x
mask::rwx
other::r-x

或者NFS设置权限,类似于下面这样:

web1和web2的

Guess you like

Origin www.cnblogs.com/uscWIFI/p/11840023.html