Linux automation and operations tooling: SaltStack

**SaltStack**

SaltStack is a centralized server management platform that lets an administrator perform unified operations across many servers at once. Similar tools include Ansible, Puppet, Func, and others. Compared with these, Salt's strengths are that it is written in Python, supports a wide range of operating systems, uses a C/S (master/minion) architecture, is simple to deploy, and is highly extensible.

**Environment preparation**
CentOS 6.5
Two virtual machines (more minions, server3 and server4, join later in the walkthrough)

Prepare the installation packages:
[root@foundation15 rhel6]# ls
libyaml-0.1.3-4.el6.x86_64.rpm
python-babel-0.9.4-5.1.el6.noarch.rpm
python-backports-1.0-5.el6.x86_64.rpm
python-backports-ssl_match_hostname-3.4.0.2-2.el6.noarch.rpm
python-chardet-2.2.1-1.el6.noarch.rpm
python-cherrypy-3.2.2-4.el6.noarch.rpm
python-crypto-2.6.1-3.el6.x86_64.rpm
python-crypto-debuginfo-2.6.1-3.el6.x86_64.rpm
python-enum34-1.0-4.el6.noarch.rpm
python-futures-3.0.3-1.el6.noarch.rpm
python-impacket-0.9.14-1.el6.noarch.rpm
python-jinja2-2.8.1-1.el6.noarch.rpm
python-msgpack-0.4.6-1.el6.x86_64.rpm
python-ordereddict-1.1-2.el6.noarch.rpm
python-requests-2.6.0-3.el6.noarch.rpm
python-setproctitle-1.1.7-2.el6.x86_64.rpm
python-six-1.9.0-2.el6.noarch.rpm
python-tornado-4.2.1-1.el6.x86_64.rpm
python-urllib3-1.10.2-1.el6.noarch.rpm
python-zmq-14.5.0-2.el6.x86_64.rpm
PyYAML-3.11-1.el6.x86_64.rpm
repodata
salt-2016.11.3-1.el6.noarch.rpm
salt-api-2016.11.3-1.el6.noarch.rpm
salt-cloud-2016.11.3-1.el6.noarch.rpm
salt-master-2016.11.3-1.el6.noarch.rpm
salt-minion-2016.11.3-1.el6.noarch.rpm
salt-ssh-2016.11.3-1.el6.noarch.rpm
salt-syndic-2016.11.3-1.el6.noarch.rpm
zeromq-4.0.5-4.el6.x86_64.rpm

**Usage**

Installation
On the master host (server1):
yum install salt-master
On the minion hosts:
yum install salt-minion
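After installing, start salt-master on server1 and salt-minion on each minion; the minion's key then has to be accepted on the master before any command reaches it. A minimal sketch of that handshake, using the init scripts and tools shipped with the salt packages:

/etc/init.d/salt-master start    # on server1
/etc/init.d/salt-minion start    # on each minion
salt-key -L                      # on the master: list pending minion keys
salt-key -A                      # accept all pending keys
salt '*' test.ping               # verify every accepted minion responds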

**One-click httpd installation**

On the minion, edit the configuration file /etc/salt/minion: uncomment the master option and point it at the master host's address (see the sketch below).
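A minimal sketch of that change (the master address 172.25.15.6 for server1 is inferred from the ip addr output later in this post; substitute your own):

# /etc/salt/minion
master: 172.25.15.6

/etc/init.d/salt-minion restart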

Write state files to run the tasks. On the master, edit /etc/salt/master and uncomment file_roots:

vim /etc/salt/master

file_roots:
  base:
    - /srv/salt

mkdir  /srv/salt
 cd  /srv/salt/
 mkdir  httpd
 cd httpd/



vim install.sls

apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php

  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://httpd/files/httpd.conf
    - mode: 644
    - user: root


  service.running:
    - name: httpd
    - enable: True
    - reload: True
    - watch:
      - file: apache-install

mkdir  files

On the minion side (server2), copy httpd.conf into the master's file tree:
 scp /etc/httpd/conf/httpd.conf root@server1:/srv/salt/httpd/files/

Run the state:
salt server2 state.sls httpd.install

[root@server1 httpd]# salt server2 state.sls httpd.install
server2:
----------
          ID: apache-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 10:44:21.696917
    Duration: 349.741 ms
     Changes:   
----------
          ID: apache-install
    Function: file.managed
        Name: /etc/httpd/conf/httpd.conf
      Result: True
     Comment: File /etc/httpd/conf/httpd.conf is in the correct state
     Started: 10:44:22.048334
    Duration: 40.833 ms
     Changes:   
----------
          ID: apache-install
    Function: service.running
        Name: httpd
      Result: True
     Comment: Service httpd has been enabled, and is running
     Started: 10:44:22.089892
    Duration: 147.796 ms
     Changes:   
              ----------
              httpd:
                  True

Summary for server2
------------
Succeeded: 3 (changed=1)
Failed:    0
------------
Total states run:     3
Total run time: 538.370 ms


Check on the minion (server2):

[root@server2 ~]# netstat -antple|grep httpd
tcp        0      0 :::80                       :::*                        LISTEN      0          16092      1705/httpd          

[root@server2 ~]# ps ax |grep  httpd
 1705 ?        Ss     0:00 /usr/sbin/httpd
 1713 ?        S      0:00 /usr/sbin/httpd
 1714 ?        S      0:00 /usr/sbin/httpd
 1715 ?        S      0:00 /usr/sbin/httpd
 1716 ?        S      0:00 /usr/sbin/httpd
 1717 ?        S      0:00 /usr/sbin/httpd
 1718 ?        S      0:00 /usr/sbin/httpd
 1719 ?        S      0:00 /usr/sbin/httpd
 1720 ?        S      0:00 /usr/sbin/httpd
 1740 pts/0    S+     0:00 grep httpd



To control the listening port, edit the httpd.conf kept under files/ (the path given as source in the state).
Whenever the md5 checksum of the master's copy differs from the minion's, the master pushes its file to the minion, and the watch plus reload settings make httpd reload rather than restart.
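Salt's built-in dry-run flag lets you preview exactly what would be pushed before applying it, for example:

salt server2 state.sls httpd.install test=True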

Before the change:
[root@server2 conf]# md5sum   httpd.conf 
27a5c8d9e75351b08b8ca1171e8a0bbd  httpd.conf
identical to the master's copy:
[root@server1 files]# md5sum httpd.conf 
27a5c8d9e75351b08b8ca1171e8a0bbd  httpd.conf
After the change (on the master):
[root@server1 files]# md5sum httpd.conf 
b7ca7a0e786418ba7b5ad84efac70265  httpd.conf

Run the state again:
salt server2 state.sls httpd.install

diff:
                  ---  
                  +++  
                  @@ -133,7 +133,7 @@
                   # prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
                   #
                   #Listen 12.34.56.78:80
                  -Listen 80
                  +Listen 8080    ############# changed

                   #
                   # Dynamic Shared Object (DSO) Support

Check on the minion:

[root@server2 conf]# netstat -antple|grep httpd
tcp        0      0 :::8080                     :::*                        LISTEN      0          16606      1705/httpd        
The change took effect.
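The same check can also be run remotely from the master via cmd.run (a quick sketch, assuming curl is available on server2):

salt server2 cmd.run 'curl -sI http://localhost:8080 | head -1'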

**One-click nginx installation from source**

[root@server1 salt]# cd  nginx/
[root@server1 nginx]# cat install.sls 
nginx-install:
  pkg.installed:
    - pkgs:
      - gcc-c++
      - zlib-devel
      - openssl-devel
      - pcre-devel

  file.managed:
    - name: /mnt/nginx-1.10.1.tar.gz
    - source: salt://nginx/files/nginx-1.10.1.tar.gz

  cmd.run:
    - name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module  &> /dev/null  && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
    - creates: /usr/local/nginx
That covers the installation part.


The user part:

[root@server1 salt]# cd  user/
[root@server1 user]# ls
nginx.sls
[root@server1 user]# cat nginx.sls 
nginx:
  user.present:
    - uid: 800
    - shell: /sbin/nologin
    - home: /usr/local/nginx
    - createhome: false

Start the service (pulling the states above together with include):
[root@server1 nginx]# cat service.sls 
include:
  - nginx.install
  - user.nginx

/usr/local/nginx/conf/nginx.conf:
  file.managed:
    - source: salt://nginx/files/nginx.conf
    - mode: 644

/etc/init.d/nginx:                           # the init script
  file.managed:
    - source: salt://nginx/files/nginx       # where the script lives under the master's file tree
    - mode: 755

nginx-service:
  service.running:
    - name: nginx
    - enable: true
    - reload: true
    - require:
      - file: /etc/init.d/nginx
    - watch:
      - file: /usr/local/nginx/conf/nginx.conf

The init script:
#!/bin/sh
#
# nginx        Startup script for nginx
#
# chkconfig: - 85 15
# processname: nginx
# config: /usr/local/nginx/conf/nginx.conf
# pidfile: /usr/local/nginx/logs/nginx.pid
# description: nginx is an HTTP and reverse proxy server
#
### BEGIN INIT INFO
# Provides: nginx
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs $remote_fs $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start and stop nginx
### END INIT INFO

# Source function library.
. /etc/rc.d/init.d/functions

if [ -L $0 ]; then
    initscript=`/bin/readlink -f $0`
else
    initscript=$0
fi

#sysconfig=`/bin/basename $initscript`

#if [ -f /etc/sysconfig/$sysconfig ]; then
#    . /etc/sysconfig/$sysconfig
#fi

nginx=${NGINX-/usr/local/nginx/sbin/nginx}
prog=`/bin/basename $nginx`
conffile=${CONFFILE-/usr/local/nginx/conf/nginx.conf}
lockfile=${LOCKFILE-/var/lock/subsys/nginx}
pidfile=${PIDFILE-/usr/local/nginx/logs/nginx.pid}
SLEEPMSEC=${SLEEPMSEC-200000}
UPGRADEWAITLOOPS=${UPGRADEWAITLOOPS-5}
RETVAL=0

start() {
    echo -n $"Starting $prog: "

    daemon --pidfile=${pidfile} ${nginx} -c ${conffile}
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && touch ${lockfile}
    return $RETVAL
}

stop() {
    echo -n $"Stopping $prog: "
    killproc -p ${pidfile} ${prog}
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
}

reload() {
    echo -n $"Reloading $prog: "
    killproc -p ${pidfile} ${prog} -HUP
    RETVAL=$?
    echo
}

upgrade() {
    oldbinpidfile=${pidfile}.oldbin

    configtest -q || return
    echo -n $"Starting new master $prog: "
    killproc -p ${pidfile} ${prog} -USR2
    echo

    for i in `/usr/bin/seq $UPGRADEWAITLOOPS`; do
        /bin/usleep $SLEEPMSEC
        if [ -f ${oldbinpidfile} -a -f ${pidfile} ]; then
            echo -n $"Graceful shutdown of old $prog: "
            killproc -p ${oldbinpidfile} ${prog} -QUIT
            RETVAL=$?
            echo
            return
        fi
    done

    echo $"Upgrade failed!"
    RETVAL=1
}

configtest() {
    if [ "$#" -ne 0 ] ; then
        case "$1" in
            -q)
                FLAG=$1
                ;;
            *)
                ;;
        esac
        shift
    fi
    ${nginx} -t -c ${conffile} $FLAG
    RETVAL=$?
    return $RETVAL
}

rh_status() {
    status -p ${pidfile} ${nginx}
}

# See how we were called.
case "$1" in
    start)
        rh_status >/dev/null 2>&1 && exit 0
        start
        ;;
    stop)
        stop
        ;;
    status)
        rh_status
        RETVAL=$?
        ;;
    restart)
        configtest -q || exit $RETVAL
        stop
        start
        ;;
    upgrade)
        rh_status >/dev/null 2>&1 || exit 0
        upgrade
        ;;
    condrestart|try-restart)
        if rh_status >/dev/null 2>&1; then
            stop
            start
        fi
        ;;
    force-reload|reload)
        reload
        ;;
    configtest)
        configtest
        ;;
    *)
        echo $"Usage: $prog {start|stop|restart|condrestart|try-restart|force-reload|upgrade|reload|status|help|configtest}"
        RETVAL=2
esac

exit $RETVAL



Start and test:
[root@server1 nginx]# salt  server2 state.sls  nginx.service
server2:
----------
          ID: nginx-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 12:10:03.592059
    Duration: 400.953 ms
     Changes:   
----------
          ID: nginx-install
    Function: file.managed
        Name: /mnt/nginx-1.10.1.tar.gz
      Result: True
     Comment: File /mnt/nginx-1.10.1.tar.gz is in the correct state
     Started: 12:10:03.994657
    Duration: 66.481 ms
     Changes:   
----------
          ID: nginx-install
    Function: cmd.run
        Name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module  &> /dev/null  && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
      Result: True
     Comment: /usr/local/nginx exists
     Started: 12:10:04.061875
    Duration: 0.361 ms
     Changes:   
----------
          ID: nginx
    Function: user.present
      Result: True
     Comment: New user nginx created
     Started: 12:10:04.062773
    Duration: 130.165 ms
     Changes:   
              ----------
              fullname:
              gid:
                  800
              groups:
                  - nginx
              home:
              homephone:
              name:
                  nginx
              passwd:
                  x
              roomnumber:
              shell:
                  /sbin/nologin
              uid:
                  800
              workphone:
----------
          ID: /usr/local/nginx/conf/nginx.conf
    Function: file.managed
      Result: True
     Comment: File /usr/local/nginx/conf/nginx.conf is in the correct state
     Started: 12:10:04.193104
    Duration: 52.364 ms
     Changes:   
----------
          ID: /etc/init.d/nginx
    Function: file.managed
      Result: True
     Comment: File /etc/init.d/nginx updated
     Started: 12:10:04.245611
    Duration: 51.516 ms
     Changes:   
              ----------
              diff:
                  New file
              mode:
                  0755
----------
          ID: nginx-service
    Function: service.running
        Name: nginx
      Result: True
     Comment: Service nginx has been enabled, and is running
     Started: 12:10:04.298296
    Duration: 122.8 ms
     Changes:   
              ----------
              nginx:
                  True

Summary for server2
------------
Succeeded: 7 (changed=3)
Failed:    0
------------
Total states run:     7
Total run time: 824.640 ms


Started successfully.


Test the effect of changing the configuration file.
Set the number of worker processes to 2:
#user  nobody;
worker_processes  2;

Run the state once more and check the result:
[root@server2 nginx]# ps ax|grep nginx
 4635 ?        Ss     0:00 nginx: master process /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
 4702 ?        S      0:00 nginx: worker process                                          
 4703 ?        S      0:00 nginx: worker process                                          
 4707 pts/0    S+     0:00 grep nginx

Test passed.

**One-click deployment: httpd and nginx behind an haproxy load balancer**

Deploying haproxy:

haproxy-install:
  pkg.installed:
    - pkgs:
      - haproxy

  file.managed:
    - name: /etc/haproxy/haproxy.cfg
    - source: salt://haproxy/files/haproxy.cfg

  service.running:
    - name: haproxy
    - reload: True
    - watch:
      - file: haproxy-install
Note: a yum repository that provides haproxy must be configured on the target host first.
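A minimal sketch of such a repo file, assuming the RHEL 6 installation media is served over HTTP at http://172.25.15.250/rhel6 (the baseurl is an assumption; on the RHEL 6 media, haproxy sits in the LoadBalancer channel):

# /etc/yum.repos.d/rhel6.repo  (hypothetical baseurl, adjust to your environment)
[rhel6-base]
name=rhel6 base
baseurl=http://172.25.15.250/rhel6
gpgcheck=0

[rhel6-loadbalancer]
name=rhel6 LoadBalancer
baseurl=http://172.25.15.250/rhel6/LoadBalancer
gpgcheck=0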

Prepare the configuration file:
[root@server1 haproxy]# cd  files/
[root@server1 files]# ls
haproxy.cfg

vim  haproxy.cfg

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  main *:80
    default_backend             app

#---------------------------------------------------------------------
#---------------------------------------------------------------------
backend app
    balance     roundrobin
    server  app1 172.25.15.8:80 check
    server  app2 172.25.15.7:80 check

Combine the three jobs with a top.sls in the salt directory (/srv/salt):

[root@server1 salt]# ls
haproxy  httpd  nginx  top.sls  user
[root@server1 salt]# vim top.sls 

base:
  'server1':
    - haproxy.install
  'server2':
    - httpd.install
  'server3':
    - nginx.install

Run the highstate:

[root@server1 salt]# salt  '*'  state.highstate
server3:
----------
          ID: nginx-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 14:47:03.692262
    Duration: 441.91 ms
     Changes:   
----------
          ID: nginx-install
    Function: file.managed
        Name: /mnt/nginx-1.10.1.tar.gz
      Result: True
     Comment: File /mnt/nginx-1.10.1.tar.gz is in the correct state
     Started: 14:47:04.137157
    Duration: 107.952 ms
     Changes:   
----------
          ID: nginx-install
    Function: cmd.run
        Name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module  &> /dev/null  && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
      Result: True
     Comment: /usr/local/nginx exists
     Started: 14:47:04.246020
    Duration: 0.46 ms
     Changes:   

Summary for server3
------------
Succeeded: 3
Failed:    0
------------
Total states run:     3
Total run time: 550.322 ms
server2:
----------
          ID: apache-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 14:47:03.246948
    Duration: 657.376 ms
     Changes:   
----------
          ID: apache-install
    Function: file.managed
        Name: /etc/httpd/conf/httpd.conf
      Result: True
     Comment: File /etc/httpd/conf/httpd.conf updated
     Started: 14:47:03.907012
    Duration: 74.654 ms
     Changes:   
              ----------
              diff:
                  ---  
                  +++  
                  @@ -133,7 +133,7 @@
                   # prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
                   #
                   #Listen 12.34.56.78:80
                  -Listen 8080
                  +Listen 80

                   #
                   # Dynamic Shared Object (DSO) Support
----------
          ID: apache-install
    Function: service.running
        Name: httpd
      Result: True
     Comment: Service reloaded
     Started: 14:47:04.015879
    Duration: 80.605 ms
     Changes:   
              ----------
              httpd:
                  True

Summary for server2
------------
Succeeded: 3 (changed=2)
Failed:    0
------------
Total states run:     3
Total run time: 812.635 ms
server1:
----------
          ID: haproxy-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 14:47:04.219933
    Duration: 502.258 ms
     Changes:   
----------
          ID: haproxy-install
    Function: file.managed
        Name: /etc/haproxy/haproxy.cfg
      Result: True
     Comment: File /etc/haproxy/haproxy.cfg is in the correct state
     Started: 14:47:04.725139
    Duration: 47.684 ms
     Changes:   
----------
          ID: haproxy-install
    Function: service.running
        Name: haproxy
      Result: True
     Comment: The service haproxy is already running
     Started: 14:47:04.773771
    Duration: 35.477 ms
     Changes:   

Summary for server1
------------
Succeeded: 3
Failed:    0
------------
Total states run:     3
Total run time: 585.419 ms

Check the result in a browser.


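Round-robin can also be verified from the command line instead of a browser (a quick sketch; 172.25.15.6 is server1's address, taken from the ip addr output further down):

for i in 1 2 3 4; do curl -s http://172.25.15.6; done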

**Matching hosts by grain roles**

On server2, edit /etc/salt/minion:
 vim /etc/salt/minion

# Custom static grains for this minion can be specified here and used in SLS
# files just like all other grains. This example sets 4 custom grains, with
# the 'roles' grain having two values that can be matched against.
grains:
  roles:
    - apache
#    - memcache
#  deployment: datacenter4
#  cabinet: 13
#  cab_u: 14-15
#


 /etc/init.d/salt-minion restart

Make the same change on server3, but set its role to httpd.

On server1 (the master):

[root@server1 salt]# salt '*' grains.item roles
server2:
    ----------
    roles:
        - apache
server3:
    ----------
    roles:
        - httpd
server1:
    ----------
    roles:

With these results in hand, adjust top.sls to match on the grain:

base:
  'server1':
    - haproxy.install
  'roles:apache':
    - match: grain
    - httpd.install
  'roles:httpd':
    - match: grain
    - nginx.install
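Grain matching also works directly on the command line with salt's -G option, which is handy for a quick check:

salt -G 'roles:apache' test.ping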

Run the highstate:

[root@server1 salt]# salt  '*'  state.highstate
server1:
----------
          ID: haproxy-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 15:41:59.844101
    Duration: 398.342 ms
     Changes:   
----------
          ID: haproxy-install
    Function: file.managed
        Name: /etc/haproxy/haproxy.cfg
      Result: True
     Comment: File /etc/haproxy/haproxy.cfg is in the correct state
     Started: 15:42:00.245248
    Duration: 48.378 ms
     Changes:   
----------
          ID: haproxy-install
    Function: service.running
        Name: haproxy
      Result: True
     Comment: The service haproxy is already running
     Started: 15:42:00.294540
    Duration: 39.048 ms
     Changes:   

Summary for server1
------------
Succeeded: 3
Failed:    0
------------
Total states run:     3
Total run time: 485.768 ms
server3:
----------
          ID: nginx-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 15:42:00.467240
    Duration: 438.167 ms
     Changes:   
----------
          ID: nginx-install
    Function: file.managed
        Name: /mnt/nginx-1.10.1.tar.gz
      Result: True
     Comment: File /mnt/nginx-1.10.1.tar.gz is in the correct state
     Started: 15:42:00.907928
    Duration: 83.286 ms
     Changes:   
----------
          ID: nginx-install
    Function: cmd.run
        Name: cd /mnt && tar zxf nginx-1.10.1.tar.gz && cd nginx-1.10.1 && ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module  &> /dev/null  && make && make install && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
      Result: True
     Comment: /usr/local/nginx exists
     Started: 15:42:00.992076
    Duration: 0.492 ms
     Changes:   

Summary for server3
------------
Succeeded: 3
Failed:    0
------------
Total states run:     3
Total run time: 521.945 ms
server2:
----------
          ID: apache-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 15:41:59.941709
    Duration: 447.302 ms
     Changes:   
----------
          ID: apache-install
    Function: file.managed
        Name: /etc/httpd/conf/httpd.conf
      Result: True
     Comment: File /etc/httpd/conf/httpd.conf is in the correct state
     Started: 15:42:00.391541
    Duration: 50.933 ms
     Changes:   
----------
          ID: apache-install
    Function: service.running
        Name: httpd
      Result: True
     Comment: The service httpd is already running
     Started: 15:42:00.443469
    Duration: 30.044 ms
     Changes:   

Summary for server2
------------
Succeeded: 3
Failed:    0
------------
Total states run:     3
Total run time: 528.279 ms

**Matching via pushed custom grains**

Under the salt file root, create a _grains directory and write a custom grains module:
mkdir /srv/salt/_grains
cd /srv/salt/_grains
vim my_grains.py

#!/usr/bin/env python
def my_grains():
    grains = {}
    grains['hello'] = 'world'
    grains['salt'] = 'stack'
    return grains

Sync the custom grain module to server2:
[root@server1 _grains]# salt  server2  saltutil.sync_grains
server2:
    - grains.my_grains


Check:
[root@server1 _grains]# salt '*'  grains.item   hello
server2:
    ----------
    hello:
        world
server3:
    ----------
    hello:
server1:
    ----------
    hello:
[root@server1 _grains]# salt '*'  grains.item   salt
server1:
    ----------
    salt:
server3:
    ----------
    salt:
server2:
    ----------
    salt:
        stack
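Once synced, the custom grain can be used for targeting just like the static ones, for example:

salt -G 'salt:stack' test.ping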



**Matching via pillar**


Enable pillar: in /etc/salt/master, uncomment pillar_roots:

# Salt Pillars allow for the building of global data that can be made selectively
# available to different minions based on minion grain filtering. The Salt
# Pillar is laid out in the same fashion as the file server, with environments,
# a top file and sls files. However, pillar data does not need to be in the
# highstate format, and is generally just key/value pairs.
pillar_roots:
  base:
    - /srv/pillar
#


mkdir  /srv/pillar
  cd  /srv/pillar/
  mkdir web
  cd  web
  vim  install.sls

{% if grains['fqdn'] == 'server2'%}
webserver: httpd
{% elif grains['fqdn'] == 'server3'%}
webserver: nginx
{% endif %}


Write the pillar top file:
cd /srv/pillar/

vim top.sls

base:
  '*':
    - web.install

Refresh pillar on the minions:

[root@server1 pillar]# salt '*'  saltutil.refresh_pillar
server3:
    True
server2:
    True
server1:
    True


Query the pillar items:

[root@server1 pillar]# salt '*'  pillar.items
server2:
    ----------
    webserver:
        httpd
server3:
    ----------
    webserver:
        nginx
server1:
    ----------    (nothing defined here, so empty)
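These pillar values can then be consumed from state files through jinja; a small sketch (the state ID web-install is only an illustration, not part of the original setup):

web-install:
  pkg.installed:
    - name: {{ pillar['webserver'] }}

Pillar data can also be used for command-line targeting with the -I option:

salt -I 'webserver:nginx' test.ping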

**Parameterizing the configuration file (lib.sls / jinja context)**

Modify the httpd configuration file under files/ on the master so that the Listen directive takes parameters:

# Listen: Allows you to bind Apache to specific IP addresses and/or
# ports, in addition to the default. See also the <VirtualHost>
# directive.
#
# Change this to Listen on specific IP addresses as shown below to 
# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
#
#Listen 12.34.56.78:80
Listen {{bind}}:{{port}}  ########### the modified line


Method 1: via lib.sls.
In the httpd directory, create lib.sls:
{% set port = 8080 %}

and add the following as the first line of the httpd configuration template:
{% from 'httpd/lib.sls' import port with context %}


Run the state:

[root@server1 httpd]# salt server2 state.sls httpd.install

server2:
----------
          ID: apache-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 17:13:20.598640
    Duration: 409.492 ms
     Changes:   
----------
          ID: apache-install
    Function: file.managed
        Name: /etc/httpd/conf/httpd.conf
      Result: True
     Comment: File /etc/httpd/conf/httpd.conf updated
     Started: 17:13:21.010265
    Duration: 116.424 ms
     Changes:   
              ----------
              diff:
                  ---  
                  +++  
                  @@ -134,7 +134,7 @@
                   # prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
                   #
                   #Listen 12.34.56.78:80
                  -Listen 
                  +Listen 172.25.15.7:8080 

                   #
                   # Dynamic Shared Object (DSO) Support
----------
          ID: apache-install
    Function: service.running
        Name: httpd
      Result: True
     Comment: Service reloaded
     Started: 17:13:21.157426
    Duration: 75.082 ms
     Changes:   
              ----------
              httpd:
                  True

Summary for server2
------------
Succeeded: 3 (changed=2)
Failed:    0
------------
Total states run:     3
Total run time: 600.998 ms



Method 2: edit install.sls directly.
Add the jinja template option and pass the IP and port through context; after running the state you can see the 8080 set earlier change back to 80:
apache-install:
  pkg.installed:
    - pkgs:
      - httpd
      - php

  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://httpd/files/httpd.conf
    - mode: 644
    - user: root
    - template: jinja
    - context:
        bind: 172.25.15.7
        port: 8080

  service.running:
    - name: httpd
    - enable: True
    - reload: True
    - watch:
      - file: apache-install

Run the state:

[root@server1 httpd]# salt server2 state.sls httpd.install
server2:
----------
          ID: apache-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 17:17:56.983768
    Duration: 417.926 ms
     Changes:   
----------
          ID: apache-install
    Function: file.managed
        Name: /etc/httpd/conf/httpd.conf
      Result: True
     Comment: File /etc/httpd/conf/httpd.conf updated
     Started: 17:17:57.403983
    Duration: 87.204 ms
     Changes:   
              ----------
              diff:
                  ---  
                  +++  
                  @@ -1,4 +1,3 @@
                  -
                   #
                   # This is the main Apache server configuration file.  It contains the
                   # configuration directives that give the server its instructions.
                  @@ -134,7 +133,7 @@
                   # prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
                   #
                   #Listen 12.34.56.78:80
                  -Listen 172.25.15.7:8080 
                  +Listen 172.25.15.7:80 

                   #
                   # Dynamic Shared Object (DSO) Support
----------
          ID: apache-install
    Function: service.running
        Name: httpd
      Result: True
     Comment: Service reloaded
     Started: 17:17:57.522911
    Duration: 74.63 ms
     Changes:   
              ----------
              httpd:
                  True

Summary for server2
------------
Succeeded: 3 (changed=2)
Failed:    0
------------
Total states run:     3
Total run time: 579.760 ms

**One-click HA deployment: httpd, nginx, haproxy and keepalived**

Write the keepalived source installation.

Simply place the existing tarball, configuration file, and init script under the files directory:
[root@server1 salt]# cd  keepalived/files/
[root@server1 files]# ls
keepalived  keepalived-2.0.6.tar.gz  keepalived.conf


Edit the installation part:
[root@server1 keepalived]# vim install.sls 
#################################################################
 pkg.installed:   # this dependency block can be split out into its own state file and pulled in with include where needed
    - pkgs:
      - gcc-c++
      - zlib-devel
      - openssl-devel
      - pcre-devel

kp-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd  keepalived-2.0.6  &&  ./configure --prefix=/usr/local/keepalived  --with-init=SYSV &> /dev/null  && make && make install &> /dev/null
    - creates:  /usr/local/keepalived

/etc/keepalived:
  file.directory:
    - mode: 755
/etc/sysconfig/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived

/sbin/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived


Write the keepalived part of the pillar data: just create a keepalived directory under /srv/pillar and put the following in it:
####################################################
{% if grains['fqdn'] == 'server1'%}
state: MASTER
vrid: 66
priority: 100
{% elif grains['fqdn'] == 'server4'%}
state: BACKUP
vrid: 66
priority: 50
{% endif %}

This part supplies the parameters that get substituted into the keepalived configuration file when it is pushed.
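For the minions to actually receive these values, the new pillar file also has to be listed in the pillar top file; a sketch, assuming the snippet above was saved as /srv/pillar/keepalived/install.sls (the file name is an assumption):

base:
  '*':
    - web.install
    - keepalived.install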

Write the keepalived service and configuration-file part (keepalived/service.sls):
################################################################
include:
  - keepalived.install

/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf
    - template: jinja  #### note the template engine in use
    - context:
        STATE: {{ pillar['state'] }}  # keep the upper/lower-case names distinct
        VRID:  {{ pillar['vrid'] }}
        PRIORITY: {{ pillar['priority'] }}

kp-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - mode: 755
  service.running:
    - name: keepalived
    - reload: True
    - watch:
      - file: /etc/keepalived/keepalived.conf

Then add the top.sls file so the whole stack deploys in one shot:
########################################################
base:
  'server1':
    - keepalived.service
    - haproxy.install
  'server4':
    - keepalived.service
    - haproxy.install

  'roles:apache':
    - match: grain
    - httpd.install
  'roles:httpd':
    - match: grain
    - nginx.install

Modify the configuration file under files/ so that it accepts these parameters:
[root@server1 keepalived]# cd  files/
[root@server1 files]# vim  keepalived.conf 
#############################################################
! Configuration File for keepalived

global_defs {
   notification_email {
             root@localhost
}
   notification_email_from  keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
#   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface eth0
    virtual_router_id {{  VRID }}
    priority {{  PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
     172.25.15.100
}
}



Test:
#####################################################
#######################################################
[root@server1 salt]# salt '*'  state.highstate
(The output for server2 and server3 is not shown; it was already covered in the previous section, so it is not repeated here.)

server4:
----------
          ID: kp-install
    Function: file.managed
        Name: /mnt/keepalived-2.0.6.tar.gz
      Result: True
     Comment: File /mnt/keepalived-2.0.6.tar.gz is in the correct state
     Started: 11:10:27.235394
    Duration: 304.01 ms
     Changes:   
----------
          ID: kp-install
    Function: cmd.run
        Name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd  keepalived-2.0.6  &&  ./configure --prefix=/usr/local/keepalived  --with-init=SYSV &> /dev/null  && make && make install &> /dev/null
      Result: True
     Comment: /usr/local/keepalived exists
     Started: 11:10:27.540540
    Duration: 3.811 ms
     Changes:   
----------
          ID: /etc/keepalived
    Function: file.directory
      Result: True
     Comment: Directory /etc/keepalived is in the correct state
     Started: 11:10:27.544569
    Duration: 1.025 ms
     Changes:   
----------
          ID: /etc/sysconfig/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /etc/sysconfig/keepalived is present and owned by root:root
     Started: 11:10:27.545784
    Duration: 16.949 ms
     Changes:   
----------
          ID: /sbin/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /sbin/keepalived is present and owned by root:root
     Started: 11:10:27.562941
    Duration: 5.863 ms
     Changes:   
----------
          ID: /etc/keepalived/keepalived.conf
    Function: file.managed
      Result: True
     Comment: File /etc/keepalived/keepalived.conf updated
     Started: 11:10:27.569010
    Duration: 121.545 ms
     Changes:   
              ----------
              diff:
                  ---  
                  +++  
                  @@ -9,7 +9,7 @@
                      smtp_connect_timeout 30
                      router_id LVS_DEVEL
                      vrrp_skip_check_adv_addr
                  -   vrrp_strict
                  +#   vrrp_strict
                      vrrp_garp_interval 0
                      vrrp_gna_interval 0
                   }
----------
          ID: kp-service
    Function: file.managed
        Name: /etc/init.d/keepalived
      Result: True
     Comment: File /etc/init.d/keepalived is in the correct state
     Started: 11:10:27.690769
    Duration: 69.875 ms
     Changes:   
----------
          ID: kp-service
    Function: service.running
        Name: keepalived
      Result: True
     Comment: Service reloaded
     Started: 11:10:27.844983
    Duration: 82.323 ms
     Changes:   
              ----------
              keepalived:
                  True
----------
          ID: haproxy-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 11:10:28.781608
    Duration: 587.1 ms
     Changes:   
----------
          ID: haproxy-install
    Function: file.managed
        Name: /etc/haproxy/haproxy.cfg
      Result: True
     Comment: File /etc/haproxy/haproxy.cfg is in the correct state
     Started: 11:10:29.368942
    Duration: 35.71 ms
     Changes:   
----------
          ID: haproxy-install
    Function: service.running
        Name: haproxy
      Result: True
     Comment: The service haproxy is already running
     Started: 11:10:29.405251
    Duration: 39.054 ms
     Changes:   

Summary for server4
-------------
Succeeded: 11 (changed=2)
Failed:     0
-------------
Total states run:     11
Total run time:    1.267 s
server1:
----------
          ID: kp-install
    Function: file.managed
        Name: /mnt/keepalived-2.0.6.tar.gz
      Result: True
     Comment: File /mnt/keepalived-2.0.6.tar.gz is in the correct state
     Started: 11:10:27.294920
    Duration: 311.378 ms
     Changes:   
----------
          ID: kp-install
    Function: cmd.run
        Name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd  keepalived-2.0.6  &&  ./configure --prefix=/usr/local/keepalived  --with-init=SYSV &> /dev/null  && make && make install &> /dev/null
      Result: True
     Comment: /usr/local/keepalived exists
     Started: 11:10:27.609328
    Duration: 0.769 ms
     Changes:   
----------
          ID: /etc/keepalived
    Function: file.directory
      Result: True
     Comment: Directory /etc/keepalived is in the correct state
     Started: 11:10:27.610336
    Duration: 1.081 ms
     Changes:   
----------
          ID: /etc/sysconfig/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /etc/sysconfig/keepalived is present and owned by root:root
     Started: 11:10:27.611605
    Duration: 14.098 ms
     Changes:   
----------
          ID: /sbin/keepalived
    Function: file.symlink
      Result: True
     Comment: Symlink /sbin/keepalived is present and owned by root:root
     Started: 11:10:27.625926
    Duration: 4.974 ms
     Changes:   
----------
          ID: /etc/keepalived/keepalived.conf
    Function: file.managed
      Result: True
     Comment: File /etc/keepalived/keepalived.conf updated
     Started: 11:10:27.631108
    Duration: 196.926 ms
     Changes:   
              ----------
              diff:
                  ---  
                  +++  
                  @@ -9,7 +9,7 @@
                      smtp_connect_timeout 30
                      router_id LVS_DEVEL
                      vrrp_skip_check_adv_addr
                  -   vrrp_strict
                  +#   vrrp_strict
                      vrrp_garp_interval 0
                      vrrp_gna_interval 0
                   }
----------
          ID: kp-service
    Function: file.managed
        Name: /etc/init.d/keepalived
      Result: True
     Comment: File /etc/init.d/keepalived is in the correct state
     Started: 11:10:27.828277
    Duration: 67.606 ms
     Changes:   
----------
          ID: kp-service
    Function: service.running
        Name: keepalived
      Result: True
     Comment: Service reloaded
     Started: 11:10:27.959411
    Duration: 59.854 ms
     Changes:   
              ----------
              keepalived:
                  True
----------
          ID: haproxy-install
    Function: pkg.installed
      Result: True
     Comment: All specified packages are already installed
     Started: 11:10:28.821571
    Duration: 487.254 ms
     Changes:   
----------
          ID: haproxy-install
    Function: file.managed
        Name: /etc/haproxy/haproxy.cfg
      Result: True
     Comment: File /etc/haproxy/haproxy.cfg is in the correct state
     Started: 11:10:29.309049
    Duration: 46.521 ms
     Changes:   
----------
          ID: haproxy-install
    Function: service.running
        Name: haproxy
      Result: True
     Comment: The service haproxy is already running
     Started: 11:10:29.356102
    Duration: 34.616 ms
     Changes:   

Summary for server1
-------------
Succeeded: 11 (changed=2)
Failed:     0
-------------
Total states run:     11
Total run time:    1.225 s


Check the virtual IP on server1; the configuration succeeded:

[root@server1 files]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:2d:98:9a brd ff:ff:ff:ff:ff:ff
    inet 172.25.15.6/24 brd 172.25.15.255 scope global eth0
    inet 172.25.15.100/32 scope global eth0
    inet6 fe80::5054:ff:fe2d:989a/64 scope link 
       valid_lft forever preferred_lft forever


Test again in a browser.

**Testing the high-availability failover**
Stop keepalived on server1:

[root@server1 files]# /etc/init.d/keepalived stop
Stopping keepalived:                                       [  OK  ]
[root@server1 files]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:2d:98:9a brd ff:ff:ff:ff:ff:ff
    inet 172.25.15.6/24 brd 172.25.15.255 scope global eth0
    inet6 fe80::5054:ff:fe2d:989a/64 scope link 
       valid_lft forever preferred_lft forever

On server4, check whether the virtual IP has floated over:
#############################################
[root@server4 init.d]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:b7:b9:49 brd ff:ff:ff:ff:ff:ff
    inet 172.25.15.9/24 brd 172.25.15.255 scope global eth0
    inet 172.25.15.100/32 scope global eth0
    inet6 fe80::5054:ff:feb7:b949/64 scope link 
       valid_lft forever preferred_lft forever

Success: the VIP moved to server4.
Test once more in the browser: success.
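The same can be confirmed from the command line against the VIP (a quick sketch, assuming curl on the client):

curl -s http://172.25.15.100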



Reposted from blog.csdn.net/iaMay_____/article/details/81780729