I. Configure the yum source
1. Place the packages in the web server's default publish directory:
/var/www/html/saltstack/rhel7/2018
2. Write the yum repository file
[root@server1 ~]# vim /etc/yum.repos.d/salt.repo
[salt]
name=salt
baseurl=http://172.25.17.250/saltstack/rhel7/2018/
gpgcheck=0
[root@server1 ~]# scp /etc/yum.repos.d/salt.repo [email protected]:/etc/yum.repos.d/salt.repo
[root@server1 ~]# scp /etc/yum.repos.d/salt.repo [email protected]:/etc/yum.repos.d/salt.repo
II. Install SaltStack
With the yum source configured, all of the packages below can be installed directly
libsodium-1.0.16-1.el7.x86_64.rpm PyYAML-3.11-1.el7.x86_64.rpm
openpgm-5.2.122-2.el7.x86_64.rpm
python2-libcloud-2.0.0-2.el7.noarch.rpm salt-2018.3.3-1.el7.noarch.rpm
python-cherrypy-5.6.0-2.el7.noarch.rpm salt-api-2018.3.3-1.el7.noarch.rpm
python-crypto-2.6.1-2.el7.x86_64.rpm salt-cloud-2018.3.3-1.el7.noarch.rpm
python-futures-3.0.3-1.el7.noarch.rpm salt-master-2018.3.3-1.el7.noarch.rpm
python-msgpack-0.4.6-1.el7.x86_64.rpm salt-minion-2018.3.3-1.el7.noarch.rpm
python-psutil-2.2.1-1.el7.x86_64.rpm salt-ssh-2018.3.3-1.el7.noarch.rpm
python-tornado-4.2.1-1.el7.x86_64.rpm salt-syndic-2018.3.3-1.el7.noarch.rpm
python-zmq-15.3.0-3.el7.x86_64.rpm zeromq-4.1.4-7.el7.x86_64.rpm
[root@server1 2018]# yum install -y *
Modify the configuration file
[root@server1 2018]# vim /etc/salt/minion
16 master: 172.25.17.1
Start the salt-master service and enable it to start at boot
[root@server1 2018]# systemctl start salt-master
[root@server1 2018]# systemctl enable salt-master
Created symlink from /etc/systemd/system/multi-user.target.wants/salt-master.service to /usr/lib/systemd/system/salt-master.service.
Configure server2 and server3 in the same way: install salt-minion and edit the configuration file
[root@server2 2018]# yum install -y salt-minion-2018.3.3-1.el7.noarch.rpm
[root@server2 2018]# vim /etc/salt/minion
master: 172.25.17.1
[root@server2 2018]# systemctl start salt-minion
[root@server2 2018]# systemctl enable salt-minion
Created symlink from /etc/systemd/system/multi-user.target.wants/salt-minion.service to /usr/lib/systemd/system/salt-minion.service.
[root@server3 2018]# yum install -y salt-minion-2018.3.3-1.el7.noarch.rpm
[root@server3 2018]# vim /etc/salt/minion
master: 172.25.17.1
[root@server3 2018]# systemctl start salt-minion
[root@server3 2018]# systemctl enable salt-minion
Created symlink from /etc/systemd/system/multi-user.target.wants/salt-minion.service to /usr/lib/systemd/system/salt-minion.service.
List pending minion keys, then accept them
[root@server1 salt]# salt-key -L
[root@server1 salt]# salt-key -A
[root@server1 salt]# salt-key -L
test:
[root@server1 salt]# salt '*' test.ping
server3:
True
server2:
True
[root@server1 ~]# netstat -atnlp
Port 4505: publish/subscribe channel (master pushes commands to minions)
Port 4506: request/reply channel (master receives results from minions)
[root@server1 salt]# yum install -y lsof
[root@server1 salt]# lsof -i :4505
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
salt-mast 19645 root 15u IPv4 55029 0t0 TCP *:4505 (LISTEN)
salt-mast 19645 root 17u IPv4 58303 0t0 TCP server1:4505->server3:38256 (ESTABLISHED)
salt-mast 19645 root 18u IPv4 58308 0t0 TCP server1:4505->server2:45670 (ESTABLISHED)
[root@server1 salt]# yum install -y python-setproctitle.x86_64
[root@server1 salt]# systemctl restart salt-master
[root@server1 ~]# ps ax
Compare md5 checksums: each minion's minion_master.pub is identical to the master's master.pub
[root@server1 master]# pwd
/etc/salt/pki/master
[root@server1 master]# md5sum master.pub
9903336a1fbf632a33a9a37ae9a5fd40 master.pub
[root@server2 minion]# pwd
/etc/salt/pki/minion
[root@server2 minion]# md5sum minion_master.pub
9903336a1fbf632a33a9a37ae9a5fd40 minion_master.pub
[root@server3 2018]# cd /etc/salt/pki/minion
[root@server3 minion]# md5sum minion_master.pub
9903336a1fbf632a33a9a37ae9a5fd40 minion_master.pub
'*' matches all hosts
Call the execution module: cmd.run 'df -h'
[root@server1 ~]# salt '*' cmd.run 'df -h'
[root@server1 ~]# salt '*' cmd.run 'hostname'
Modify the master configuration file
[root@server1 ~]# cd /etc/salt/
[root@server1 salt]# vim master
673 #
674 file_roots:
675 base:
676 - /srv/salt
Create the base directory and write the installation state file
[root@server1 salt]# mkdir /srv/salt
[root@server1 salt]# systemctl restart salt-master
[root@server1 salt]# cd /srv/salt/
[root@server1 salt]# mkdir apache
[root@server1 salt]# ls
apache
[root@server1 salt]# cd apache/
[root@server1 apache]# ls
[root@server1 apache]# vim install.sls
httpd:
pkg.installed # install the httpd package
[root@server1 apache]# salt server2 state.sls apache.install
Installing multiple packages
[root@server1 apache]# vim install.sls
httpd:
pkg.installed:
- pkgs:
- httpd
- php
- httpd-tools
On server2 it can be seen that the httpd-tools package is already installed
[root@server2 minion]# yum list httpd-tools
Loaded plugins: product-id, search-disabled-repos, subscription-manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
Installed Packages
httpd-tools.x86_64 2.4.6-45.el7 @rhel7.3
Add starting the service to the state
[root@server1 apache]# vim install.sls
httpd-install:
pkg.installed:
- pkgs:
- httpd
- php
- httpd-tools
service.running:
- name: httpd
- enable: true
- reload: true
[root@server1 apache]# salt server2 state.sls apache.install
Add a watched configuration file
[root@server1 apache]# mkdir files
[root@server1 apache]# cd files/
[root@server1 files]# ls
[root@server1 files]# scp server2:/etc/httpd/conf/httpd.conf .
The authenticity of host 'server2 (172.25.17.2)' can't be established.
ECDSA key fingerprint is a9:93:85:8a:a4:2b:70:ea:48:a1:8b:71:1f:38:de:6c.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'server2,172.25.17.2' (ECDSA) to the list of known hosts.
root@server2's password:
httpd.conf 100% 11KB 11.5KB/s 00:00
[root@server1 apache]# vim install.sls
httpd-install:
pkg.installed:
- pkgs:
- httpd
- php
- httpd-tools
service.running:
- name: httpd
- enable: true
- reload: True
- watch:
- file: /etc/httpd/conf/httpd.conf
/etc/httpd/conf/httpd.conf:
file.managed:
- source: salt://apache/files/httpd.conf
- user: root
- group: root
- mode: 644
[root@server1 apache]# salt server2 state.sls apache.install
server2:
[root@server2 minion]# yum install tree -y
[root@server2 minion]# cd /var/cache/salt/minion/
[root@server2 minion]# ls
[root@server2 minion]# tree .
.
|-- accumulator
|-- extmods
|-- files
| `-- base
| `-- apache
| |-- files
| | `-- httpd.conf
| `-- install.sls
|-- highstate.cache.p
|-- pkg_refresh
|-- proc
`-- sls.p
7 directories, 5 files
Separate the package installation from the service management
[root@server1 pki]# cd /srv/salt/apache/
[root@server1 apache]# vim install.sls
# copy the lines below into service.sls, then delete them from install.sls
service.running:
- name: httpd
- enable: true
- reload: true
- watch:
- file: /etc/httpd/conf/httpd.conf
The final result
[root@server1 apache]# vim service.sls
include:
- apache.install
httpd-service:
service.running:
- name: httpd
- enable: true
- reload: true
- watch:
- file: httpd-install
Test:
Close httpd service on server2
[root@server2 minion]# systemctl stop httpd
Run the service state file
[root@server1 apache]# salt server2 state.sls apache.service
View httpd status on server2:
[root@server2 minion]# systemctl status httpd
2. nginx deployment
[root@server1 apache]# cd ..
[root@server1 salt]# mkdir nginx
[root@server1 salt]# cd nginx/
[root@server1 nginx]# ls
[root@server1 nginx]# touch install.sls
[root@server1 nginx]# mkdir files
Download the nginx source tarball into the files directory
nginx-1.15.8.tar.gz
[root@server1 nginx]# vim install.sls
nginx-install:
pkg.installed:
- pkgs:
- gcc
- make
- pcre-devel
- zlib-devel
file.managed:
- name: /mnt/nginx-1.15.8.tar.gz
- source: salt://nginx/files/nginx-1.15.8.tar.gz
cmd.run:
- name: cd /mnt && tar zxf nginx-1.15.8.tar.gz && cd nginx-1.15.8
[root@server1 nginx]# salt server3 state.sls nginx.install
Verify on server3 that the tarball was fetched and extracted
[root@server3 etc]# cd /mnt/
[root@server3 mnt]# ls
nginx-1.15.8 nginx-1.15.8.tar.gz
[root@server1 nginx]# vim install.sls
nginx-install:
pkg.installed:
- pkgs:
- gcc
- make
- pcre-devel
- zlib-devel
file.managed:
- name: /mnt/nginx-1.15.8.tar.gz
- source: salt://nginx/files/nginx-1.15.8.tar.gz
cmd.run:
- name: cd /mnt && tar zxf nginx-1.15.8.tar.gz && cd nginx-1.15.8 && sed -i 's/CFLAGS="$CFLAGS -g"/#CFLAGS="$CFLAGS -g"/g' auto/cc/gcc && ./configure --prefix=/usr/local/nginx &> /dev/null && make &> /dev/null && make install &> /dev/null
- creates: /usr/local/nginx
[root@server1 nginx]# salt server3 state.sls nginx.install
Check the size: a sbin directory smaller than 1 MB shows that debug symbols were disabled
[root@server3 local]# du -h /usr/local/nginx/
796K /usr/local/nginx/sbin # less than 1 MB means debug was successfully disabled
68K /usr/local/nginx/conf
0 /usr/local/nginx/logs
8.0K /usr/local/nginx/html
872K /usr/local/nginx/
Setting startup scripts
[root@server3 ~]# scp /usr/local/nginx/conf/nginx.conf server1:/srv/salt/nginx/files
The authenticity of host 'server1 (172.25.17.1)' can't be established.
ECDSA key fingerprint is a9:93:85:8a:a4:2b:70:ea:48:a1:8b:71:1f:38:de:6c.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'server1,172.25.17.1' (ECDSA) to the list of known hosts.
root@server1's password:
nginx.conf 100% 2656 2.6KB/s 00:00
[root@server1 nginx]# vim service.sls
include:
- nginx.install
nginx-service:
file.managed:
- name: /usr/local/nginx/conf/nginx.conf
- source: salt://nginx/files/nginx.conf
[root@server3 ~]# cd /etc/systemd/system
[root@server3 system]# vim nginx.service
[Unit]
# Start only after networking and name resolution are available
Description=The NGINX HTTP and reverse proxy server
After=syslog.target network.target remote-fs.target nss-lookup.target
[Service]
# nginx daemonizes itself, so systemd tracks the main process via the PID file
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
# Validate the configuration before every start
ExecStartPre=/usr/local/nginx/sbin/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/usr/local/nginx/sbin/nginx -s reload
# SIGQUIT asks nginx to finish serving in-flight requests, then exit
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=true
[Install]
WantedBy=multi-user.target
test:
[root@server3 system]# systemctl daemon-reload
[root@server3 system]# systemctl status nginx
[root@server3 system]# systemctl stop nginx
[root@server3 system]# scp nginx.service server1:/srv/salt/nginx/files
root@server1's password:
nginx.service 100% 415 0.4KB/s 00:00
[root@server1 nginx]# vim service.sls
include:
- nginx.install
/usr/local/nginx/conf/nginx.conf:
file.managed:
- source: salt://nginx/files/nginx.conf
nginx-service:
file.managed:
- name: /etc/systemd/system/nginx.service
- source: salt://nginx/files/nginx.service
service.running:
- name: nginx
- enable: true
- reload: true
- watch:
- file: /usr/local/nginx/conf/nginx.conf
Run the nginx service state
[root@server1 nginx]# salt server3 state.sls nginx.service
Check nginx status on server3
[root@server3 system]# curl localhost
Changing the number of worker processes
[root@server3 system]# lscpu # if there are two CPUs, edit /srv/salt/nginx/files/nginx.conf on server1 and set worker_processes to auto
[root@server1 nginx]# salt server3 state.sls nginx.service
[root@server1 nginx]# ps ax # two worker processes can now be seen
Different hosts execute different state files
[root@server1 nginx]# cd /srv/salt/
[root@server1 salt]# vim top.sls
base:
'server2':
- apache.service
'server3':
- nginx.service
[root@server1 salt]# salt '*' state.highstate
3. grains
[root@server2 minion]# cd /etc/salt/
[root@server2 salt]# vim minion
119 # the 'roles' grain having two values that can be matched against.
120 grains:
121 roles:
122 - apache
123 # - memcache
124 # deployment: datacenter4
server2:
[root@server2 salt]# systemctl restart salt-minion
server1:
[root@server1 salt]# salt '*' grains.item roles
server3:
[root@server3 system]# cd /etc/salt/
[root@server3 salt]# ls
cloud cloud.maps.d master minion.d proxy
cloud.conf.d cloud.profiles.d master.d minion_id proxy.d
cloud.deploy.d cloud.providers.d minion pki roster
[root@server3 salt]# vim grains
roles: nginx
server1:
[root@server1 salt]# salt server3 saltutil.sync_grains
server3:
[root@server1 salt]# salt '*' grains.item roles
server3:
----------
roles:
nginx
server2:
----------
roles:
- apache
root@server1 salt]# mkdir _grains
[root@server1 salt]# cd _grains/
[root@server1 _grains]# ls
[root@server1 _grains]# vim my_grains.py
#!/usr/bin/env python
def my_grains():
    """Custom grains module: return a dict of static grain values."""
    data = dict(foo='bar', hello='world')
    data['salt'] = 'stack'
    return data
View
[root@server1 _grains]# salt '*' saltutil.sync_grains
server3:
- grains.my_grains
server2:
- grains.my_grains
[root@server1 _grains]# salt '*' grains.item hello
server2:
----------
hello:
world
server3:
----------
hello:
world
[root@server1 _grains]# salt '*' grains.item salt
server2:
----------
salt:
stack
server3:
----------
salt:
stack
View on server2:
[root@server2 ~]# cd /var/cache/salt/minion/files/base/_grains
server1:
[root@server1 _grains]# cd ..
[root@server1 salt]# ls
apache _grains nginx top.sls
[root@server1 salt]# salt -G 'roles:apache' test.ping
server2:
True
[root@server1 salt]# salt -G 'roles:nginx' test.ping
server3:
True
[root@server1 salt]# salt -G 'salt:stack' test.ping
server3:
True
server2:
True
Match hosts by grains so that each executes a different state
[root@server1 salt]# vim top.sls
base:
  # Fix: grain value must be spelled 'apache' (was 'apacha', which never
  # matches the roles grain set on server2 and silently skips the state).
  'roles:apache':
    - match: grain
    - apache.service
  # Fix: without '- match: grain' Salt falls back to glob-matching the
  # minion id, so 'roles:nginx' would never match server3.
  'roles:nginx':
    - match: grain
    - nginx.service
[root@server1 salt]# salt '*' state.highstate
[root@server1 salt]# salt server2 grains.item os
[root@server1 salt]# salt server2 grains.item fqdn
server2:
----------
fqdn:
server2
[root@server1 salt]# salt server2 grains.item hostname
server2:
----------
hostname:
4. pillar
Change the main configuration file
[root@server1 salt]# vim /etc/salt/master
844 pillar_roots:
845 base:
846 - /srv/pillar
[root@server1 salt]# mkdir /srv/pillar
[root@server1 salt]# systemctl restart salt-master
[root@server1 salt]# cd /srv/pillar/
[root@server1 pillar]# ls
[root@server1 pillar]# vim vars.sls
{% if grains['fqdn'] == 'server2' %}
webserver: httpd
state: master
{% elif grains['fqdn'] == 'server3' %}
webserver: nginx
state: backup
{% endif %}
[root@server1 pillar]# vim top.sls
base:
'*':
- vars
The sls file can also be placed in a subdirectory:
[root@server1 pillar]# mkdir web
[root@server1 pillar]# mv vars.sls web/
[root@server1 pillar]# ls
top.sls web
[root@server1 pillar]# vim top.sls
base:
'*':
- web.vars
test:
[root@server1 web]# salt '*' pillar.items
[root@server1 pillar]# salt server2 test.ping
[root@server1 pillar]# salt '*' saltutil.refresh_pillar # when pillar data changes on the master, minions must refresh their local pillar data
[root@server1 pillar]# salt -I 'state:master' test.ping
server2:
True
[root@server1 pillar]# salt -I 'state:backup' test.ping
server3:
True
5. jinja
Use a template to set variable
[root@server1 ~]# cd /srv/salt/
[root@server1 salt]# vim apache/install.sls
httpd-install:
pkg.installed:
- pkgs:
- httpd
- php
- httpd-tools
file.managed:
- name: /etc/httpd/conf/httpd.conf
- source: salt://apache/files/httpd.conf
- user: root
- group: root
- mode: 644
- template: jinja
- context:
port: 80
host: 172.25.17.2
[root@server1 files]# pwd
/srv/salt/apache/files
[root@server1 files]# vim httpd.conf
42 Listen {{ host }}:{{ port }}
[root@server1 files]# salt server2 state.sls apache.service
Port 80 is not listening yet, because changing the listen address requires a service restart to take effect
[root@server2 salt]# netstat -atnlp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 966/sshd
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN 1226/master
tcp 0 0 172.25.17.2:45914 172.25.17.1:4505 ESTABLISHED 13121/python
tcp 0 0 172.25.17.2:22 172.25.17.250:44796 ESTABLISHED 2172/sshd: root@pts
tcp6 0 0 :::22 :::* LISTEN 966/sshd
tcp6 0 0 ::1:25 :::* LISTEN 1226/master
Check that the file contents have been changed
[root@server2 salt]# vim /etc/httpd/conf/httpd.conf
Listen 172.25.17.2:80
Set the variable
[root@server1 salt]# cd apache/
[root@server1 apache]# vim lib.sls
{% set host = '172.25.17.2' %}
[root@server1 apache]# vim files/httpd.conf
{% from 'apache/lib.sls' import host with context %}
[root@server1 apache]# salt server2 state.sls apache.service
server2's httpd main configuration file is updated automatically
[root@server2 ~]# vim /etc/httpd/conf/httpd.conf
Setting the variable from grains data
[root@server1 apache]# salt server2 grains.item ipv4
server2:
----------
ipv4:
- 127.0.0.1
- 172.25.17.2
[root@server1 apache]# vim files/httpd.conf
Listen {{ grains['ipv4'][-1]}}:{{ port }}
[root@server1 apache]# salt server2 state.sls apache.service
View Results
[root@server2 minion]# vim /etc/httpd/conf/httpd.conf
Listen 172.25.17.2:80
[root@server1 apache]# vim files/httpd.conf
Listen {{ port }}
[root@server2 salt]# systemctl restart httpd