OpenStack Installation Scripts


Preparation

  • Set hostnames and the hosts file

    Set a hostname on each node and configure /etc/hosts on all nodes; in the hosts file, map the virtual IP to the hostname controller.

    # Set hostnames
    # Run on controller1
    hostnamectl set-hostname controller1
    hostname
    # [root@localhost ~]# hostnamectl set-hostname controller1
    # [root@localhost ~]# hostname
    # controller1
    
    # Run on controller2
    hostnamectl set-hostname controller2
    hostname
    # [root@localhost ~]# hostnamectl set-hostname controller2
    # [root@localhost ~]# hostname
    # controller2
    
    # Run on controller3
    hostnamectl set-hostname controller3
    hostname
    # [root@localhost ~]# hostnamectl set-hostname controller3
    # [root@localhost ~]# hostname
    # controller3
    
    # Configure /etc/hosts
    cat << EOF >> /etc/hosts
    192.168.5.20        controller
    192.168.5.21        controller1
    192.168.5.22        controller2
    192.168.5.23        controller3
    EOF
    
    # Verify
    cat /etc/hosts
    # [root@localhost ~]# cat /etc/hosts
    # 127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
    # ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
    # 192.168.5.20        controller
    # 192.168.5.21        controller1
    # 192.168.5.22        controller2
    # 192.168.5.23        controller3
    
    ping -c 4 controller1
    # [root@localhost ~]# ping -c 4 controller1
    # PING controller1 (192.168.5.21) 56(84) bytes of data.
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=1 ttl=64 time=0.052 ms
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=2 ttl=64 time=0.080 ms
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=3 ttl=64 time=0.073 ms
    # 64 bytes from controller1 (192.168.5.21): icmp_seq=4 ttl=64 time=0.061 ms
    # 
    # --- controller1 ping statistics ---
    # 4 packets transmitted, 4 received, 0% packet loss, time 2999ms
    # rtt min/avg/max/mdev = 0.052/0.066/0.080/0.013 ms
    
    ping -c 4 controller2
    # [root@localhost ~]# ping -c 4 controller2
    # PING controller2 (192.168.5.22) 56(84) bytes of data.
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=1 ttl=64 time=0.316 ms
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=2 ttl=64 time=0.583 ms
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=3 ttl=64 time=0.426 ms
    # 64 bytes from controller2 (192.168.5.22): icmp_seq=4 ttl=64 time=0.445 ms
    # 
    # --- controller2 ping statistics ---
    # 4 packets transmitted, 4 received, 0% packet loss, time 2999ms
    # rtt min/avg/max/mdev = 0.316/0.442/0.583/0.097 ms
    
    ping -c 4 controller3
    # [root@localhost ~]# ping -c 4 controller3
    # PING controller3 (192.168.5.23) 56(84) bytes of data.
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=1 ttl=64 time=0.287 ms
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=2 ttl=64 time=0.385 ms
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=3 ttl=64 time=0.454 ms
    # 64 bytes from controller3 (192.168.5.23): icmp_seq=4 ttl=64 time=0.374 ms
    # 
    # --- controller3 ping statistics ---
    # 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
    # rtt min/avg/max/mdev = 0.287/0.375/0.454/0.059 ms
    
    
  • Open the firewall between nodes

    Run on every host: trust the IP addresses of all nodes. The first command below adds them to the permanent configuration; the second applies the same change to the running firewall.

    firewall-cmd --permanent --zone=trusted --add-source=192.168.5.20 --add-source=192.168.5.21 --add-source=192.168.5.22 --add-source=192.168.5.23
    # Verify
    firewall-cmd --zone=trusted --list-sources --permanent
    # [root@localhost ~]# firewall-cmd --zone=trusted --list-sources --permanent
    # 192.168.5.20 192.168.5.21 192.168.5.22 192.168.5.23
    
    firewall-cmd --zone=trusted --add-source=192.168.5.20 --add-source=192.168.5.21 --add-source=192.168.5.22 --add-source=192.168.5.23
    # Verify
    firewall-cmd --zone=trusted --list-sources
    # [root@localhost ~]# firewall-cmd --zone=trusted --list-sources
    # 192.168.5.20 192.168.5.21 192.168.5.22 192.168.5.23
    
  • Set up SSH trust between nodes

    # Run on controller1
    ssh-keygen
    # [root@localhost ~]# ssh-keygen
    # Generating public/private rsa key pair.
    # Enter file in which to save the key (/root/.ssh/id_rsa):
    # Created directory '/root/.ssh'.
    # Enter passphrase (empty for no passphrase):
    # Enter same passphrase again:
    # Your identification has been saved in /root/.ssh/id_rsa.
    # Your public key has been saved in /root/.ssh/id_rsa.pub.
    # The key fingerprint is:
    # SHA256:BvqnKepnbPEirSfukBb9PrAFYpwtz3PiHP0uc1WvjU0 root@controller1
    # The key's randomart image is:
    # +---[RSA 2048]----+
    # |                 |
    # |                 |
    # |. o   .          |
    # | *.o . .   .     |
    # |..=.+   S . .    |
    # | ..Bo= . .   E   |
    # |o.ooO+o o   *    |
    # |..o+@+.*   o o   |
    # | +*X +O.         |
    # +----[SHA256]-----+
    
    ssh-copy-id root@controller1
    # [root@localhost ~]# ssh-copy-id root@controller1
    # /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
    # The authenticity of host 'controller1 (192.168.5.21)' can't be established.
    # ECDSA key fingerprint is SHA256:1UIr4UMccY+KofSegOIfp/SbKDH2cpLSlWYzTtVBUQo.
    # ECDSA key fingerprint is MD5:63:ea:b3:bc:0c:42:17:db:c0:ca:f0:45:a1:84:2e:c3.
    # Are you sure you want to continue connecting (yes/no)? yes
    # /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
    # /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
    # root@controller1's password:
    # 
    # Number of key(s) added: 1
    # 
    # Now try logging into the machine, with:   "ssh 'root@controller1'"
    # and check to make sure that only the key(s) you wanted were added.
    
    scp -r ~/.ssh root@controller2:~/
    # [root@localhost ~]# scp -r ~/.ssh root@controller2:~/
    # The authenticity of host 'controller2 (192.168.5.22)' can't be established.
    # ECDSA key fingerprint is SHA256:1UIr4UMccY+KofSegOIfp/SbKDH2cpLSlWYzTtVBUQo.
    # ECDSA key fingerprint is MD5:63:ea:b3:bc:0c:42:17:db:c0:ca:f0:45:a1:84:2e:c3.
    # Are you sure you want to continue connecting (yes/no)? yes
    # Warning: Permanently added 'controller2,192.168.5.22' (ECDSA) to the list of known hosts.
    # root@controller2's password:
    # id_rsa                           100% 1675     1.4MB/s   00:00
    # id_rsa.pub                       100%  398   431.6KB/s   00:00
    # known_hosts                      100%  372   516.4KB/s   00:00
    # authorized_keys                  100%  398   453.9KB/s   00:00
    
    scp -r ~/.ssh root@controller3:~/
    # [root@localhost ~]# scp -r ~/.ssh root@controller3:~/
    # The authenticity of host 'controller3 (192.168.5.23)' can't be established.
    # ECDSA key fingerprint is SHA256:1UIr4UMccY+KofSegOIfp/SbKDH2cpLSlWYzTtVBUQo.
    # ECDSA key fingerprint is MD5:63:ea:b3:bc:0c:42:17:db:c0:ca:f0:45:a1:84:2e:c3.
    # Are you sure you want to continue connecting (yes/no)? yes
    # Warning: Permanently added 'controller3,192.168.5.23' (ECDSA) to the list of known hosts.
    # root@controller3's password:
    # id_rsa                          100% 1675     1.2MB/s   00:00
    # id_rsa.pub                      100%  398   332.9KB/s   00:00
    # known_hosts                     100%  558   540.3KB/s   00:00
    # authorized_keys                 100%  398   494.5KB/s   00:00
    
    
    # Verify
    ssh root@controller1 hostname
    # [root@localhost ~]# ssh root@controller1 hostname
    # controller1
    
    ssh root@controller2 hostname
    # [root@localhost ~]# ssh root@controller2 hostname
    # controller2
    
    ssh root@controller3 hostname
    # [root@localhost ~]# ssh root@controller3 hostname
    # controller3
    
    
  • Upload the installation package and extract it to the target directory

    mkdir -p /data/packages
    tar xvf openstack.tar.gz -C /data/packages/
    # Verify
    ls /data/packages/openstack/
    # [root@localhost ~]# ls /data/packages/openstack/
    # base  ceph  ceph-deploy  chrony  cinder  glance  haproxy  images  keystone
    # mariadb  memcached  neutron-compute  neutron-controller  nova-compute
    # nova-controller  pacemaker  rabbitmq
    
    

Edit the cluster configuration file

Use a text editor to edit the contents of the config file to match your cluster layout; a hypothetical example follows the check below.

  • Check the configuration file contents
    egrep -v '(^#|^$)' config
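
  • Example config (a hypothetical sketch only; the config file shipped with the scripts is authoritative, and the variable names below are simply taken from the commands used later in this document)

    # Hypothetical sketch -- adjust every value to your own cluster
    PACKAGES_DIR=/data/packages/openstack     # where openstack.tar.gz was extracted
    OPENSTACK_VIP=192.168.5.20                # virtual IP, resolves to "controller"
    HAPROXY_PASS=<haproxy-admin-password>
    RABBIT_PASS=<rabbitmq-openstack-password>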
    

Basic configuration

# Load the cluster configuration
source config
# Run the base environment script
bash base/base.sh
# Install the base packages
yum localinstall -y $PACKAGES_DIR/base/*.rpm
# Reboot the server
reboot

Configure network time (chrony)

# Load the cluster configuration
source config
# Run the installation script
bash chrony/chrony.sh
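# Verify (a hedged addition, not part of the original script): check that
# chronyd can reach its time sources.
chronyc sources -v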

Pacemaker cluster

# Load the cluster configuration
source config
# Run the installation script (interactive steps on the primary node)
bash pacemaker/pacemaker.sh
# Verify
pcs status
ping -c 4 $OPENSTACK_VIP
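# The virtual IP is managed as a pacemaker resource. A sketch of how such a
# resource could be created manually (the resource name "vip" and the /24
# netmask are assumptions; pacemaker.sh is assumed to do something similar):
#   pcs resource create vip ocf:heartbeat:IPaddr2 ip=$OPENSTACK_VIP cidr_netmask=24 op monitor interval=30s
# List the resources that the script actually created:
pcs status resources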

HAProxy cluster

# Load the cluster configuration
source config
# Run the installation script
bash haproxy/haproxy.sh
# Verify
# Open http://ip:8888/admin in a browser
# Username: admin; show the password with: echo $HAPROXY_PASS
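# The statistics page above is served by a "listen" section in
# /etc/haproxy/haproxy.cfg. A hedged sketch of what that section is assumed
# to look like (exact option names and values may differ in the script):
#   listen stats
#       bind *:8888
#       mode http
#       stats enable
#       stats uri /admin
#       stats auth admin:<password from $HAPROXY_PASS>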

MariaDB cluster

# Load the cluster configuration
source config
# Run the installation script (interactive steps on the primary node)
bash mariadb/mariadb.sh
# Remove insecure users
mysql -uroot -e "DELETE FROM mysql.user WHERE user='' OR host='`hostname`';"
mysql -uroot -e 'FLUSH PRIVILEGES;'
# Verify
mysql -uhaproxy -h controller -e 'SHOW STATUS LIKE "wsrep%";'
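# In the wsrep output, the key indicators are wsrep_cluster_size (it should
# equal the number of nodes, 3 in this layout) and wsrep_ready (ON).
# A narrower check for just the cluster size:
mysql -uhaproxy -h controller -e 'SHOW STATUS LIKE "wsrep_cluster_size";'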

RabbitMQ

# Load the cluster configuration
source config
# Run the installation script (interactive steps on the other nodes)
bash rabbitmq/rabbitmq.sh
# Verify
rabbitmqctl authenticate_user openstack $RABBIT_PASS
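# Additional checks (standard rabbitmqctl commands, not part of the original
# script): confirm cluster membership and that the openstack user exists.
rabbitmqctl cluster_status
rabbitmqctl list_users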

Memcached

# Load the cluster configuration
source config
# Run the installation script
bash memcached/memcached.sh
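# Verify (a hedged addition; memcached's default port 11211 is assumed):
# confirm that memcached is listening.
ss -tnlp | grep 11211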

ceph

# Load the cluster configuration
source config
# Run the installation script (interactive steps)
bash ceph/ceph.sh
# Verify
ceph -s | grep health:
# Set the replica count (optional; the default is 3)
openstack-config --set /etc/ceph/ceph.conf 'global' 'osd pool default size' '3'
systemctl restart ceph.target
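# Note: "osd pool default size" only applies to pools created after the
# change; pools that already exist keep their current size. To check or
# adjust an existing pool later (the pool name "images" is illustrative;
# pools are created by the later glance/cinder steps):
ceph osd pool ls detail
# ceph osd pool set images size 3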

keystone

# Load the cluster configuration
source config
# Run the installation script
bash keystone/keystone.sh
# Verify
. ~/openstack/admin-openrc
openstack token issue

glance

# Load the cluster configuration
source config
# Run the installation script
bash glance/glance.sh
# Integrate with Ceph
bash glance/glance-ceph.sh
# Verify
# Upload an image (run on controller1)
openstack image create "cirros" \
--file $PACKAGES_DIR/images/cirros-0.4.0-x86_64-disk.raw \
--disk-format raw --container-format bare \
--property hw_scsi_model=virtio-scsi \
--property hw_disk_bus=scsi \
--property hw_qemu_guest_agent=yes \
--property os_require_quiesce=yes \
--property os_type=linux \
--property os_admin_user=root \
--property login_name=cirros \
--property login_password=gocubsgo \
--public 
# List images
openstack image list
# List the image objects stored in Ceph
rbd ls images

nova controller

# Load the cluster configuration
source config
# Run the installation script (interactive steps)
bash nova/nova-controller.sh
# Verify
openstack compute service list --host `hostname`
# [root@controller1 openstack-script]# openstack compute service list --host `hostname`
# +----+------------------+-------------+----------+---------+-------+----------------------------+
# | ID | Binary           | Host        | Zone     | Status  | State | Updated At                 |
# +----+------------------+-------------+----------+---------+-------+----------------------------+
# | 10 | nova-consoleauth | controller1 | internal | enabled | up    | 2019-07-11T10:06:55.000000 |
# | 49 | nova-scheduler   | controller1 | internal | enabled | up    | 2019-07-11T10:06:55.000000 |
# | 88 | nova-conductor   | controller1 | internal | enabled | up    | 2019-07-11T10:06:56.000000 |
# +----+------------------+-------------+----------+---------+-------+----------------------------+

nova compute

# Load the cluster configuration
source config
# Run the installation script
bash nova/nova-compute.sh
# Verify
openstack compute service list --host `hostname` --service nova-compute
# [root@controller1 openstack-script]# openstack compute service list --host `hostname` --service nova-compute
# +-----+--------------+-------------+------+---------+-------+----------------------------+
# |  ID | Binary       | Host        | Zone | Status  | State | Updated At                 |
# +-----+--------------+-------------+------+---------+-------+----------------------------+
# | 154 | nova-compute | controller1 | nova | enabled | up    | 2019-07-11T10:11:56.000000 |
# +-----+--------------+-------------+------+---------+-------+----------------------------+
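# If a newly installed compute node does not appear in the service list, it
# may not have been mapped to a cell yet. The standard discovery command from
# the OpenStack documentation (the script is assumed to run something
# equivalent already) is:
# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova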

# If the physical machine does not support KVM hardware virtualization, configure QEMU virtualization instead (optional)
# No output from the following check means the CPU does not expose the vmx/svm flags
egrep -o '(vmx|svm)' /proc/cpuinfo
openstack-config --set /etc/nova/nova.conf 'libvirt' 'virt_type' 'qemu'
openstack-config --set /etc/nova/nova.conf 'libvirt' 'cpu_mode' 'none'
systemctl restart openstack-nova-compute.service
systemctl status openstack-nova-compute.service

neutron controller

# Load the cluster configuration
source config
# Run the installation script (interactive steps)
bash neutron/neutron-controller.sh
# Verify
openstack network agent list --host `hostname`
# [root@controller1 openstack-script]# openstack network agent list --host `hostname`
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | 07ed26e0-6a1a-4405-a3a2-c6f2413ebfe6 | DHCP agent         | controller1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
# | 30bc443f-e54b-466d-baa6-1d2646f6e290 | L3 agent           | controller1 | nova              | :-)   | UP    | neutron-l3-agent          |
# | 82966bcd-b726-493c-a03c-490fa14b0764 | Metadata agent     | controller1 | None              | :-)   | UP    | neutron-metadata-agent    |
# | d796e67b-3772-4eb1-8d18-889a6dbb0b4a | Linux bridge agent | controller1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+

neutron compute

# Load the cluster configuration
source config
# Run the installation script
bash neutron/neutron-compute.sh
# Verify
openstack network agent list --host `hostname` --agent-type linux-bridge
# [root@controller1 openstack-script]# openstack network agent list --host `hostname` --agent-type linux-bridge
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | ID                                   | Agent Type         | Host        | Availability Zone | Alive | State | Binary                    |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+
# | d796e67b-3772-4eb1-8d18-889a6dbb0b4a | Linux bridge agent | controller1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
# +--------------------------------------+--------------------+-------------+-------------------+-------+-------+---------------------------+

Create networks

Note: run these commands on any one of the controller nodes. A verification sketch follows the list below.

  • Create the external (provider) network

    [controller1]

    openstack network create  --share --external \
      --provider-physical-network provider \
      --provider-network-type flat provider
    
  • Create the external network subnet

    # Create an external subnet with the IP range 192.168.5.100-192.168.5.120, a /24 netmask, gateway 192.168.5.1, and DNS servers 114.114.114.114 and 8.8.8.8
    openstack subnet create --network provider \
      --allocation-pool start=192.168.5.100,end=192.168.5.120 \
      --dns-nameserver 114.114.114.114 --dns-nameserver 8.8.8.8 \
      --gateway 192.168.5.1 \
      --subnet-range 192.168.5.0/24 provider
    
  • Create the internal (self-service) network

    openstack network create selfservice
    
  • Create the internal network subnet

    # Create an internal subnet with the IP range 172.16.1.1-172.16.1.254, a /24 netmask, gateway 172.16.1.1, and DNS servers 114.114.114.114 and 8.8.8.8
    openstack subnet create --network selfservice \
      --dns-nameserver 114.114.114.114 --dns-nameserver 8.8.8.8 \
      --gateway 172.16.1.1 \
      --subnet-range 172.16.1.0/24 selfservice
    
  • Create a router

    openstack router create router
    
  • Attach the internal network to the router

    neutron router-interface-add router selfservice
    
  • Set the router's external gateway

    neutron router-gateway-set router provider
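
  • Verify (standard OpenStack CLI checks; these commands are an addition and are not part of the original script)

    openstack network list
    openstack subnet list
    openstack router show router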
    

cinder

# Load the cluster configuration
source config
# Run the installation script (interactive steps)
bash cinder/cinder.sh
# Integrate with Ceph
bash cinder/cinder-ceph.sh
# Set the per-volume size limit
openstack quota set --class --per-volume-gigabytes 100 default
# Verify
openstack volume service list
# [root@controller1 openstack-script]# openstack volume service list
# +------------------+------------------+------+---------+-------+----------------------------+
# | Binary           | Host             | Zone | Status  | State | Updated At                 |
# +------------------+------------------+------+---------+-------+----------------------------+
# | cinder-scheduler | controller2      | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-scheduler | controller3      | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-scheduler | controller1      | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-volume    | controller2@ceph | nova | enabled | up    | 2019-07-11T10:43:15.000000 |
# | cinder-volume    | controller3@ceph | nova | enabled | up    | 2019-07-11T10:43:16.000000 |
# | cinder-volume    | controller1@ceph | nova | enabled | up    | 2019-07-11T10:43:15.000000 |
# +------------------+------------------+------+---------+-------+----------------------------+
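# End-to-end check (a hedged addition; the RBD pool name "volumes" is the
# conventional cinder pool and is an assumption about this setup): create a
# small test volume and confirm that it appears in Ceph.
openstack volume create --size 1 test-volume
openstack volume list
rbd ls volumes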

huluer

# Load the cluster configuration
source config
# Run the installation script
bash huluer/huluer.sh

Service check and restart scripts

# Service status check
cp -f utils/check-openstack-service.sh ~/openstack/check-openstack-service.sh
chmod +x ~/openstack/check-openstack-service.sh
# Verify
~/openstack/check-openstack-service.sh
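# A minimal sketch of the kind of check such a status script might perform
# (the service names below are assumptions; the shipped
# check-openstack-service.sh is the authoritative version):
for svc in openstack-nova-api openstack-glance-api neutron-server \
           openstack-cinder-api rabbitmq-server mariadb memcached; do
    printf '%-28s %s\n' "$svc" "$(systemctl is-active $svc)"
done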

# Service restart script
cp -f utils/restart-openstack-service.sh ~/openstack/restart-openstack-service.sh
chmod +x ~/openstack/restart-openstack-service.sh
# Verify. Note: do not run the restart script on more than one node at the same time
~/openstack/restart-openstack-service.sh

Database recovery script

Restarts a database that has been shut down. The following cases are supported: 1. some cluster nodes are still running; 2. the whole cluster was shut down cleanly; 3. the whole cluster was shut down abnormally.

cp -f utils/recovery-mariadb.sh ~/openstack/recovery-mariadb.sh
chmod +x ~/openstack/recovery-mariadb.sh
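# A minimal sketch of the recovery logic (assumptions only; the shipped
# recovery-mariadb.sh is the authoritative version):
#  - if another node is still running, simply start the local service:
#      systemctl start mariadb
#  - if the whole cluster was shut down cleanly, bootstrap from the node whose
#    /var/lib/mysql/grastate.dat shows "safe_to_bootstrap: 1":
#      galera_new_cluster
#  - after an unclean shutdown, compare the seqno on every node (for example
#    with "grep seqno /var/lib/mysql/grastate.dat" or mysqld_safe
#    --wsrep-recover), run galera_new_cluster on the node with the highest
#    seqno, then start mariadb on the remaining nodes.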
