配置yum源:
server4:master主节点
[root@server4 ~]# vim /etc/hosts
[root@server4 ~]# yum repolist
Loaded plugins: product-id, subscription-manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
HighAvailability | 3.9 kB 00:00
HighAvailability/primary_db | 43 kB 00:00
LoadBalancer | 3.9 kB 00:00
LoadBalancer/primary_db | 7.0 kB 00:00
ResilientStorage | 3.9 kB 00:00
ResilientStorage/primary_db | 47 kB 00:00
ScalableFileSystem | 3.9 kB 00:00
ScalableFileSystem/primary_db | 6.8 kB 00:00
rhel-source | 3.9 kB 00:00
repo id repo name status
HighAvailability HighAvailability 56
LoadBalancer LoadBalancer 4
ResilientStorage ResilientStorage 62
ScalableFileSystem ScalableFileSystem 7
rhel-source Red Hat Enterprise Linux 6Server - x86_64 - Source 3,690
repolist: 3,819
[root@server4 ~]# yum install crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm -y
[root@server4 ~]# yum install pacemaker corosync -y
配置文件
[root@server4 ~]# cd /etc/corosync/
[root@server4 corosync]# cp corosync.conf.example corosync.conf
[root@server4 corosync]# vim corosync.conf
bindnetaddr: 172.25.23.0
mcastaddr: 226.94.1.1
mcastport: 5423
service {
name: pacemaker
ver: 0
}
server7:作为高可用节点:配置和主master相同
[root@server7 ~]# yum repolist
Loaded plugins: product-id, subscription-manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
HighAvailability | 3.9 kB 00:00
HighAvailability/primary_db | 43 kB 00:00
LoadBalancer | 3.9 kB 00:00
LoadBalancer/primary_db | 7.0 kB 00:00
ResilientStorage | 3.9 kB 00:00
ResilientStorage/primary_db | 47 kB 00:00
ScalableFileSystem | 3.9 kB 00:00
ScalableFileSystem/primary_db | 6.8 kB 00:00
rhel-source | 3.9 kB 00:00
rhel-source/primary_db | 3.1 MB 00:00
repo id repo name status
HighAvailability HighAvailability 56
LoadBalancer LoadBalancer 4
ResilientStorage ResilientStorage 62
ScalableFileSystem ScalableFileSystem 7
rhel-source Red Hat Enterprise Linux 6Server - x86_64 - Source 3,690
repolist: 3,819
[root@server7 ~]# ls
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm
[root@server7 ~]# yum install * -y
[root@server7 ~]# yum install pacemaker corosync -y
[root@server7 ~]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
[root@server4 ~]# cd /etc/corosync/
[root@server4 corosync]# cp corosync.conf.example corosync.conf
[root@server4 corosync]# vim corosync.conf
bindnetaddr: 172.25.23.0
mcastaddr: 226.94.1.1
mcastport: 5423
service {
name: pacemaker
ver: 0
}
server5:
[root@server5 ~]# yum install scsi-target-utils.x86_64 -y
[root@server5 ~]# vim /etc/tgt/targets.conf
[root@server5 ~]# /etc/init.d/tgtd start
Starting SCSI target daemon: [ OK ]
添加虚拟磁盘:
[root@server5 ~]# fdisk -l
Disk /dev/vdb: 8589 MB, 8589934592 bytes
16 heads, 63 sectors/track, 16644 cylinders
Units = cylinders of 1008 * 512 = 516096 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
添加一个分区
master:172.25.23.4
发现虚拟设备
[root@server4 corosync]# yum install iscsi-initiator-utils.x86_64 -y
[root@server4 corosync]# iscsiadm -m discovery -t st -p 172.25.23.5
Starting iscsid: [ OK ]
172.25.23.5:3260,1 iqn.2018-08.com.example:server.target1
[root@server4 corosync]# iscsiadm -m node -l
Logging in to [iface: default, target: iqn.2018-08.com.example:server.target1, portal: 172.25.23.5,3260] (multiple)
Login to [iface: default, target: iqn.2018-08.com.example:server.target1, portal: 172.25.23.5,3260] successful.
表示摘除,可以不做
[root@server4 corosync]# iscsiadm -m node -u
Logging out of session [sid: 1, target: iqn.2018-08.com.example:server.target1, portal: 172.25.23.5,3260]
Logout of [sid: 1, target: iqn.2018-08.com.example:server.target1, portal: 172.25.23.5,3260] successful.
[root@server4 corosync]# fdisk -l
Disk /dev/sda: 8589 MB, 8589934592 bytes
64 heads, 32 sectors/track, 8192 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
虚拟机server7:
[root@server7 ~]# yum install -y iscsi-initiator-utils.x86_64
[root@server7 ~]# iscsiadm -m discovery -t st -p 172.25.23.5
Starting iscsid: [ OK ]
172.25.23.5:3260,1 iqn.2018-08.com.example:server.target1
[root@server7 ~]# iscsiadm -m node -l
Logging in to [iface: default, target: iqn.2018-08.com.example:server.target1, portal: 172.25.23.5,3260] (multiple)
Login to [iface: default, target: iqn.2018-08.com.example:server.target1, portal: 172.25.23.5,3260] successful.
[root@server7 ~]# fdisk -l
Disk /dev/sda: 8589 MB, 8589934592 bytes
64 heads, 32 sectors/track, 8192 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x79758518
server4:添加分区
[root@server4 corosync]# fdisk -cu /dev/sda
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel with disk identifier 0x79758518.
Changes will remain in memory only, until you decide to write them.
After that, of course, the previous content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4):
Value out of range.
Partition number (1-4): 1
First sector (2048-16777215, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-16777215, default 16777215):
Using default value 16777215
Command (m for help): p
Disk /dev/sda: 8589 MB, 8589934592 bytes
64 heads, 32 sectors/track, 8192 cylinders, total 16777216 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x79758518
Device Boot Start End Blocks Id System
/dev/sda1 2048 16777215 8387584 83 Linux
Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@server4 corosync]# /etc/init.d/moosefs-master stop
Stopping mfsmaster: [ OK ]
[root@server4 corosync]# mount /dev/sda1 /mnt
[root@server4 corosync]# /var/lib/mfs/
-bash: /var/lib/mfs/: is a directory
[root@server4 corosync]# cd /var/lib/mfs/
[root@server4 mfs]# ll
total 3612
-rw-r-----. 1 mfs mfs 200 Aug 26 11:34 changelog.3.mfs
-rw-r-----. 1 mfs mfs 2520 Aug 26 10:56 changelog.4.mfs
-rw-r-----. 1 mfs mfs 478 Aug 26 09:47 changelog.5.mfs
-rw-r-----. 1 mfs mfs 3860 Aug 26 13:50 metadata.mfs
-rw-r-----. 1 mfs mfs 3860 Aug 26 13:00 metadata.mfs.back.1
-rwxr--r--. 1 mfs mfs 8 Aug 2 2017 metadata.mfs.empty
-rw-r-----. 1 mfs mfs 3672832 Aug 26 13:50 stats.mfs
[root@server4 mfs]# cp -p * /mnt/
[root@server4 mfs]# cd /mnt
[root@server4 mnt]# ll
total 3628
[root@server4 ~]# umount /mnt
[root@server4 ~]# vim /etc/init.d/moosefs-master
即使非正常退出mfs也会刷新,生成镜像,可以再次正常开启
[root@server4 ~]# /etc/init.d/moosefs-master stop
Stopping mfsmaster: [ OK ]
[root@server4 ~]# /etc/init.d/moosefs-master start
Starting mfsmaster: [ OK ]
[root@server4 ~]# /etc/init.d/moosefs-master stop
Stopping mfsmaster: [ OK ]
主机上操作
[root@foundation23 Desktop]# netstat -antlup | grep :1229
[root@foundation23 Desktop]# systemctl status fence_virtd
● fence_virtd.service - Fence-Virt system host daemon
Loaded: loaded (/usr/lib/systemd/system/fence_virtd.service; disabled; vendor preset: disabled)
Active: inactive (dead)
Aug 26 14:19:33 foundation23.ilt.example.com systemd[1]: [/usr/lib/systemd/s...
Hint: Some lines were ellipsized, use -l to show in full.
[root@foundation23 Desktop]# systemctl start fence_virtd
[root@foundation23 Desktop]# systemctl status fence_virtd
● fence_virtd.service - Fence-Virt system host daemon
Loaded: loaded (/usr/lib/systemd/system/fence_virtd.service; disabled; vendor preset: disabled)
Active: active (running) since Sun 2018-08-26 14:19:41 CST; 1s ago
Process: 14202 ExecStart=/usr/sbin/fence_virtd $FENCE_VIRTD_ARGS (code=exited, status=0/SUCCESS)
Main PID: 14207 (fence_virtd)
CGroup: /system.slice/fence_virtd.service
└─14207 /usr/sbin/fence_virtd -w
[root@foundation23 Desktop]# netstat -antlup | grep :1229
udp 0 0 0.0.0.0:1229 0.0.0.0:* 14207/fence_virtd
[root@foundation23 Desktop]# cd /etc/cluster/
[root@foundation23 cluster]# ls
fence_xvm.key
[root@foundation23 cluster]# scp fence_xvm.key [email protected]:/etc/cluster/
root@172.25.23.4's password:
scp: /etc/cluster/: Is a directory
[root@foundation23 cluster]# scp -r fence_xvm.key [email protected]:/etc/cluster/
root@172.25.23.4's password:
scp: /etc/cluster/: Is a directory
[root@foundation23 cluster]# vim /root/
[root@foundation23 cluster]# cd /root/.ssh/
[root@foundation23 .ssh]# vim known_hosts
两个节点自己建立/etc/cluster
[root@foundation23 .ssh]# scp fence_xvm.key [email protected]:/etc/cluster/
The authenticity of host '172.25.23.4 (172.25.23.4)' can't be established.
RSA key fingerprint is c0:14:f5:46:87:e5:1b:d7:4d:a7:2d:fc:ef:89:03:c7.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '172.25.23.4' (RSA) to the list of known hosts.
root@172.25.23.4's password:
[root@server7 ~]# mount /dev/sda1 /var/lib/mfs/
[root@server7 ~]# ll -d /var/lib/mfs/
drwxr-xr-x. 3 mfs mfs 4096 Aug 26 13:51 /var/lib/mfs/
[root@server7 ~]# yum install fence-virt
[root@server7 ~]# stonith_admin -I
fence_xvm
fence_virt
fence_pcmk
fence_legacy
4 devices found
[root@server7 ~]# stonith_admin -M -a fence_xvm
<resource-agent name="fence_xvm" shortdesc="Fence agent for virtual machines">
<longdesc>
</longdesc>
<parameters>
<parameter name="debug">
<getopt mixed="-d"/>
<content type="boolean"/>
<shortdesc lang="en">
</shortdesc>
[root@server7 ~]# umount /var/lib/mfs/
server4:
[root@server4 ~]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
[root@server4 ~]# crm
crm(live)# configure
crm(live)configure# show
node server4
node server7
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# property stonith-enabled=true
crm(live)configure# commit
crm(live)configure# bye
bye
[root@server4 ~]# stonith_admin -I
fence_pcmk
fence_legacy
2 devices found
[root@server4 ~]# yum install fence-virt
Loaded plugins: product-id, subscription-manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
Setting up Install Process
从主机上传钥匙
Reconnecting...[root@server4 ~]# mkdir /etc/cluster
[root@server4 ~]# cd /etc/cluster/
[root@server4 cluster]# ls
fence_xvm.key
[root@server4 cluster]# crm
crm(live)# configure
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server4:server4;server7:server7" op monitor interval=1min
crm(live)configure# commit
crm(live)configure# primitive vip ocf:heartbeat:IP
IPaddr IPaddr2 IPsrcaddr
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.23.100 cidr_netmask=24 op monitor interval=30s
crm(live)configure# primitive mfsdata ocf:heartbeat:Filesystem params device=/dev/sda1 directory=/var/lib/mfs fstype=ext4 op monitor interval=1min
crm(live)configure# primitive mfsmaster lsb:moosefs-master op monitor interval=30s
crm(live)configure# group mfsgroup vip mfsdata mfsmaster
crm(live)configure# commit
WARNING: mfsdata: default timeout 20s for start is smaller than the advised 60
WARNING: mfsdata: default timeout 20s for stop is smaller than the advised 60
WARNING: mfsdata: default timeout 20s for monitor is smaller than the advised 40
crm(live)configure# bye
测试:
注意:在其他所有节点解析vip
172.25.23.100 mfsmaster
[root@server4 cluster]# vim /etc/hosts
[root@server4 cluster]# crm_mon
Connection to the CIB terminated
当standby将fence所在主机停掉,另外一台接替工作
[root@server7 ~]# mkdir /etc/cluster
[root@server7 ~]#
[root@server7 ~]# vim /etc/hosts
[root@server7 ~]# crm node standby
[root@server7 ~]# crm node online
[root@server7 ~]# crm node standby
[root@server7 ~]# crm node online