Foreword
Thanks to my helpful friends, who collected and contributed most of the scripts compiled here.
Basic Edition
Mass script XSync (xsync)
#!/bin/bash
# xsync — distribute a file or directory to every worker host via rsync.
# Usage: xsync <path>
#1 Validate arguments: exit with an error if nothing was given.
pcount=$#
if ((pcount==0)); then
  echo "没有输入需要传递的文件或文件夹!!" >&2
  exit 1
fi
#2 Resolve the file name.
p1=$1
fname=$(basename -- "$p1")
echo "fname=$fname"
#3 Resolve the parent directory to an absolute physical path
#  (-P resolves symlinks; && ensures we don't print the wrong dir if cd fails).
pdir=$(cd -P "$(dirname -- "$p1")" && pwd)
echo "pdir=$pdir"
#4 Current user name.
user=$(whoami)
#5 Loop over the worker hosts and distribute with rsync.
for host in hadoop102 hadoop103 hadoop104; do
  echo "------------------- $host --------------"
  rsync -av "$pdir/$fname" "$user@$host:$pdir"
done
Group Control command script XCall (xcall)
#!/bin/bash
# xcall — run the same command on hadoop102..hadoop104 over ssh.
# Usage: xcall <command ...>
# Validate: at least one argument is required.
if (($# == 0)); then
  echo "请传入要执行的命令!" >&2
  exit 1
fi
# Join all arguments into the command string to run remotely.
params="$*"
echo "要执行的命令是:$params"
# Iterate over host numbers 102..104.
for ((i = 102; i <= 104; i++)); do
  echo "==========hadoop$i $params=========="
  # source /etc/profile so the non-interactive ssh session gets PATH/JAVA_HOME.
  ssh "hadoop$i" "source /etc/profile;$params"
done
Group-start journalNode
#!/bin/bash
# Start the JournalNode daemon on every cluster host.
# Adjust the hadoop-daemon.sh path below to match your own installation.
hosts=(hadoop102 hadoop103 hadoop104)
for host in "${hosts[@]}"; do
  echo "================ $host ================"
  ssh "$host" 'source /etc/profile && /opt/module/hadoop-2.7.2/sbin/hadoop-daemon.sh start journalnode'
done
Group-start ZK (zkstart)
#!/bin/bash
# Start the Zookeeper server on every cluster host.
# Adjust the zkServer.sh path below to match your own installation.
hosts=(hadoop102 hadoop103 hadoop104)
for host in "${hosts[@]}"; do
  echo "================ $host ================"
  ssh "$host" 'source /etc/profile && /opt/module/zookeeper-3.4.10/bin/zkServer.sh start'
done
Group-stop ZK (zkstop)
#!/bin/bash
# Stop the Zookeeper server on every cluster host.
# Adjust the zkServer.sh path below to match your own installation.
hosts=(hadoop102 hadoop103 hadoop104)
for host in "${hosts[@]}"; do
  echo "================ $host ================"
  ssh "$host" 'source /etc/profile && /opt/module/zookeeper-3.4.10/bin/zkServer.sh stop'
done
Group-start Hadoop, ZK and related processes (zhstart)
#!/bin/bash
# zhstart — start Zookeeper, HDFS, YARN and the JobHistoryServer on the cluster.
# Adjust the user name and install paths below for your own environment.
readonly RUN_USER=zhengkw
readonly HADOOP_HOME=/opt/module/hadoop-2.7.2
readonly ZK_HOME=/opt/module/zookeeper-3.4.10

echo "================ 开始启动所有节点服务 ==========="
echo "================ 正在启动Zookeeper ==========="
for host in hadoop102 hadoop103 hadoop104; do
  # source /etc/profile so the non-interactive ssh session has JAVA_HOME etc.
  ssh "$host" "source /etc/profile && $ZK_HOME/bin/zkServer.sh start"
done
echo "================ 正在启动HDFS ==========="
# HDFS control scripts run from hadoop102 (the NameNode host).
ssh "$RUN_USER@hadoop102" "$HADOOP_HOME/sbin/start-dfs.sh"
echo "================ 正在启动YARN ==========="
# The ResourceManager is configured on hadoop103.
ssh "$RUN_USER@hadoop103" "$HADOOP_HOME/sbin/start-yarn.sh"
echo "================ 正在开启JobHistoryServer ==========="
ssh "$RUN_USER@hadoop103" "$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver"
Group-stop Hadoop, ZK and related processes (zhstop)
#!/bin/bash
# zhstop — stop Zookeeper, HDFS, YARN and the JobHistoryServer on the cluster.
# Adjust the user name and install paths below for your own environment.
readonly RUN_USER=zhengkw
readonly HADOOP_HOME=/opt/module/hadoop-2.7.2
readonly ZK_HOME=/opt/module/zookeeper-3.4.10

echo "================ 开始关闭所有节点服务 ==========="
echo "================ 正在关闭Zookeeper ==========="
for host in hadoop102 hadoop103 hadoop104; do
  # source /etc/profile so the non-interactive ssh session has JAVA_HOME etc.
  ssh "$host" "source /etc/profile && $ZK_HOME/bin/zkServer.sh stop"
done
echo "================ 正在关闭HDFS ==========="
# HDFS control scripts run from hadoop102 (the NameNode host).
ssh "$RUN_USER@hadoop102" "$HADOOP_HOME/sbin/stop-dfs.sh"
echo "================ 正在关闭YARN ==========="
# The ResourceManager is configured on hadoop103.
ssh "$RUN_USER@hadoop103" "$HADOOP_HOME/sbin/stop-yarn.sh"
echo "================ 正在关闭JobHistoryServer ==========="
ssh "$RUN_USER@hadoop103" "$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh stop historyserver"
Group-start Kafka script (kstart)
#!/bin/bash
# Start the Kafka broker on every cluster host (daemon mode).
# Adjust the Kafka install path below to match your own installation.
hosts=(hadoop102 hadoop103 hadoop104)
for host in "${hosts[@]}"; do
  echo "================ $host ================"
  ssh "$host" 'source /etc/profile && /opt/module/kafka/bin/kafka-server-start.sh -daemon /opt/module/kafka/config/server.properties'
done
Group-stop Kafka script (kstop)
#!/bin/bash
# Stop the Kafka broker on every cluster host.
# Adjust the Kafka install path below to match your own installation.
hosts=(hadoop102 hadoop103 hadoop104)
for host in "${hosts[@]}"; do
  echo "================ $host ================"
  ssh "$host" 'source /etc/profile && /opt/module/kafka/bin/kafka-server-stop.sh'
done
Note
The official Kafka shutdown script may no longer work; see the linked fix.
Notes
- After creating all the scripts, configure passwordless SSH between the hosts (see the SSH passwordless-login configuration reference).
- Give the scripts execute permission!
- Modify the user names and host names to match your own configuration!
- Distribute the scripts into /home/username/bin/ as appropriate.
Advanced Edition
zk.sh
#!/bin/bash
# zk.sh — one-button Zookeeper cluster control.
# Accepts exactly one argument: start | stop | status.
# Argument-count check.
if (($# != 1)); then
  echo "脚本只接受start或stop或status三个参数中任意一个" >&2
  exit 1
fi
# Validate the argument content, then run the command on every host via xcall.
# (case is whitespace-safe, unlike the unquoted [ $1 = start ] test.)
case "$1" in
  start|stop|status)
    xcall /opt/module/zookeeper-3.4.10/bin/zkServer.sh "$1"
    ;;
  *)
    echo "脚本只接受start或stop或status三个参数中任意一个" >&2
    exit 1
    ;;
esac
hd-yar.sh
#!/bin/bash
# hd-yar.sh — one-button start/stop of the Hadoop cluster
# (HDFS + YARN + HA ResourceManager + JobHistoryServer).
# Accepts exactly one argument: start | stop.
# Argument-count check.
if (($# != 1)); then
  echo "请输入start或stop参数中的任意一个" >&2
  exit 1
fi
# Validate the argument content, then drive the cluster.
case "$1" in
  start|stop)
    # HDFS daemons (start-dfs.sh / stop-dfs.sh, run from this node).
    "$1-dfs.sh"
    # YARN: the primary ResourceManager is configured on hadoop103.
    ssh hadoop103 "$1-yarn.sh"
    # HA: the standby ResourceManager on hadoop104 must be driven separately.
    # BUG FIX: the action was hard-coded to 'start', so 'stop' still STARTED it.
    ssh hadoop104 "yarn-daemon.sh $1 resourcemanager"
    # The history server is configured on hadoop103.
    ssh hadoop103 "mr-jobhistory-daemon.sh $1 historyserver"
    # Show surviving/started JVM processes on every host.
    xcall jps
    ;;
  *)
    echo "请输入start或stop参数中的任意一个" >&2
    exit 1
    ;;
esac