Shell scripts to quickly start a Hadoop HA cluster

1. Edit allzkServer.sh

vi allzkServer.sh

#!/bin/bash

case $1 in
"start"){
    
    
  for i in slave1 slave2 slave3;
  do
    echo "*********$i zkServer start*********"
    ssh $i "source /etc/profile;/usr/Software/ZooKeeper/zookeeper-3.4.13/bin/zkServer.sh start";
  done
};;
"stop"){
    
    
  for i in slave1 slave2 slave3;
  do
    echo "*********$i zkServer stop*********"
    ssh $i "source /etc/profile;/usr/Software/ZooKeeper/zookeeper-3.4.13/bin/zkServer.sh stop";
  done
};;
esac

Put allzkServer.sh under ZOOKEEPER_HOME/bin on the slave1 node
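
If the file was written on another node, a minimal way to get it into place and make it executable looks roughly like this (assuming passwordless SSH and the ZooKeeper path used above):

scp allzkServer.sh slave1:/usr/Software/ZooKeeper/zookeeper-3.4.13/bin/
ssh slave1 "chmod +x /usr/Software/ZooKeeper/zookeeper-3.4.13/bin/allzkServer.sh"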

2. Edit alljournal.sh

vi alljournal.sh

#!/bin/bash

case $1 in
"start"){
    
    

  echo "*********slave1 journalnode start*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh start journalnode

  for i in slave2 slave3;
  do
    echo "*********$i journalnode start*********"
    ssh $i "source /etc/profile;/usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh start journalnode";
  done
};;

"stop"){
    
    

  echo "*********slave1 journalnode stop*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh stop journalnode

  for i in slave2 slave3;
  do
    echo "*********$i journalnode stop*********"
    ssh $i "source /etc/profile;/usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh stop journalnode";
  done
};;
esac

Put alljournal.sh in the HADOOP_HOME/bin directory on the slave1 node
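
Once the script is in place and has been run, a quick way to confirm that a JournalNode is up on every slave is to check the Java process list with jps (shipped with the JDK); a minimal sketch:

# list the HA-related Java processes on each slave
for i in slave1 slave2 slave3; do
  echo "---- $i ----"
  ssh $i "source /etc/profile; jps | grep -E 'JournalNode|QuorumPeerMain'"
done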

3. Edit allhdfs.sh

vi allhdfs.sh

#!/bin/bash

case $1 in
"start"){
    
    

  echo "*********slave zkServer start*********"
  ssh slave1 "/usr/Software/ZooKeeper/zookeeper-3.4.13/bin/allzkServer.sh start";

  echo "*********slave journalnode start*********"
  ssh slave1 "/usr/Software/Hadoop/hadoop-2.7.3/bin/alljournal.sh start";

  echo "*********master1 namenode start*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh start namenode

  echo "*********master1 zkfc start*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh start zkfc

  echo "*********master1 yarn start*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/start-yarn.sh

  echo "*********slave datanode start*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemons.sh start datanode

  echo "*********master2 namenode-bootstrapStandby start*********"
  ssh master2 "/usr/Software/Hadoop/hadoop-2.7.3/bin/hdfs namenode -bootstrapStandby";

  echo "*********master2 namenode start*********"
  ssh master2 "/usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh start namenode";

  echo "*********master2 zkfc start*********"
  ssh master2 "/usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh start zkfc";

  echo "*********master2 resourcemanager start*********"
  ssh master2 "/usr/Software/Hadoop/hadoop-2.7.3/sbin/yarn-daemon.sh start resourcemanager";

  echo "*********master1 MRhistory start*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/mr-jobhistory-daemon.sh start historyserver

};;

"stop"){
    
    
  
  echo "*********master1 MRhistory stop*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/mr-jobhistory-daemon.sh stop historyserver

  echo "*********master2 resourcemanager stop*********"
  ssh master2 "/usr/Software/Hadoop/hadoop-2.7.3/sbin/yarn-daemon.sh stop resourcemanager";

  echo "*********master2 zkfc stop*********"
  ssh master2 "/usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh stop zkfc";

  echo "*********master2 namenode stop*********"
  ssh master2 "/usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh stop namenode";

  echo "*********slave datanode stop*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemons.sh stop datanode

  echo "*********master1 yarn stop*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/stop-yarn.sh

  echo "*********master1 zkfc stop*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh stop zkfc

  echo "*********master1 namenode stop*********"
  /usr/Software/Hadoop/hadoop-2.7.3/sbin/hadoop-daemon.sh stop namenode

  echo "*********slave journalnode stop*********"
  ssh slave1 "/usr/Software/Hadoop/hadoop-2.7.3/bin/alljournal.sh stop";

  echo "*********slave zkServer stop*********"
  ssh slave1 "/usr/Software/ZooKeeper/zookeeper-3.4.13/bin/allzkServer.sh stop";

};;

esac

Put allhdfs.sh in the HADOOP_HOME/bin directory of the master1 node
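
Like the other two scripts, allhdfs.sh has to be executable; a quick sanity check (assuming HADOOP_HOME/bin is already on the PATH, as explained in the next step):

chmod +x /usr/Software/Hadoop/hadoop-2.7.3/bin/allhdfs.sh
which allhdfs.sh    # should print the path above if HADOOP_HOME/bin is on the PATH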

4. Start the cluster

The scripts are placed in the bin directories mainly because those directories are already on the PATH through the environment variables configured earlier, so each script can be run by name from anywhere instead of by its full path.
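
For reference, a sketch of the kind of /etc/profile entries the scripts rely on (the exact values depend on how the cluster was installed):

export ZOOKEEPER_HOME=/usr/Software/ZooKeeper/zookeeper-3.4.13
export HADOOP_HOME=/usr/Software/Hadoop/hadoop-2.7.3
export PATH=$PATH:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin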

# run on master1

allhdfs.sh start   # start all processes, including the MapReduce JobHistory server

allhdfs.sh stop    # shut down the cluster
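
After allhdfs.sh start finishes, jps should show the HA daemons on both masters, and hdfs haadmin can report which NameNode is active. A sketch (nn1/nn2 are placeholder IDs; use the values of dfs.ha.namenodes in hdfs-site.xml):

# on master1: expect NameNode, DFSZKFailoverController, ResourceManager, JobHistoryServer
jps
# on master2: expect NameNode, DFSZKFailoverController, ResourceManager
ssh master2 jps

# check which NameNode is currently active
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2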

