Setting up systemd auto-start daemons

Use systemd unit files so that zookeeper, kafka, hadoop, spark, flink, and hive start automatically on boot.

zookeeper

vi /usr/lib/systemd/system/zookeeper.service

[Unit]
Description=zookeeper
After=network.target

[Service]
Type=forking
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
#PIDFile=/opt/zookeeper/data/zookeeper_server.pid
Restart=always
RestartSec=0s
ExecStart=/opt/apache-zookeeper-3.6.2-bin/bin/zkServer.sh start
ExecStop=/opt/apache-zookeeper-3.6.2-bin/bin/zkServer.sh stop
ExecReload=/opt/apache-zookeeper-3.6.2-bin/bin/zkServer.sh restart
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
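
After saving the unit file, reload systemd and enable the unit so ZooKeeper comes up on boot. A minimal sketch using standard systemctl commands (the unit name matches the file created above):

systemctl daemon-reload
systemctl enable zookeeper
systemctl start zookeeper
systemctl status zookeeper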

kafka

vi /usr/lib/systemd/system/kafka.service
[Unit]
Description=kafka
Requires=network.target  zookeeper.service
After=network.target  zookeeper.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
#TimeoutSec=120
#SuccessExitStatus=143
User=root
Group=root
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
#Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/java/jdk1.8.0_181-cloudera/bin"
ExecStart=/opt/kafka_2.12-2.7.0/bin/kafka-server-start.sh -daemon /opt/kafka_2.12-2.7.0/config/server.properties
ExecStop=/opt/kafka_2.12-2.7.0/bin/kafka-server-stop.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
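
Enable kafka the same way; because of the Requires=/After= lines above, systemd will bring up zookeeper.service before the broker. A short sketch, with journalctl used to check the first startup:

systemctl daemon-reload
systemctl enable kafka
systemctl start kafka
journalctl -u kafka -n 50 --no-pager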

hadoop

namenode

vi /usr/lib/systemd/system/namenode.service
[Unit]
Description=hadoop namenode
After=network.target

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startnn.sh
ExecStop=/opt/start/stopnn.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target

vi /opt/start/startnn.sh
#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh start namenode
hdfs --daemon start namenode

vi /opt/start/stopnn.sh

#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh stop namenode
hdfs --daemon stop namenode
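
The unit runs these wrapper scripts directly, so they must be executable; JAVA_HOME and the hadoop variables are expected to come from /etc/profile, which each script sources. A minimal sketch:

chmod +x /opt/start/startnn.sh /opt/start/stopnn.sh
systemctl daemon-reload
systemctl enable namenode
systemctl start namenode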

datanode

vi /usr/lib/systemd/system/datanode.service
[Unit]
Description=hadoop datanode
After=network.target namenode.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startdn.sh
ExecStop=/opt/start/stopdn.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target

vi /opt/start/startdn.sh
#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh start datanode
hdfs --daemon start datanode

vi /opt/start/stopdn.sh

#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh stop datanode
hdfs --daemon stop datanode
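
Enable datanode the same way, then confirm the DataNode has registered with the NameNode. A quick check, assuming the hdfs client on this host is configured against the running cluster:

chmod +x /opt/start/startdn.sh /opt/start/stopdn.sh
systemctl enable --now datanode
hdfs dfsadmin -report
jps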

resourcemanager

vi /usr/lib/systemd/system/resourcemanager.service
[Unit]
Description=hadoop resourcemanager
After=network.target datanode.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startrm.sh
ExecStop=/opt/start/stoprm.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target

vi /opt/start/startrm.sh
#!/bin/bash
source /etc/profile
yarn --daemon start resourcemanager
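
The unit's ExecStop points at /opt/start/stoprm.sh, which is not shown above; it mirrors the start script. A sketch of what it presumably contains:

vi /opt/start/stoprm.sh
#!/bin/bash
source /etc/profile
yarn --daemon stop resourcemanager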

nodemanager is set up the same way as resourcemanager (same unit and script layout; see the sketch after this list), only the daemon command differs:

yarn --daemon start nodemanager

proxyserver, same as resourcemanager:

yarn --daemon start proxyserver

historyserver, same as resourcemanager:

mapred --daemon start historyserver
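
Each of these daemons reuses the resourcemanager layout: a pair of scripts under /opt/start plus a unit file whose ExecStart/ExecStop point at them. A sketch for nodemanager (the paths startnm.sh/stopnm.sh are hypothetical; proxyserver and historyserver follow the same shape, with yarn --daemon stop proxyserver and mapred --daemon stop historyserver as their stop commands):

vi /opt/start/startnm.sh
#!/bin/bash
source /etc/profile
yarn --daemon start nodemanager

vi /opt/start/stopnm.sh
#!/bin/bash
source /etc/profile
yarn --daemon stop nodemanager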

spark

vi /usr/lib/systemd/system/spark.service
[Unit]
Description=spark
Requires=network.target
After=network.target

[Service]
#Restart=always
#RestartSec=1
Type=forking
#TimeoutSec=120
#SuccessExitStatus=143
User=root
Group=root
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
ExecStart=/opt/spark-2.3.1-bin-hadoop2.7/sbin/start-all.sh
ExecStop=/opt/spark-2.3.1-bin-hadoop2.7/sbin/stop-all.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
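
start-all.sh starts the Spark master on this host and the configured workers over SSH, so the unit is normally installed only on the master node. Enabling it follows the usual pattern:

systemctl daemon-reload
systemctl enable spark
systemctl start spark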

flink

vi /usr/lib/systemd/system/flink.service
[Unit]
Description=flink-1.12.1 service
#After=syslog.target network.target zookeeper.service
After=network.target

[Service]
Restart=always
RestartSec=1
Type=forking
TimeoutSec=120

User=root
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera

ExecStart=/opt/flink-1.12.1/bin/start-cluster.sh
ExecStop=/opt/flink-1.12.1/bin/stop-cluster.sh

[Install]
WantedBy=multi-user.target
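
Enable flink the same way; start-cluster.sh brings up the JobManager and TaskManager processes, so a quick jps afterwards should show the corresponding JVMs:

systemctl daemon-reload
systemctl enable --now flink
jps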

hive

vi /usr/lib/systemd/system/hive.service
[Unit]
Description=hive
After=network.target nodemanager.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/starthive.sh
ExecStop=/opt/start/stophive.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target

vi /opt/start/starthive.sh
#!/bin/bash
source /etc/profile
nohup hive --service hiveserver2 > /opt/start/hive.log &

vi /opt/start/stophive.sh
#!/bin/bash
/opt/start/kill.sh hiveserver2

vi /opt/start/kill.sh
#!/bin/sh
# Kill processes whose command line matches the given name
if [ $# -lt 1 ]
then
  echo "Missing argument: process_name"
  exit 1
fi

PROCESS=`ps -ef|grep "$1"|grep -v grep|grep -v PPID|awk '{ print $2}'`
for i in $PROCESS
do
  echo "Kill the $1 process [ $i ]"
  kill -9 $i
done
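
As with the hadoop units, the wrapper scripts must be executable before the service can start. Note that kill.sh matches by name and uses kill -9, so any process whose command line contains the argument will be terminated; keep the argument specific (e.g. hiveserver2). A minimal sketch:

chmod +x /opt/start/starthive.sh /opt/start/stophive.sh /opt/start/kill.sh
systemctl daemon-reload
systemctl enable hive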

metastore

vi /opt/start/startmetastore.sh
#!/bin/bash
source /etc/profile
nohup hive  --service metastore > /opt/start/hivemetastore.log &

vi /opt/start/stopmetastore.sh
#!/bin/bash
/opt/start/kill.sh metastore

vi /usr/lib/systemd/system/metastore.service
[Unit]
Description=hive metastore
After=network.target nodemanager.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startmetastore.sh
ExecStop=/opt/start/stopmetastore.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
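
The metastore should be running before hiveserver2, so enable and start it first (or add metastore.service to the hive unit's After= line). A quick connectivity check, assuming HiveServer2 listens on its default port 10000:

chmod +x /opt/start/startmetastore.sh /opt/start/stopmetastore.sh
systemctl enable --now metastore
systemctl start hive
beeline -u jdbc:hive2://localhost:10000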
