Linux: common software installation, configuration, and basic commands (complete)

Linux installation notes
/home/soft ---- all software installation packages
/usr/local/soft ---- installation path for all software
Common commands
-------------------------------
Network / IP configuration:

source /etc/profile ---- make changes to the profile take effect immediately
echo 'hello world' >> 1.txt ---- append text to the file 1.txt
ifconfig ---- show the IP address
ifconfig eth0 192.168.15.16 ---- change the IP address temporarily
vi /etc/sysconfig/network-scripts/ifcfg-node1 ---- edit the interface configuration file for a permanent change
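For reference, a minimal static-IP interface file on CentOS 6 might look like the sketch below (device name, addresses, and DNS are placeholder values):
# /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes              # bring the interface up at boot
BOOTPROTO=static        # static address instead of DHCP
IPADDR=192.168.15.16
NETMASK=255.255.255.0
GATEWAY=192.168.15.1
DNS1=192.168.15.1
# apply the change: service network restart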
Hostname:

hostname ---- show the hostname
hostname <name> ---- set the hostname temporarily; it reverts after a reboot (to test it, run logout and log back in)
vim /etc/sysconfig/network ---- set the hostname permanently
Host-to-IP mapping:
vi /etc/hosts
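As a sketch (assuming the hostname node16 and the IP address used above), the two files would contain entries like:
# /etc/sysconfig/network
HOSTNAME=node16
# /etc/hosts
192.168.15.16   node16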
Passwordless SSH login:
cd ~/.ssh
Generate a public/private key pair:
ssh-keygen -t rsa
id_rsa  id_rsa.pub      (the private and public key files it creates)
Test: ssh node16 (at this point a password is still required)
Append the contents of id_rsa.pub to node16's authorized-keys file:
authorized_keys
Test again: ssh node16 -- no password is needed this time
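Putting the steps together, a minimal sketch (assuming the target host is node16 and the current user's default key pair):
ssh-keygen -t rsa      # accept the defaults; creates ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
ssh-copy-id node16     # appends id_rsa.pub to node16:~/.ssh/authorized_keys
# or, done by hand:
cat ~/.ssh/id_rsa.pub | ssh node16 'mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys'
ssh node16             # should now log in without a password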
man : manual page for a command
--help : built-in help for a command
pwd : print the current working directory
whoami : show the current user
mkdir : make directories
mkdir -p : create parent directories as needed; no error if they already exist
rm -r : remove directories and their contents recursively
-f : force; ignore nonexistent files, never prompt
mv : move (rename) files, i.e. rename or cut-and-paste
cp : copy files and directories
cd : change directory
ls -a : do not hide entries starting with .
-l : use a long listing format
ll : common alias for ls -l
more : view output one page at a time
head : show the first 10 lines of a file by default
head -5 : show the first 5 lines
tail : show the last 10 lines of a file by default
tail -5 : show the last 5 lines
cat : print the contents of a file
vi / vim
-------------------------
i (insert) : enter insert (editing) mode
Esc : leave insert mode
: : open the bottom command line
:wq : save and quit
:q! : quit without saving
G : jump to the last line of the file
gg : jump to the first line of the file
dd : delete the line under the cursor
ndd : delete n lines starting from the cursor line (downward)
d1G : delete from the cursor line up to the first line
dG : delete from the cursor line down to the last line
d$ : delete from the cursor to the end of the line
d0 : delete from the cursor to the beginning of the line
yy : copy (yank) the current line
nyy : copy n lines starting from the cursor line
y1G : copy from the cursor line up to the first line
yG : copy from the cursor line down to the last line
y0 : copy from the cursor to the beginning of the line
y$ : copy from the character under the cursor to the end of the line
p : paste the copied data on the line below the cursor
P : paste it on the line above the cursor
/word : search for the string "word"
n : jump to the next match (search downward)
N : jump to the previous match (search upward)
Delete every line after the current one:
:.+1,$d
Delete from the first line to the current line:
:1,.d

File permissions
-------------------------------
drwxr-xr-x   -rw-r--r--
 r : 4 (read)
 w : 2 (write)
 x : 1 (execute)
 d : directory
 - : regular file
 rwx : the owner has read, write, execute = 7
 r-x : other users in the file's group have read and execute = 5
 r-x : everyone else (not in the group) has read and execute = 5

 r : 4   w : 2   x : 1
 7 : rwx
 6 : rw-
 5 : r-x
 4 : r--
# useradd : add a user
  a group with the same name as the user is created by default
# passwd : set a user's password
# su : switch user
  root -> regular user : no password required
   regular user -> any user : the system prompts for a password
  su with no user specified switches to the superuser (root)
# chmod : change a file's permissions
# chgrp : change a file's group
# chown : change a file's owner

# usermod -g <group> <user> : change the user's primary group
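A small sketch tying these commands together (the user name, group, and file path are made-up examples):
useradd tom                    # creates user tom and, by default, a group named tom
passwd tom                     # set tom's password
chmod 755 /home/soft/run.sh    # rwxr-xr-x
chown tom /home/soft/run.sh    # tom becomes the owner
chgrp tom /home/soft/run.sh    # the group becomes tom
usermod -g tom tom             # set tom's primary group
su - tom                       # switch to tom (root is not asked for a password)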

crontab -e ------ edit the crontab to set up a scheduled task
    */1 * * * * /home/b.sh        -- run the script /home/b.sh every minute
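The five time fields are minute, hour, day of month, month, and day of week. A small sketch of crontab entries (the backup script path is just a made-up example):
    # min  hour  day  month  weekday   command
    */1    *     *    *      *         /home/b.sh         # every minute
    0      2     *    *      *         /home/backup.sh    # every day at 02:00 (hypothetical script)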


JavaEE environment setup
jdk
----------------
rz : a small tool for uploading files to / downloading files from the server over the terminal
yum install -y lrzsz
rz

tar -zxvf jdk-8u151-linux-x64.tar.gz -C /usr/local/soft


Configure the environment variables
 > Check whether they are already configured:
  echo $JAVA_HOME
  which java
  javac          ------ test command
 > vi /etc/profile (or ~/.bash_profile)      -------- the configuration file to edit
 export JAVA_HOME=/usr/local/jdk
 export PATH=$JAVA_HOME/bin:$PATH
 > source /etc/profile (or source ~/.bash_profile)
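To verify that the JDK is picked up after sourcing the profile, a quick check (paths as configured above):
 echo $JAVA_HOME        # should print /usr/local/jdk
 which java             # should resolve to $JAVA_HOME/bin/java
 java -version          # prints the installed JDK version
 javac -version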


tomcat
------------------------
tar -zxvf apache-tomcat-*.tar.gz -C /usr/local/soft
cd /usr/local/soft && mv apache-tomcat-* tomcat-7.0      (rename the extracted directory)
Configure the environment variables:
vi /etc/profile
export TOMCAT_HOME=/usr/local/soft/tomcat-7.0
export PATH=$TOMCAT_HOME/bin:$PATH
Test: start Tomcat (bin/startup.sh); the output should look like this:
Using CATALINA_BASE:   /usr/local/soft/tomcat-7.0
Using CATALINA_HOME:   /usr/local/soft/tomcat-7.0
Using CATALINA_TMPDIR: /usr/local/soft/tomcat-7.0/temp
Using JRE_HOME:        /usr/local/soft/jdk-1.8
Using CLASSPATH:       /usr/local/soft/tomcat-7.0/bin/bootstrap.jar:/usr/local/soft/tomcat-7.0/bin/tomcat-juli.jar
Tomcat started.
jps
2301 Bootstrap

Check port 8080:
ps -ef | grep tomcat
netstat -nltp | grep 8080

Disable SELinux:
vim /etc/selinux/config
 SELINUX=disabled
Open port 8080 in the firewall:
# edit the firewall configuration file
# vi /etc/sysconfig/iptables
  -A INPUT -p tcp -m tcp --dport 8080 -j ACCEPT      (place this as the first -A INPUT rule)
  Restart the firewall:
  # service iptables restart
  # /etc/init.d/iptables restart
Start Tomcat automatically when the Linux system reboots:
# echo "source /etc/profile" >> /etc/rc.d/rc.local
# echo "/usr/local/tomcat/bin/startup.sh" >> /etc/rc.d/rc.local

Running multiple Tomcat instances on the same server (change the ports in the second instance's conf/server.xml):

<Server port="8006" shutdown="SHUTDOWN">
  <Listener className="org.apache.catalina.startup.VersionLoggerListener" />

  <Connector port="8081" protocol="HTTP/1.1"
               connectionTimeout="20000"
               redirectPort="8443" />


<Connector port="8010" protocol="AJP/1.3" redirectPort="8443" />

mysql 5.1

rpm -qa | grep -i mysql         # check whether MySQL is already installed
yum -y remove mysql-libs*       # remove the bundled MySQL libraries if present


yum list | grep mysql
yum install -y mysql-server mysql mysql-devel 
service mysqld start
mysqladmin -u root password 'root'



> yum list installed | grep mysql
 (MySQL was installed online with yum)
yum search mysql
(this works whether it was installed with yum or with rpm)
Find where the yum-installed MySQL files live:
find / -name "mysql*"
yum/rpm splits the files across three directories:
/etc : the software's configuration files
/var : variable data (databases, logs, etc.)
/usr : the main program files
After MySQL is installed there is no root password by default; set one and allow remote access:
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root' WITH GRANT OPTION;
delete from user where host != '%';       (run in the mysql database)
service mysqld stop
service mysqld start
Check MySQL's character encoding:
#SHOW VARIABLES LIKE 'character%';
vi /etc/my.cnf
Example contents of /etc/my.cnf:
    [mysqld]
    character-set-server=utf8
    datadir=/var/lib/mysql
    socket=/var/lib/mysql/mysql.sock
    user=mysql
    # Disabling symbolic-links is recommended to prevent assorted security risks
    symbolic-links=0
    [mysqld_safe]
    log-error=/var/log/mysqld.log
    pid-file=/var/run/mysqld/mysqld.pid
    [client]
    default-character-set=utf8
    [mysql]
    default-character-set=utf8
Open port 3306 in the firewall so MySQL can be reached remotely:
vi /etc/sysconfig/iptables
-A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT

chkconfig mysqld on        # start MySQL automatically at boot
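After the GRANT statement and the firewall rule above, remote access can be verified from another machine, roughly like this (the server IP is an assumption based on the addresses used earlier in these notes):
mysql -h 192.168.15.16 -u root -p      # enter the password set above ('root')
mysql> SHOW DATABASES;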

mysql 5.6

1. On a newly provisioned (cloud) server, check whether the system already ships with MySQL:

# yum list installed | grep mysql
2. If it does, remove the bundled packages:

# yum -y remove mysql-libs.x86_64
3. Run the following in any working directory. Note that the MySQL yum repository is hosted overseas, so the download can be slow; MySQL 5.6 is only about 79 MB, while 5.7 is about 182 MB, which is why 5.6 is used here.

# wget http://repo.mysql.com/mysql-community-release-el6-5.noarch.rpm
4. Then install the rpm. It is not the MySQL installer itself, just two yum repo definitions; after installing it, /etc/yum.repos.d/ gains mysql-community-source.repo and mysql-community.repo.

# rpm -ivh mysql-community-release-el6-5.noarch.rpm
5. At this point, confirm that the MySQL packages are now available:

# yum repolist all | grep mysql
6. Install the MySQL server (answer yes to the prompts):

# yum install mysql-community-server -y
7. After the installation succeeds, start the service:

# service mysqld start
8. MySQL has no root password by default after installation.
Delete all existing accounts (run inside the mysql database):
delete from user;
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root' WITH GRANT OPTION;
service mysqld restart

Check MySQL's default character encoding:
SHOW VARIABLES LIKE 'character%';
Set the encoding:
vim /etc/my.cnf
[mysqld]
character_set_server=utf8

Import data into MySQL:

mysql> source /home/bank.sql


9. Check whether MySQL starts at boot, and enable it:
# chkconfig --list | grep mysqld
# chkconfig mysqld on
10. Secure the MySQL installation (the script asks a few questions; answering yes to all of them is generally fine):

# mysql_secure_installation

MySQL master-slave replication
-------------------------------
master
[mysqld]
server-id=1
log-bin=master-bin
log-bin-index=master-bin.index
-------------------------------
slave
[mysqld]
server-id=2
relay-log-index=slave-relay-bin.index
relay-log=slave-relay-bin

> vim /etc/my.cnf
create user repl;      -- create the replication user
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.15.%' IDENTIFIED BY 'mysql';
[mysqld]
server-id=1
log-bin=master-bin
log-bin-index=master-bin.index

service mysqld restart      # restart the master
show master status;         -- check the master's status (note the binlog file name and position)

On the slave (vim /etc/my.cnf):
server-id=2
relay-log-index=slave-relay-bin.index
relay-log=slave-relay-bin


change master to master_host='192.168.15.50', 
master_port=3306,
master_user='repl',
master_password='mysql', 
master_log_file='master-bin.000001',
master_log_pos=0;      -- replicate the master's binlog from the beginning

start slave;      -- start replication (stop slave; stops it)

show slave status\G      -- check the slave's status
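In the \G output the key fields are Slave_IO_Running and Slave_SQL_Running; replication is healthy only when both report Yes. A quick end-to-end check, as a sketch (the database name is just an example):
# on the master
mysql -u root -p -e "CREATE DATABASE repl_test;"
# on the slave, a moment later
mysql -u root -p -e "SHOW DATABASES;"      # repl_test should have been replicated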
--------------------------------------------------

1. Install Flume
1. Configure the environment variables:
   export FLUME_HOME=/usr/local/soft/flume
   export PATH=$FLUME_HOME/bin:$PATH
2. After installation, copy flume-env.sh.template to flume-env.sh and set the JDK path (JAVA_HOME) in it.
3.1 Write the configuration file 1.conf (netcat source + memory channel + logger sink):
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = netcat          // source type: listen for text over a TCP port
a1.sources.r1.bind = 192.168.16.2    // address to listen on
a1.sources.r1.port = 44444           // port to listen on

# Describe the sink
a1.sinks.k1.type = logger

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1  
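To run this agent with the configuration above, the start command (shown again later in these notes) is:
bin/flume-ng agent --conf conf --conf-file conf/1.conf --name a1 -Dflume.root.logger=INFO,console
Test lines can then be sent to port 44444 with telnet or nc, as shown after the telnet installation further below.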

3.2 Monitor a text file: 2.conf
(exec source + memory channel + logger sink)
# example.conf: A single-node Flume configuration
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = exec
# monitor a file
a1.sources.r1.command = tail -F /home/root/1.log      // path of the file to monitor
a1.sources.r1.shell=/bin/sh -c
# Describe the sink
a1.sinks.k1.type = logger

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

3.3 Monitor a log file and write the output to HDFS. // Requirement: copy the required Hadoop jars into Flume's lib directory (see the note below)
(exec source + memory channel + hdfs sink)

# example.conf: A single-node Flume configuration
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = exec
# monitor a file
a1.sources.r1.command = tail -F /home/root/1.txt          // path of the file to monitor
a1.sources.r1.shell=/bin/sh -c
# Describe the sink

a1.sinks.k1.type = hdfs
# write plain text instead of sequence files (avoids garbled output)
a1.sinks.k1.hdfs.fileType=DataStream
a1.sinks.k1.hdfs.path = hdfs://192.168.15.11:9000/flume/webdata11/          // target path on HDFS
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

Note: required jars:
 Exception: java.lang.NoClassDefFoundError: org/apache/hadoop/io/SequenceFile$CompressionType
    The HDFS dependencies are missing; copy the corresponding jars from the Hadoop installation into Flume's lib directory:
    /usr/local/hadoop/share/hadoop/common/lib/*.jar
    /usr/local/hadoop/share/hadoop/common/*.jar
    /usr/local/hadoop/share/hadoop/hdfs/hadoop-hdfs-2.6.0.jar
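Once the agent is running, the files Flume writes to HDFS can be inspected with the path configured above:
hdfs dfs -ls /flume/webdata11/       # list the files written by the hdfs sink
hdfs dfs -cat /flume/webdata11/*     # view their contents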

3.4 avro sink + avro source (two chained agents)
a1.sources = r1
a1.sinks = k1
a1.channels = c1
a1.sources.r1.type = exec
a1.sources.r1.shell=/bin/bash -c
a1.sources.r1.command=tail -F /home/root/9.log      // file to monitor
a1.sinks.k1.type = avro                             // sink type: forward events to a downstream avro source
a1.sinks.k1.hostname = 192.168.15.12                // address of the downstream agent
a1.sinks.k1.port = 4141                             // its port
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# start this agent second (after the avro listener below is already running)
bin/flume-ng agent --conf conf --conf-file conf/3.conf --name a1 -Dflume.root.logger=INFO,console


a2.sources = r1
a2.sinks = k1
a2.channels = c1
a2.sources.r1.type = avro
a2.sources.r1.bind = 0.0.0.0
a2.sources.r1.port = 4141
a2.sinks.k1.type = logger
a2.channels.c1.type = memory
# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1
# start this listening agent first
bin/flume-ng agent --conf conf --conf-file conf/2.conf --name a2 -Dflume.root.logger=INFO,console

--------------------------------
a2.sources = r1
a2.sinks = k1
a2.channels = c1
a2.sources.r1.type = avro
a2.sources.r1.bind = 0.0.0.0
a2.sources.r1.port = 4141
a2.sinks.k1.type = hdfs
# write plain text instead of sequence files (avoids garbled output)
a2.sinks.k1.hdfs.fileType=DataStream
a2.sinks.k1.hdfs.path = hdfs://192.168.15.11:9000/flume/webdata21/
a2.channels.c1.type = memory
# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1

Requirement: integrate Flume with Spark Streaming (push mode, avro sink)
# example.conf: A single-node Flume configuration
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = node11
a1.sources.r1.port = 44444

a1.sinks.k1.type = avro
a1.sinks.k1.hostname  = node11
a1.sinks.k1.port = 41414

# Use a channel which buffers events in memory
a1.channels.c1.type = memory

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

bin/flume-ng agent --conf conf --conf-file conf/s1.conf --name a1 -Dflume.root.logger=INFO,console

Requirement: integrate Flume with Spark Streaming via the SparkSink (pull mode):

# example.conf: A single-node Flume configuration
# Name the components on this agent
a1.sources = r1
a1.sinks = spark
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = node11
a1.sources.r1.port = 44444

a1.sinks.spark.type = org.apache.spark.streaming.flume.sink.SparkSink
a1.sinks.spark.hostname  = node11
a1.sinks.spark.port = 41414

# Use a channel which buffers events in memory
a1.channels.c1.type = memory

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.spark.channel = c1


yum install nc.x86_64 -y      # install netcat (nc) for sending test data

Solution: copy spark-streaming-flume-sink_2.11-2.2.0.jar into Flume's lib directory. (Use the jar that matches your Scala and Spark versions.)

Big data: ZooKeeper
--------------------
vim conf/zoo.cfg
dataDir=/usr/local/zookeeper/tmp/zookeeper

4. Start Flume (--conf-file points at the configuration to load; --name must match the agent name in that file; the logger option prints the events to the console):
bin/flume-ng agent --conf conf \
    --conf-file conf/1.conf \
    --name a1 \
    -Dflume.root.logger=INFO,console

When testing: if telnet is not installed, install it first:
yum install telnet -y
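With the netcat-source agent from 1.conf running, the source can then be exercised directly (address and port as configured in 1.conf):
telnet 192.168.16.2 44444      # every line typed here is printed by the logger sink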

    Big data: Kafka
    ---------------------------------
    Configure Kafka:
    vim config/server.properties
    broker.id=0
    #listeners=PLAINTEXT://:9092
    log.dirs=/usr/local/kafka/kafka-logs
    zookeeper.connect=localhost:2181

    Start the Kafka service:
    bin/kafka-server-start.sh config/server.properties

    Start the Kafka service in the background:
    bin/kafka-server-start.sh -daemon config/server.properties


    # list the topics that currently exist
    bin/kafka-topics.sh --list --zookeeper localhost:2181

    # create a topic
    bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test

    # start a producer and send some messages
    bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test

    # start a consumer
    bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning

    # create a replicated topic on the cluster
    bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 3 --partitions 1 --topic my-replicated-topic

    # describe the replicated topic
    bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic my-replicated-topic

    # start a producer for the replicated topic
    bin/kafka-console-producer.sh --broker-list localhost:9092,localhost:9093,localhost:9094 --topic my-replicated-topic

    # start a consumer for the replicated topic
    bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-replicated-topic --from-beginning




    Flume integrated with Kafka

    Start Flume:
    bin/flume-ng agent --conf conf --conf-file conf/3.conf --name a1 -Dflume.root.logger=INFO,console

    # example.conf: A single-node Flume configuration
    # Name the components on this agent
    a1.sources = r1
    a1.sinks = k1
    a1.channels = c1
    # Describe/configure the source
    a1.sources.r1.type = exec
    # monitor a file
    a1.sources.r1.command = tail -F /home/root/1.log
    a1.sources.r1.shell=/bin/sh -c
    # Describe the sink
    a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
    a1.sinks.k1.kafka.bootstrap.servers=192.168.15.11:9092
    a1.sinks.k1.kafka.topic = test
    a1.sinks.k1.kafka.flumeBatchSize = 5
    a1.sinks.k1.kafka.producer.acks = 1
    # Use a channel which buffers events in memory
    a1.channels.c1.type = memory
    a1.channels.c1.capacity = 1000
    a1.channels.c1.transactionCapacity = 100

    # Bind the source and sink to the channel
    a1.sources.r1.channels = c1
    a1.sinks.k1.channel = c1

    Start Flume:
    bin/flume-ng agent --conf conf \
    --conf-file conf/3.conf \
    --name a1 \
    -Dflume.root.logger=INFO,console
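    With the agent running, the pipeline can be checked end to end by appending a line to the monitored file and reading it back from the Kafka topic (reusing the broker and topic configured above):
    echo "hello kafka" >> /home/root/1.log
    bin/kafka-console-consumer.sh --bootstrap-server 192.168.15.11:9092 --topic test --from-beginning
    # the appended line should appear in the consumer's output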


Reposted from blog.csdn.net/weixin_41518795/article/details/81907960