大数据平台搭建(3)

注意:因为博客中美元符号有特殊含义,所以将美元符号替换为&
i.配置hdfs-site.xml

<configuration>
   <!-- Local filesystem path where the NameNode stores its fsimage/edits. -->
   <property>
      <name>dfs.namenode.name.dir</name>
      <value>file:/usr/local/hadoop-2.7.3/dfs/name</value>
   </property>
   <!-- Local filesystem path where DataNodes store block data. -->
   <property>
      <name>dfs.datanode.data.dir</name>
      <value>file:/usr/local/hadoop-2.7.3/dfs/data</value>
   </property>
   <!-- Number of replicas kept for each block. -->
   <property>
      <name>dfs.replication</name>
      <value>3</value>
   </property>
   <!-- Enable the WebHDFS REST API on NameNodes/DataNodes. -->
   <property>
      <name>dfs.webhdfs.enabled</name>
      <value>true</value>
   </property>
   <!-- Disable HDFS permission checking. dfs.permissions is the legacy
        property name; dfs.permissions.enabled is the current one — both
        are set here for compatibility. -->
   <property>
      <name>dfs.permissions</name>
      <value>false</value>
   </property>
   <property>
      <name>dfs.permissions.enabled</name>
      <value>false</value>
   </property>
   <!-- dfs.nameservices: logical name of this nameservice. With HDFS
        Federation several names may be listed, separated by commas. -->
   <property>
      <name>dfs.nameservices</name>
      <value>mycluster</value>
   </property>
   <!-- dfs.ha.namenodes.[nameservice ID]: unique IDs of all NameNodes in
        the nameservice, comma-separated. This lets DataNodes locate every
        NameNode. In Hadoop 2.x at most two NameNodes per nameservice. -->
   <property>
      <name>dfs.ha.namenodes.mycluster</name>
      <value>nn1,nn2</value>
   </property>
   <!-- dfs.namenode.rpc-address.[nameservice ID].[name node ID]:
        RPC address each NameNode listens on. -->
   <property>
      <name>dfs.namenode.rpc-address.mycluster.nn1</name>
      <value>namenode01:9000</value>
   </property>
   <property>
      <name>dfs.namenode.rpc-address.mycluster.nn2</name>
      <value>namenode02:9000</value>
   </property>
   <!-- Separate service RPC address used by DataNodes/ZKFC, keeping their
        traffic off the client RPC port. -->
   <property>
      <name>dfs.namenode.servicerpc-address.mycluster.nn1</name>
      <value>namenode01:53310</value>
   </property>
   <property>
      <name>dfs.namenode.servicerpc-address.mycluster.nn2</name>
      <value>namenode02:53310</value>
   </property>
   <!-- dfs.namenode.http-address.[nameservice ID].[name node ID]:
        HTTP (web UI) address each NameNode listens on. -->
   <property>
      <name>dfs.namenode.http-address.mycluster.nn1</name>
      <value>namenode01:50070</value>
   </property>
   <property>
      <name>dfs.namenode.http-address.mycluster.nn2</name>
      <value>namenode02:50070</value>
   </property>
   <!-- dfs.namenode.shared.edits.dir: URI of the JournalNode group through
        which the NameNodes read/write the shared edit log. Format:
        "qjournal://host1:port1;host2:port2;host3:port3/journalId".
        host1..host3 are JournalNode addresses — an odd number, at least 3.
        journalId uniquely identifies the cluster; federated namespaces
        share the same journalId. -->
   <property>
      <name>dfs.namenode.shared.edits.dir</name>
      <value>qjournal://192.168.0.25:8485;192.168.0.26:8485;192.168.0.27:8485/mycluster</value>
   </property>
   <!-- dfs.client.failover.proxy.provider.[nameservice ID]: class HDFS
        clients use to find the Active NameNode. -->
   <property>
      <name>dfs.client.failover.proxy.provider.mycluster</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
   </property>
   <!-- dfs.ha.fencing.methods: how to fence the old Active NameNode during
        failover (here: kill it over SSH). -->
   <property>
      <name>dfs.ha.fencing.methods</name>
      <value>sshfence</value>
   </property>
   <!-- SSH private key used by the sshfence method. -->
   <property>
      <name>dfs.ha.fencing.ssh.private-key-files</name>
      <value>/root/.ssh/id_rsa</value>
   </property>
   <!-- SSH connect timeout for fencing, in milliseconds. -->
   <property>
      <name>dfs.ha.fencing.ssh.connect-timeout</name>
      <value>30000</value>
   </property>
   <!-- dfs.journalnode.edits.dir: local path where each JournalNode
        process keeps its state. -->
   <property>
      <name>dfs.journalnode.edits.dir</name>
      <value>/usr/local/hadoop-2.7.3/tmp/journal</value>
   </property>
   <!-- Let ZKFC perform automatic Active/Standby failover. -->
   <property>
      <name>dfs.ha.automatic-failover.enabled</name>
      <value>true</value>
   </property>
   <property>
      <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
      <value>60000</value>
   </property>
   <property>
      <name>ipc.client.connect.timeout</name>
      <value>60000</value>
   </property>
   <!-- Throttle fsimage transfers between NameNodes (bytes/sec). -->
   <property>
      <name>dfs.image.transfer.bandwidthPerSec</name>
      <value>4194304</value>
   </property>
</configuration>

j.配置mapred-site.xml

<configuration>
  <!-- Run MapReduce jobs on YARN rather than the classic/local framework. -->
  <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
  </property>
</configuration>

k.配置yarn-site.xml

<configuration>
    <!-- How often (ms) clients retry connecting to the ResourceManager. -->
    <property>
        <name>yarn.resourcemanager.connect.retry-interval.ms</name>
        <value>2000</value>
    </property>
    <!-- Enable ResourceManager high availability with automatic failover. -->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <!-- Logical IDs of the two ResourceManagers. -->
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>
    <!-- Per-host value: set to rm1 on the rm1 host and to rm2 on the
         rm2 host. -->
    <property>
        <name>yarn.resourcemanager.ha.id</name>
        <value>rm1</value>
    </property>
    <!-- Persist RM state in ZooKeeper so a new Active RM can recover. -->
    <property>
        <name>yarn.resourcemanager.store.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    </property>
    <property>
        <name>yarn.resourcemanager.zk.state-store.address</name>
        <value>namenode01:2181</value>
    </property>
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>namenode01:2181</value>
    </property>
    <property>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
        <value>5000</value>
    </property>
    <!-- RM1 configs -->
    <property>
        <name>yarn.resourcemanager.address.rm1</name>
        <value>namenode01:23140</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm1</name>
        <value>namenode01:23130</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address.rm1</name>
        <value>namenode01:23188</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
        <value>namenode01:23125</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address.rm1</name>
        <value>namenode01:23141</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.admin.address.rm1</name>
        <value>namenode01:23142</value>
    </property>

    <!-- RM2 configs -->
    <property>
        <name>yarn.resourcemanager.address.rm2</name>
        <value>namenode02:23140</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address.rm2</name>
        <value>namenode02:23130</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address.rm2</name>
        <value>namenode02:23188</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
        <value>namenode02:23125</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address.rm2</name>
        <value>namenode02:23141</value>
    </property>
    <property>
        <name>yarn.resourcemanager.ha.admin.address.rm2</name>
        <value>namenode02:23142</value>
    </property>

    <!-- Node Manager Configs -->
    <property>
        <description>Address where the localizer IPC is.</description>
        <name>yarn.nodemanager.localizer.address</name>
        <value>0.0.0.0:23344</value>
    </property>
    <property>
        <description>NM Webapp address.</description>
        <name>yarn.nodemanager.webapp.address</name>
        <value>0.0.0.0:23999</value>
    </property>
    <!-- Auxiliary shuffle service required by MapReduce on YARN. -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <!-- Local scratch and container-log directories on each NodeManager. -->
    <property>
        <name>yarn.nodemanager.local-dirs</name>
        <value>/usr/local/hadoop-2.7.3/tmp/yarn/local</value>
    </property>
    <property>
        <name>yarn.nodemanager.log-dirs</name>
        <value>/usr/local/hadoop-2.7.3/tmp/yarn/log</value>
    </property>
    <property>
        <name>mapreduce.shuffle.port</name>
        <value>23080</value>
    </property>
</configuration>

11.启动集群
a.namenode01节点执行如下命令,创建命名空间(hdfs zkfc -formatZK)
b.在每个节点用如下命令启日志程序(hadoop-daemon.sh start journalnode)
c.在主namenode01节点用hadoop namenode -format格式化namenode和journalnode目录(hadoop namenode -format mycluster)
d.在主namenode节点启动./sbin/hadoop-daemon.sh start namenode进程(hadoop-daemon.sh start namenode)
e.在备节点执行第一行命令,这个是把备namenode节点的目录格式化并把元数据从主namenode节点copy过来,并且这个命令不会把journalnode目录再格式化了!
然后用第二个命令启动备namenode进程!
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode
f.在两个namenode节点都执行以下命令(hadoop-daemon.sh start zkfc)
g.在所有datanode节点都执行以下命令启动datanode(hadoop-daemon.sh start datanode)

猜你喜欢

转载自blog.csdn.net/m0_37063257/article/details/78907825
今日推荐