欢迎投稿

今日深度:

Hadoop 主机配置,

Hadoop 主机配置,


Hadoop 3.2.2 主机配置:根据官方文档,涉及的默认配置文件如下:

  • core-default.xml
  • hdfs-default.xml
  • hdfs-rbf-default.xml
  • mapred-default.xml
  • yarn-default.xml

 

  1. 配置主机名

修改hadoop-3.2.2/etc/hadoop/workers  

#localhost
master
 

 2. hdfs-site.xml

<configuration>
    <!-- Block replication factor; 1 is appropriate for a single-host cluster -->
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <!-- SecondaryNameNode web UI address/port -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>master:9868</value>
    </property>
    <!-- NameNode web UI address/port -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>master:9870</value>
    </property>
</configuration>

3.yarn-site.xml

<configuration>

<!-- Site specific YARN configuration properties -->
    <!-- Auxiliary service required for MapReduce shuffle on NodeManagers -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- Environment variables containers are allowed to inherit from NodeManagers -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- Enable log aggregation (collect container logs into HDFS) -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!-- Retention time for AGGREGATED logs: 7 days = 7*24*60*60 seconds.
         NOTE: the original used yarn.nodemanager.log.retain-seconds, which only
         applies when log aggregation is DISABLED; with aggregation enabled above,
         yarn.log-aggregation.retain-seconds is the effective property. -->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
    <!-- HDFS directory where aggregated logs are stored -->
    <property>
        <name>yarn.nodemanager.remote-app-log-dir</name>
        <value>/logs</value>
    </property>

    <!-- Host running the ResourceManager -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
    </property>
    <!-- ResourceManager web UI address/port -->
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>master:8088</value>
    </property>
    <!-- ResourceManager client RPC address/port -->
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8032</value>
    </property>
    <!-- Standalone web proxy (security layer in front of AM web UIs,
         protects the cluster from malicious requests) -->
    <property>
        <name>yarn.web-proxy.address</name>
        <value>master:8000</value>
    </property>
</configuration>

4.mapred-site.xml

<configuration>
    <!-- Run MapReduce jobs on YARN instead of the local runner -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- Classpath for MapReduce applications (resolved via HADOOP_MAPRED_HOME) -->
    <property>
        <name>mapreduce.application.classpath</name>
        <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
    </property>
    <!-- Enable uber mode: run small jobs entirely inside the AM JVM -->
    <property>
        <name>mapreduce.job.ubertask.enable</name>
        <value>true</value>
    </property>
    <!-- Maximum number of map tasks for a job to qualify as an uber task -->
    <property>
        <name>mapreduce.job.ubertask.maxmaps</name>
        <value>9</value>
    </property>
    <!-- Maximum number of reduce tasks for a job to qualify as an uber task -->
    <property>
        <name>mapreduce.job.ubertask.maxreduces</name>
        <value>1</value>
    </property>
    <!-- JobHistory server RPC address/port (used by YARN/clients) -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
</configuration>

5. core-site.xml

<configuration>
    <!-- Default filesystem URI: NameNode RPC address/port -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:8020</value>
    </property>
    <!-- Base directory for Hadoop's local temporary/data files;
         ${user.name} is expanded per running user -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/hadoop-3.2.2/data/tmp/hadoop-${user.name}</value>
    </property>
</configuration>

启动集群

[root@master hadoop-3.2.2]# sbin/start-dfs.sh 
[root@master hadoop-3.2.2]# sbin/start-yarn.sh 
[root@master hadoop-3.2.2]# mapred --daemon start historyserver
[root@master hadoop-3.2.2]# yarn --daemon start proxyserver
[root@master hadoop-3.2.2]# jps
30321 SecondaryNameNode
29970 NameNode
30099 DataNode
32339 JobHistoryServer
32756 WebAppProxyServer
32820 Jps
31686 ResourceManager
31818 NodeManager

7 个守护进程(不含 Jps)全部启动,主机配置完成,集群启动成功。

www.htsjk.Com true http://www.htsjk.com/Hadoop/44208.html NewsArticle Hadooop 主机配置, hadoop3.2.2 主机配置 根据官方文档 下 core-default.xml hdfs-default.xml hdfs-rbf-default.xml mapred-default.xml yarn-default.xml     配置主机名 修改hadoop-3.2.2/etc/hadoop/workers   #localhostma...
评论暂时关闭