欢迎投稿

今日深度:

hadoop,

hadoop,


[root@server1 ~]# useradd -u 800 hadoop
[root@server1 ~]# passwd hadoop
[root@server1 ~]# su - hadoop
[hadoop@server1 ~]$ ls
hadoop-2.7.3.tar.gz  jdk-7u79-linux-x64.tar.gz
[hadoop@server1 ~]$ tar zxf jdk-7u79-linux-x64.tar.gz 
[hadoop@server1 ~]$ ln -s jdk1.7.0_79/ java
[hadoop@server1 ~]$ tar zxf hadoop-2.7.3.tar.gz 
[hadoop@server1 ~]$ cd hadoop-2.7.3
[hadoop@server1 hadoop-2.7.3]$ cd etc/hadoop/
[hadoop@server1 hadoop]$ vim hadoop-env.sh 
 25 export JAVA_HOME=/home/hadoop/java
[hadoop@server1 hadoop]$ cd
[hadoop@server1 ~]$ vim .bash_profile
 10 PATH=$PATH:$HOME/bin:~/java/bin
[hadoop@server1 ~]$ source .bash_profile 
[hadoop@server1 ~]$ cd hadoop-2.7.3
[hadoop@server1 hadoop-2.7.3]$ mkdir input
[hadoop@server1 hadoop-2.7.3]$ cp etc/hadoop/*.xml input
[hadoop@server1 hadoop-2.7.3]$ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar grep input output 'dfs[a-z.]+'
[hadoop@server1 hadoop-2.7.3]$ cd output/
[hadoop@server1 output]$ ls
part-r-00000  _SUCCESS
[hadoop@server1 output]$ cat part-r-00000 
1   dfsadmin
[hadoop@server1 ~]$ ln -s hadoop-2.7.3 hadoop
[hadoop@server1 ~]$ cd hadoop
[hadoop@server1 hadoop]$ cd etc/hadoop/
[hadoop@server1 hadoop]$ vim core-site.xml 
<property>
         <name>fs.defaultFS</name>
                 <value>hdfs://172.25.40.1:9000</value>
                         </property>

[hadoop@server1 hadoop]$ vim hdfs-site.xml 
<property>
         <name>dfs.replication</name>
                 <value>1</value>
                        </property>

[hadoop@server1 hadoop]$ vim slaves
172.25.40.1     #将localhost改为节点主机IP

生成密钥,做免密连接

[hadoop@server1 hadoop]$ ssh-keygen
[hadoop@server1 hadoop]$ cd
[hadoop@server1 ~]$ cd .ssh/
[hadoop@server1 .ssh]$ ls
id_rsa  id_rsa.pub
[hadoop@server1 .ssh]$ cp id_rsa.pub authorized_keys

格式化

[hadoop@server1 ~]$ cd hadoop
[hadoop@server1 hadoop]$ ls
bin  include  lib      LICENSE.txt  output      sbin
etc  input    libexec  NOTICE.txt   README.txt  share
[hadoop@server1 hadoop]$ bin/hdfs namenode -format


开启服务

[hadoop@server1 hadoop]$ sbin/start-dfs.sh
[hadoop@server1 hadoop]$ jps

网页访问http://172.25.40.1:50070

[hadoop@server1 hadoop]$ bin/hdfs dfs -mkdir /user
[hadoop@server1 hadoop]$ bin/hdfs dfs -mkdir /user/hadoop
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls
[hadoop@server1 hadoop]$ bin/hdfs dfs -put input/
[hadoop@server1 hadoop]$ bin/hdfs dfs -ls
Found 1 items
drwxr-xr-x   - hadoop supergroup          0 2018-08-26 16:51 input

分布式

[hadoop@server1 hadoop]$ logout
[root@server1 ~]# yum install -y rpcbind-0.2.0-11.el6.x86_64
[root@server1 ~]# /etc/init.d/rpcbind start
[root@server1 ~]# vim /etc/exports
    /home/hadoop    *(rw,anonuid=800,anongid=800)
[root@server1 ~]# /etc/init.d/nfs start

[root@server1 ~]# showmount -e

在【server2】和【server3】上同时操作以下步骤,作为分布式节点

[root@server2 ~]# yum install -y nfs-utils
[root@server2 ~]# /etc/init.d/rpcbind start

需要建立与【server1】相同的用户

[root@server2 ~]# useradd -u 800 hadoop
[root@server2 ~]# id hadoop
uid=800(hadoop) gid=800(hadoop) groups=800(hadoop)

挂载到【server1】上

[root@server2 ~]# mount 172.25.40.1:/home/hadoop/ /home/hadoop/
[root@server2 ~]# ll -d /home/hadoop/
drwx------ 5 hadoop hadoop 4096 Aug 26 16:57 /home/hadoop/

登陆hadoop用户时可以看到server1在这个用户里的文件

[root@server2 ~]# su - hadoop
[hadoop@server2 ~]$ ls
hadoop        hadoop-2.7.3.tar.gz  jdk1.7.0_79
hadoop-2.7.3  java                 jdk-7u79-linux-x64.tar.gz

在【server1】上

[root@server1 ~]# su - hadoop
[hadoop@server1 ~]$ cd hadoop
[hadoop@server1 hadoop]$ cd etc/hadoop/
[hadoop@server1 hadoop]$ vim hdfs-site.xml 

[hadoop@server1 hadoop]$ vim slaves
[hadoop@server1 hadoop]$ cat slaves
172.25.40.2
172.25.40.3
[hadoop@server1 hadoop]$ cd /tmp/
[hadoop@server1 tmp]$ ls
[hadoop@server1 tmp]$ rm -fr *
[hadoop@server1 ~]$ cd hadoop
[hadoop@server1 hadoop]$ bin/hdfs namenode -format
[hadoop@server1 hadoop]$ sbin/start-dfs.sh 

[hadoop@server1 hadoop]$ bin/hdfs dfs -mkdir /user
[hadoop@server1 hadoop]$ bin/hdfs dfs -mkdir /user/hadoop
[hadoop@server1 hadoop]$ bin/hdfs dfs -put input
[hadoop@server1 hadoop]$ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount input output

[hadoop@server1 hadoop]$ bin/hdfs dfs -ls output/
Found 2 items
-rw-r--r--   2 hadoop supergroup          0 2018-08-26 17:02 output/_SUCCESS
-rw-r--r--   2 hadoop supergroup       9984 2018-08-26 17:02 output/part-r-00000
[hadoop@server1 hadoop]$ rm -fr output/

[hadoop@server1 hadoop]$ bin/hdfs dfs -cat output/*

[hadoop@server1 hadoop]$ bin/hdfs dfs -get output
[hadoop@server1 hadoop]$ cd output/
[hadoop@server1 output]$ ls
[hadoop@server1 output]$ cat part-r-00000 

数据节点的添加和删除:
首先查看节点:

搭建server4:

[root@server4 ~]# yum install -y nfs-utils
[root@server4 ~]# /etc/init.d/rpcbind start
[root@server4 ~]# useradd -u 800 hadoop
[root@server4 ~]# id hadoop
uid=800(hadoop) gid=800(hadoop) groups=800(hadoop)
[root@server4 ~]# mount 172.25.40.1:/home/hadoop/ /home/hadoop/
[root@server4 ~]# df

[hadoop@server4 hadoop]$ vim slaves 
[hadoop@server4 hadoop]$ cat slaves 
172.25.40.2
172.25.40.3
172.25.40.4
[hadoop@server4 hadoop]$ cd
[hadoop@server4 ~]$ cd hadoop
[hadoop@server4 hadoop]$ sbin/hadoop-daemon.sh start datanode
[hadoop@server4 hadoop]$ jps

再次查看节点信息:点击overview

[hadoop@server1 output]$ cd ..
[hadoop@server1 hadoop]$ dd if=/dev/zero of=bigfile bs=1M count=300
300+0 records in
300+0 records out
314572800 bytes (315 MB) copied, 1.65197 s, 190 MB/s

[hadoop@server1 hadoop]$ bin/hdfs dfs -put bigfile
[hadoop@server1 hadoop]$ cd etc/hadoop/
[hadoop@server1 hadoop]$ vim slaves 
[hadoop@server1 hadoop]$ cat slaves
172.25.40.2
172.25.40.4
[hadoop@server1 hadoop]$ vim hosts-exclude
[hadoop@server1 hadoop]$ cat hosts-exclude
172.25.40.3

[hadoop@server1 hadoop]$ vim hdfs-site.xml 
<property>
        <name>dfs.hosts.exclude</name>
                <value>/home/hadoop/hadoop/etc/hadoop/hosts-exclude</value>
                </property>

[hadoop@server1 hadoop]$ ll /home/hadoop/hadoop/etc/hadoop/hosts-exclude 
-rw-rw-r-- 1 hadoop hadoop 12 Aug 26 19:08 /home/hadoop/hadoop/etc/hadoop/hosts-exclude
[hadoop@server1 hadoop]$ cd ..
[hadoop@server1 etc]$ cd ..
[hadoop@server1 hadoop]$ bin/hdfs dfsadmin -refreshNodes

[hadoop@server1 hadoop]$ bin/hdfs dfsadmin -report

[hadoop@server3 ~]$ jps
2119 DataNode
2264 Jps
[hadoop@server3 ~]$  cd hadoop
[hadoop@server3 hadoop]$ sbin/hadoop-daemon.sh stop datanode
stopping datanode

www.htsjk.Com true http://www.htsjk.com/Hadoop/36880.html NewsArticle hadoop, [root @server1 ~] # useradd -u 800 hadoop [root @server1 ~] # passwd hadoop [root @server1 ~] # su - hadoop [hadoop @server1 ~] $ lshadoop- 2.7 . 3 .tar.gz jdk- 7 u79-linux-x64.tar.gz[hadoop @server1 ~] $ tar zxf jdk- 7 u79-linux-...
相关文章
    暂无相关文章
评论暂时关闭