hadoop,
exec 6<>/dev/tcp/<ip>/<port>   # 打开到 ip:port 的 TCP 连接，读写绑定到文件描述符 6
exec 6<&-                      # 关闭文件描述符 6
exec 9<>/dev/tcp/ip.xx.port
echo -ne "GET /index.html HTTP/1.1\r\n" >&9
echo -ne "Host: www.baidu.com\r\n\r\n" >&9
exec 9<&-
echo -e "DEVICE="eth0"\nONBOOT=yes\nNETBOOT=yes\nIPV6INIT=no\nBOOTPROTO=none\nTYPE=Ethernet\nIPADDR=192.168.1.11\nPREFIX=24\nGATEWAY=192.168.1.254">/etc/sysconfig/network-scripts/ifcfg-eth0
安装
yum install java-1.8.0-openjdk
yum -y install java-1.8.0-openjdk-devel
jps
在 /etc/hosts 中配置各主机名与 IP 的映射，确保相互之间能通过主机名 ping 通
拷贝scp hadoop-2.7.6.tar.gz 192.168.1.1:/usr/local/hadoop
修改/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64/jre
修改/usr/local/hadoop/etc/hadoop/hadoop-env.sh
验证./bin/hadoop version 在/usr/local/hadoop
/usr/local/hadoop/etc/hadoop路径
jar xx.jar
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.6.jar wordcount oo xx
xml格式vim core-site.xml
<property>
<name></name>
<value></value>
</property>
<description></description>
vim /root/.ssh/known_hosts
core-site.xml
官方文档入口：http://hadoop.apache.org/ → Documentation → Release 2.7.6 → Configuration
网站有默认配置文件http://hadoop.apache.org/docs/r2.7.6/
http://hadoop.apache.org/docs/r2.7.6/hadoop-project-dist/hadoop-common/core-default.xml
192.168.1.1
u1 NameNode
SecondaryNameNode
ResourceManager HDFS
YARN
192.168.1.2
u2 DataNode
NodeManager HDFS
YARN
192.168.1.3
u3 DataNode
NodeManager HDFS
YARN
192.168.1.4
u4 DataNode
NodeManager HDFS
YARN
vim /etc/ssh/ssh_config
GSSAPIAuthentication no
StrictHostKeyChecking no
/usr/local/hadoop/etc/hadoop 添加vim slaves主机名
hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.http-address</name>
<value>u1:50070</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>u1:50090</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
</configuration>
所有机器scp -r /usr/local/hadoop/ 192.168.1.4:/usr/local/
在 u1 上格式化：./bin/hdfs namenode -format
./sbin/start-dfs.sh
jps
./sbin/stop-dfs.sh
rm -rf /usr/local/hadoop/logs
/usr/local/hadoop/bin/hdfs dfsadmin -report
firewalld
systemctl stop firewalld
yum remove
rpm -qa | grep firewalld
iptables
vim mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>u1</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
rsync -aSH --delete /usr/local/hadoop/ <目标主机>:/usr/local/hadoop/
./sbin/start-yarn.sh
./bin/yarn node -list
验证
1.1:50070
1.1:50090
1.1:50075
1.2:8088
1.2:8042
for i in {2..4}; do scp yarn-site.xml 192.168.1.$i:/usr/local/hadoop/etc/hadoop/; done
./sbin/start-yarn.sh
./bin/yarn node -list
./bin/hdfs dfsadmin -report
./bin/hadoop fs -put/get
准备1.5 网关 1.6 客户端
关闭selinux卸载firewalld
nfsgw /etc/hosts
name
uid gid ===
adduser -g 100 nsd1802
adduser -u 1000 -g 100 nsd1802
停止集群服务 ./sbin/stop-all.sh
/usr/local/hadoop/etc/hadoop
vim core-site.xml
hadoop.proxyuser.nsd1802.groups *
hadoop.proxyuser.nsd1802.hosts *
启动./sbin/start-dfs.sh
配置网关
echo -e "DEVICE="eth0"\nONBOOT=yes\nNETBOOT=yes\nIPV6INIT=no\nBOOTPROTO=none\nTYPE=Ethernet\nIPADDR=192.168.1.5\nPREFIX=24\nGATEWAY=192.168.1.254">/etc/sysconfig/network-scripts/ifcfg-eth0
cat /etc/hosts
adduser -u 1000 -g 100 nsd1802
yum -y install java-1.8.0-openjdk-devel
setfacl -m user:nsd1802:rwx logs
scp /etc/yum.repos.d/dvd.repo 192.168.1.6:/etc/yum.repos.d/
scp -r /usr/local/hadoop/ 192.168.1.5:/usr/local/
yum remove -y rpcbind nfs-utils
参数
vim hdfs-site.xml
mkdir /var/nfstemp
chown nsd1802:users /var/nfstemp
<property>
<name>nfs.exports.allowed.hosts</name>
<value>* rw</value>
</property>
<property>
<name>nfs.rtmax</name>
<value>4194304</value>
</property>
<property>
<name>nfs.wtmax</name>
<value>1048576</value>
</property>
<property>
<name>nfs.port.monitoring.disabled</name>
<value>false</value>
</property>
<property>
<name>nfs.dump.dir</name>
<value>/var/nfstemp</value>
</property>
<property>
<name>dfs.namenode.accesstime.precision</name>
<value>3600000</value>
</property>
./sbin/stop-all.sh
./sbin/start-dfs.sh
./sbin/hadoop-daemon.sh --script ./bin/hdfs start portmap
jps
su -l nsd1802
cd /usr/local/hadoop
./sbin/hadoop-daemon.sh --script ./bin/hdfs start nfs3
jps
mount -t nfs -o vers=3,proto=tcp,nolock,noacl,noatime,sync 192.168.1.5:/ /mount/
git clone https://github.com/MrZhangzhg/nsd2018.git
安装openjdk
配置/etc/hosts
拷贝/usr/local/hadoop/复制
slaves 文件中添加新节点主机名
ssh免密码登陆
cd /root/.ssh/
ssh-copy-id -i id_rsa.pub newnode
启动./sbin/hadoop-daemon.sh start datanode
设置带宽./bin/hdfs dfsadmin -setBalancerBandwidth 67108864
同步数据./sbin/start-balancer.sh -threshold 5
验证 ./bin/hdfs dfsadmin -report
配置hdfs-site.xml增加
<property>
<name>dfs.hosts.exclude</name>
<value>/usr/local/hadoop/etc/hadoop/exclude</value>
</property>
touch /usr/local/hadoop/etc/hadoop/exclude
server.1=u2:2888:3888
server.2=u3:2888:3888
server.3=u4:2888:3888
server.4=u1:2888:3888:observer
mkdir /tmp/zookeeper
cd /tmp/zookeeper
echo 4 > myid
/usr/local/zookeeper-3.4.10/bin/zkServer.sh start
/usr/local/zookeeper-3.4.10/bin/zkServer.sh status
cd /usr/local/kafka/config
vim server.properties
/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
./bin/kafka-topics.sh --create --partitions 2 --replication-factor 2 --zookeeper localhost:2181 --topic nsd1802
./bin/kafka-topics.sh --list --zookeeper localhost:2181
./bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic nsd1802
目录
hadoop-env.sh
slaves u2 u3 u4
配置文件core-site.xml
hdfs-site.xml
yarn-site.xml
mapred-site.xml
/usr/local/
core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/hadoop</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>node2:2181,node3:2181,node4:2181</value>
</property>
#######################################################
ALL 同步配置 所有机器配置
NN1 初始化ZK集群./bin/hdfs zkfc -formatZK
NODE启动服务 ./sbin/hadoop-daemon.sh start journalnode
successfully format
NN1 格式化 ./bin/hdfs namenode -format
NN2 同步数据本地 ./var/hadoop/dfs
NN1 初始化JNS ./bin/hdfs namenode -initializeSharedEdits
NODE 停止服务./sbin/hadoop-daemon.sh stop journalnode
############################################################
NN1: ./sbin/start-all.sh
NN2: ./sbin/yarn-daemon.sh start resourcemanager
bin/hdfs haadmin -getServiceState nn1
bin/hdfs haadmin -getServiceState nn2
bin/yarn rmadmin -getServiceState nn1
bin/yarn rmadmin -getServiceState nn2
./bin/hdfs dfsadmin -report
./bin/yarn node -list
./bin/hadoop fs -ls hdfs:/
本站文章为和通数据库网友分享或者投稿,欢迎任何形式的转载,但请务必注明出处.
同时文章内容如有侵犯了您的权益,请联系QQ:970679559,我们会在尽快处理。