1.免密码(自己也配)

#ssh-keygen -t rsa
#cd /root/.ssh/
#scp id_rsa.pub root@ip:/
-----------
在另一台机子上
#cat /id_rsa.pub >> /root/.ssh/authorized_keys

2./etc/hosts

 主节点映射是 内网地址
 子节点映射是 外网地址
 ip name
 111.1.1.1 vdata1

3.建立目录
所有节点 mkdir -p /opt/workspace/hadoop/tmp
4.配置文件
阿里云要开放端口!!!!!!!!

6.4.1 修改hadoop-env.sh  mapred-env.sh配置文件,添加JAVA_HOME
		6.4.2 修改yarn-env.sh配置文件,添加JAVA_HOME   ---/opt/soft/jdk1.7
		6.4.3 修改slaves配置文件,添加相关从节点的主机名
		6.4.4 修改core-site.xml配置文件,添加如下内容
			 <property>
				<name>fs.defaultFS</name>
				<value>hdfs://vdata1:9000</value>
			</property>
			<property>
				<name>hadoop.tmp.dir</name>
				<value>/opt/workspace/hadoop/tmp</value>
			</property> 
		6.4.5 修改hdfs-site.xml配置文件,添加如下内容
			<property>		  
				<name>dfs.namenode.secondary.http-address</name>
				<value>vdata1:9001</value>
			 </property>

			 <property>
				<name>dfs.replication</name>
				<value>3</value>
			 </property>
			
			 <property>
				<name>dfs.webhdfs.enabled</name>
				<value>true</value>
			 </property>
			 <property>
				<name>dfs.namenode.name.dir</name>
				<value>file:/opt/workspace/hadoop/namenode/name</value>
			 </property>
			
			 <property>
				<name>dfs.datanode.data.dir</name>
				<value>file:/opt/workspace/hadoop/datanode/data</value>
			  </property>
			

		6.4.6 修改 mapred-site.xml配置文件
			<property>			   
				<name>mapreduce.framework.name</name>
			   	<value>yarn</value>
			 </property>


			 <property>
			 	<name>mapreduce.jobhistory.address</name>
			 	<value>vdata1:10020</value>
			 </property>
			 <property>
			 	<name>mapreduce.jobhistory.webapp.address</name>
			 	<value>vdata1:19888</value>
			 </property>

		6.4.7 修改yarn-site.xml配置文件
			<property>
			   	<name>yarn.nodemanager.aux-services</name>
			   	<value>mapreduce_shuffle</value>
			 </property>
			 <property>
			   	<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
			   	<value>org.apache.hadoop.mapred.ShuffleHandler</value>
			  </property>
			 <property>
			   	<name>yarn.resourcemanager.address</name>
			 	 <value>vdata1:8032</value>
			  </property>
			  <property>
			   	<name>yarn.resourcemanager.scheduler.address</name>
			   	<value>vdata1:8030</value>
			  </property>
			  <property>
			   	<name>yarn.resourcemanager.resource-tracker.address</name>
			   	<value>vdata1:8035</value>    <!-- 注意: YARN 默认 resource-tracker 端口是 8031,此处自定义为 8035,需确认防火墙/安全组已放行 -->
			  </property>
			  <property>
			   	<name>yarn.resourcemanager.admin.address</name>
			   	<value>vdata1:8033</value>
			  </property>
			  <property>
			   	<name>yarn.resourcemanager.webapp.address</name>
			   	<value>vdata1:8088</value>
			  </property>

5.发送配置文件到结点
scp * root@vdata3:/opt/soft/hadoop-2.6.0-cdh5.5.0/etc/hadoop/
6.配置环境变量
java hadoop

#vim /etc/profile
JAVA_HOME=/opt/soft/jdk1.7.0_79
HADOOP_HOME=/opt/soft/hadoop-2.6.0-cdh5.5.0    ---不能有空格,重启
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export JAVA_HOME PATH HADOOP_HOME

#source /etc/profile

7.格式化
保证 /opt/workspace/hadoop/namenode/name 和 /opt/workspace/hadoop/datanode/data 均为空(与配置的 dfs.namenode.name.dir / dfs.datanode.data.dir 一致)
保证 /opt/workspace/hadoop/tmp 清空

hdfs namenode -format

8.启动
start-all.sh
jps
hadoop fs -ls /