# 初始化namenode
$ ./bin/hdfs namenode -format
# 初始化好namenode后,hadoop会自动创建hdfs-site.xml中namenode配置项所指定的文件路径
$ ll /data/hdfs/name/current/
total 24
drwxrwxr-x 2 yunyu yunyu 4096 Sep 10 18:07 ./
drwxrwxr-x 3 yunyu yunyu 4096 Sep 10 18:07 ../
-rw-rw-r-- 1 yunyu yunyu 352 Sep 10 18:07 fsimage_0000000000000000000
-rw-rw-r-- 1 yunyu yunyu 62 Sep 10 18:07 fsimage_0000000000000000000.md5
-rw-rw-r-- 1 yunyu yunyu 2 Sep 10 18:07 seen_txid
-rw-rw-r-- 1 yunyu yunyu 202 Sep 10 18:07 VERSION
# 启动hdfs服务
$ ./sbin/start-dfs.sh
Starting namenodes on [hadoop1]
hadoop1: starting namenode, logging to /data/hadoop-2.7.1/logs/hadoop-yunyu-namenode-ubuntu.out
hadoop2: starting datanode, logging to /data/hadoop-2.7.1/logs/hadoop-yunyu-datanode-ubuntu.out
hadoop3: starting datanode, logging to /data/hadoop-2.7.1/logs/hadoop-yunyu-datanode-ubuntu.out
Starting secondary namenodes [hadoop1]
hadoop1: starting secondarynamenode, logging to /data/hadoop-2.7.1/logs/hadoop-yunyu-secondarynamenode-ubuntu.out
# 使用jps检查启动的服务,可以看到NameNode和SecondaryNameNode已经启动
$ jps
20379 SecondaryNameNode
20570 Jps
20106 NameNode
# 这时候在Hadoop2和Hadoop3节点上使用jps查看,DataNode已经启动
$ jps
16392 Jps
16024 DataNode
# 在Hadoop2和Hadoop3节点上,hadoop也会自动创建hdfs-site.xml中datanode配置项所指定的文件路径
$ ll /data/hdfs/data/current/
total 16
drwxrwxr-x 3 yunyu yunyu 4096 Sep 10 18:10 ./
drwx------ 3 yunyu yunyu 4096 Sep 10 18:10 ../
drwx------ 4 yunyu yunyu 4096 Sep 10 18:10 BP-1965589257-127.0.1.1-1473502067891/
-rw-rw-r-- 1 yunyu yunyu 229 Sep 10 18:10 VERSION
# 启动yarn服务
$ ./sbin/start-yarn.sh
starting yarn daemons
starting resourcemanager, logging to /data/hadoop-2.7.1/logs/yarn-yunyu-resourcemanager-ubuntu.out
hadoop3: starting nodemanager, logging to /data/hadoop-2.7.1/logs/yarn-yunyu-nodemanager-ubuntu.out
hadoop2: starting nodemanager, logging to /data/hadoop-2.7.1/logs/yarn-yunyu-nodemanager-ubuntu.out
# 使用jps检查启动的服务,可以看到ResourceManager已经启动
$ jps
21653 Jps
20379 SecondaryNameNode
20106 NameNode
21310 ResourceManager
# 这时候在Hadoop2和Hadoop3节点上使用jps查看,NodeManager已经启动
$ jps
16946 NodeManager
17235 Jps
16024 DataNode
# 启动jobhistory服务。默认情况下,使用start-all.sh时jobhistory是不启动的,所以即使使用start-all.sh也要手动启动jobhistory服务
$ ./sbin/mr-jobhistory-daemon.sh start historyserver
starting historyserver, logging to /data/hadoop-2.7.1/logs/mapred-yunyu-historyserver-ubuntu.out
# 使用jps检查启动的服务,可以看到JobHistoryServer已经启动
$ jps
21937 Jps
20379 SecondaryNameNode
20106 NameNode
21863 JobHistoryServer
21310 ResourceManager