start-dfs.sh脚本是如何启动NameNode节点的?
执行脚本链路: start-dfs.sh -> hdfs-config.sh -> hadoop-config.sh -> hadoop-daemons.sh -> slaves.sh -> hadoop-daemon.sh -> hadoop-env.sh -> hdfs
- start-dfs.sh 脚本入口
- hdfs-config.sh - hadoop-config.sh - hadoop-env.sh 变量配置与环境配置
- hadoop-daemons.sh - hadoop-daemon.sh 守护进程
- hdfs 真正执行启动 NameNode 脚本
简化层次: 脚本入口 -> 配置各环境变量 -> 守护进程 -> 真正执行
start-dfs.sh 脚本详细执行顺序
真实启动NameNode语句
代码文件 (bash): .../hadoop-2.6.5/bin/hdfs
----
...
# 设置JAVA启动类路径CP
---运行时内容
export 'CLASSPATH=/opt/bigdata/hadoop-2.6.5/etc/hadoop
:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs
:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/*
:/opt/bigdata/hadoop-2.6.5/contrib/capacity-scheduler/*.jar'
---
export CLASSPATH=$CLASSPATH
...
# Check to see if we should start a secure datanode
if [ "$starting_secure_dn" = "true" ]; then
...
# 启动 NameNode
---运行时内容
exec /usr/java/default/bin/java
-Dproc_namenode -Xmx1000m
-Djava.net.preferIPv4Stack=true
-Dhadoop.log.dir=/opt/bigdata/hadoop-2.6.5/logs
-Dhadoop.log.file=hadoop.log
-Dhadoop.home.dir=/opt/bigdata/hadoop-2.6.5
-Dhadoop.id.str=root
-Dhadoop.root.logger=INFO,console
-Djava.library.path=/opt/bigdata/hadoop-2.6.5/lib/native
-Dhadoop.policy.file=hadoop-policy.xml
-Djava.net.preferIPv4Stack=true
-Dhadoop.security.logger=INFO,RFAS
-Dhdfs.audit.logger=INFO,NullAppender
-Dhadoop.security.logger=INFO,NullAppender
org.apache.hadoop.hdfs.server.namenode.NameNode
---
exec "$JSVC"
-Dproc_$COMMAND -outfile "$JSVC_OUTFILE"
-errfile "$JSVC_ERRFILE"
-pidfile "$HADOOP_SECURE_DN_PID"
-nodetach
-user "$HADOOP_SECURE_DN_USER"
-cp "$CLASSPATH"
$JAVA_HEAP_MAX $HADOOP_OPTS
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter "$@"
...
代码语言:bash复制# 所以想直接执行NameNode
# 先设置类路径
export 'CLASSPATH=/opt/bigdata/hadoop-2.6.5/etc/hadoop
:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/common/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs
:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/hdfs/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/yarn/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/lib/*
:/opt/bigdata/hadoop-2.6.5/share/hadoop/mapreduce/*
:/opt/bigdata/hadoop-2.6.5/contrib/capacity-scheduler/*.jar'
# 再执行JAVA
exec /usr/java/default/bin/java
-Dproc_namenode -Xmx1000m
-Djava.net.preferIPv4Stack=true
-Dhadoop.log.dir=/opt/bigdata/hadoop-2.6.5/logs
-Dhadoop.log.file=hadoop.log
-Dhadoop.home.dir=/opt/bigdata/hadoop-2.6.5
-Dhadoop.id.str=root
-Dhadoop.root.logger=INFO,console
-Djava.library.path=/opt/bigdata/hadoop-2.6.5/lib/native
-Dhadoop.policy.file=hadoop-policy.xml
-Djava.net.preferIPv4Stack=true
-Dhadoop.security.logger=INFO,RFAS
-Dhdfs.audit.logger=INFO,NullAppender
-Dhadoop.security.logger=INFO,NullAppender
org.apache.hadoop.hdfs.server.namenode.NameNode