diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-env.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-env.xml
index 9c4c912e..ccf4fba7 100644
--- a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-env.xml
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-env.xml
@@ -80,126 +80,128 @@
yarn-env template
This is the freemarker template for yarn-env.sh file
${JAVA_LIBRARY_PATH}#noparse>:${hadoop_java_io_tmpdir}"
- <#noparse>
- # User for YARN daemons
- export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-
- # resolve links - $0 may be a softlink
- export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_YARN_HOME/etc/hadoop}"
-
- # some Java parameters
- # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
- if [ "$JAVA_HOME" != "" ]; then
- #echo "run java in $JAVA_HOME"
- JAVA_HOME=$JAVA_HOME
- fi
-
- if [ "$JAVA_HOME" = "" ]; then
- echo "Error: JAVA_HOME is not set."
- exit 1
- fi
-
- JAVA=$JAVA_HOME/bin/java
- JAVA_HEAP_MAX=-Xmx1000m
-      </#noparse>
- # For setting YARN specific HEAP sizes please use this
- # Parameter and set appropriately
- YARN_HEAPSIZE=${yarn_heapsize}
-
- <#noparse>
- # check envvars which might override default args
- if [ "$YARN_HEAPSIZE" != "" ]; then
- JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
- fi
-      </#noparse>
-
- # Resource Manager specific parameters
-
- # Specify the max Heapsize for the ResourceManager using a numerical value
- # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
- # the value to 1000.
- # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
- # and/or YARN_RESOURCEMANAGER_OPTS.
- # If not specified, the default value will be picked from either YARN_HEAPMAX
- # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
- export YARN_RESOURCEMANAGER_HEAPSIZE=${resourcemanager_heapsize}
-
- # Specify the JVM options to be used when starting the ResourceManager.
- # These options will be appended to the options specified as HADOOP_OPTS
- # and therefore may override any similar flags set in HADOOP_OPTS
- #export YARN_RESOURCEMANAGER_OPTS=
-
- # Node Manager specific parameters
-
- # Specify the max Heapsize for the NodeManager using a numerical value
- # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
- # the value to 1000.
- # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
- # and/or YARN_NODEMANAGER_OPTS.
- # If not specified, the default value will be picked from either YARN_HEAPMAX
- # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
- export YARN_NODEMANAGER_HEAPSIZE=${nodemanager_heapsize}
-
- # Specify the max Heapsize for the HistoryManager using a numerical value
- # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
- # the value to 1024.
- # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
- # and/or YARN_HISTORYSERVER_OPTS.
- # If not specified, the default value will be picked from either YARN_HEAPMAX
- # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
- export YARN_HISTORYSERVER_HEAPSIZE=${apptimelineserver_heapsize}
-
- # Specify the JVM options to be used when starting the NodeManager.
- # These options will be appended to the options specified as HADOOP_OPTS
- # and therefore may override any similar flags set in HADOOP_OPTS
- #export YARN_NODEMANAGER_OPTS=
-
- # so that filenames w/ spaces are handled correctly in loops below
- IFS=
-
- <#noparse>
- # default log directory and file
- if [ "$HADOOP_LOG_DIR" = "" ]; then
- HADOOP_LOG_DIR="$HADOOP_YARN_HOME/logs"
- fi
- if [ "$HADOOP_LOGFILE" = "" ]; then
- HADOOP_LOGFILE='yarn.log'
- fi
-
- # default policy file for service-level authorization
- if [ "$YARN_POLICYFILE" = "" ]; then
- YARN_POLICYFILE="hadoop-policy.xml"
- fi
-
- # restore ordinary behaviour
- unset IFS
-
-
- HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
- HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.dir=$HADOOP_LOG_DIR"
- HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
- HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.file=$HADOOP_LOGFILE"
- HADOOP_OPTS="$HADOOP_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME"
- HADOOP_OPTS="$HADOOP_OPTS -Dyarn.id.str=$HADOOP_IDENT_STRING"
- HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
- HADOOP_OPTS="$HADOOP_OPTS -Dyarn.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
- export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
- export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
- if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
- HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
- fi
- HADOOP_OPTS="$HADOOP_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      </#noparse>
- HADOOP_OPTS="$HADOOP_OPTS -Djava.io.tmpdir=${hadoop_java_io_tmpdir}"
- HADOOP_OPTS="$HADOOP_OPTS --add-opens java.base/java.lang=ALL-UNNAMED"
+export HADOOP_HOME=${hadoop_home}
+export HADOOP_YARN_HOME=${hadoop_yarn_home}
+export HADOOP_MAPRED_HOME=${hadoop_mapred_home}
+USER="$(whoami)"
+export HADOOP_LOG_DIR=${yarn_log_dir_prefix}/$USER
+export HADOOP_PID_DIR=${yarn_pid_dir_prefix}/$USER
+export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir}
+export JAVA_HOME=${java_home}
+export JAVA_LIBRARY_PATH="<#noparse>${JAVA_LIBRARY_PATH}</#noparse>:${hadoop_java_io_tmpdir}"
+<#noparse>
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+
+# resolve links - $0 may be a softlink
+export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_YARN_HOME/etc/hadoop}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+#echo "run java in $JAVA_HOME"
+JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+echo "Error: JAVA_HOME is not set."
+exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+</#noparse>
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE=${yarn_heapsize}
+
+<#noparse>
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+</#noparse>
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE=${resourcemanager_heapsize}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE=${nodemanager_heapsize}
+
+# Specify the max Heapsize for the HistoryManager using a numerical value
+# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either HADOOP_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE=${apptimelineserver_heapsize}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+<#noparse>
+# default log directory and file
+if [ "$HADOOP_LOG_DIR" = "" ]; then
+HADOOP_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$HADOOP_LOGFILE" = "" ]; then
+HADOOP_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.dir=$HADOOP_LOG_DIR"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
+HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.file=$HADOOP_LOGFILE"
+HADOOP_OPTS="$HADOOP_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME"
+HADOOP_OPTS="$HADOOP_OPTS -Dyarn.id.str=$HADOOP_IDENT_STRING"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
+HADOOP_OPTS="$HADOOP_OPTS -Dyarn.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
+export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+HADOOP_OPTS="$HADOOP_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+</#noparse>
+HADOOP_OPTS="$HADOOP_OPTS -Djava.io.tmpdir=${hadoop_java_io_tmpdir}"
+HADOOP_OPTS="$HADOOP_OPTS --add-opens java.base/java.lang=ALL-UNNAMED"
]]>
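
Note on the template above: FreeMarker only substitutes the placeholders supplied by the stack code (for example ${hadoop_home} or ${yarn_heapsize}); everything wrapped in <#noparse>...</#noparse> is emitted verbatim so the shell expands it at daemon start-up. A minimal sketch of that behavior, not part of this patch, assuming only that a FreeMarker 2.3.x jar is on the classpath:

import freemarker.template.Configuration;
import freemarker.template.Template;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Map;

public class YarnEnvTemplateDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical two-line excerpt mirroring the template above: the first
        // ${...} is a FreeMarker variable, the second stays literal for the shell
        // because it sits inside <#noparse>.
        String src = "YARN_HEAPSIZE=${yarn_heapsize}\n"
                + "<#noparse>JAVA_HEAP_MAX=\"-Xmx${YARN_HEAPSIZE}m\"</#noparse>\n";

        Configuration cfg = new Configuration(Configuration.VERSION_2_3_31);
        Template template = new Template("yarn-env-demo", new StringReader(src), cfg);

        StringWriter out = new StringWriter();
        template.process(Map.of("yarn_heapsize", "1024"), out);

        // Prints:
        //   YARN_HEAPSIZE=1024
        //   JAVA_HEAP_MAX="-Xmx${YARN_HEAPSIZE}m"
        System.out.print(out);
    }
}

The rendered output keeps the second line exactly as written, which is why the <#noparse> blocks and their closing </#noparse> tags matter in the template.
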
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-log4j.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-log4j.xml
index c985c873..55f1f2cd 100644
--- a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-log4j.xml
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/yarn/configuration/yarn-log4j.xml
@@ -35,65 +35,65 @@
yarn-log4j template
Custom log4j.properties
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}#noparse>
- hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
- log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
- # Set the ResourceManager summary log filename
- yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
- # Set the ResourceManager summary log level and appender
-      <#noparse>yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}</#noparse>
- #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+<#noparse>hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}</#noparse>
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+<#noparse>yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}</#noparse>
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
- # To enable AppSummaryLogging for the RM,
- # set yarn.server.resourcemanager.appsummary.logger to
- # LEVEL,RMSUMMARY in hadoop-env.sh
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh
- # Appender for ResourceManager Application Summary Log
- # Requires the following properties to be set
- # - hadoop.log.dir (Hadoop Log directory)
- # - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
- # - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
- log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-      <#noparse>log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}</#noparse>
- log4j.appender.RMSUMMARY.MaxFileSize=${yarn_rm_summary_log_max_backup_size}MB
- log4j.appender.RMSUMMARY.MaxBackupIndex=${yarn_rm_summary_log_number_of_backup_files}
- log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
- log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
- log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
- log4j.appender.JSA.DatePattern=.yyyy-MM-dd
- log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-      <#noparse>log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}</#noparse>
-      <#noparse>log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false</#noparse>
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+# - hadoop.log.dir (Hadoop Log directory)
+# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+<#noparse>log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}</#noparse>
+log4j.appender.RMSUMMARY.MaxFileSize=${yarn_rm_summary_log_max_backup_size}MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=${yarn_rm_summary_log_number_of_backup_files}
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+<#noparse>log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}</#noparse>
+<#noparse>log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false</#noparse>
- # Audit logging for ResourceManager
-      <#noparse>rm.audit.logger=${hadoop.root.logger}</#noparse>
-      <#noparse>log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}</#noparse>
- log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
- log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      <#noparse>log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log</#noparse>
- log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
- log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for ResourceManager
+<#noparse>rm.audit.logger=${hadoop.root.logger}</#noparse>
+<#noparse>log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}</#noparse>
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+<#noparse>log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log</#noparse>
+log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
- # Audit logging for NodeManager
-      <#noparse>nm.audit.logger=${hadoop.root.logger}</#noparse>
-      <#noparse>log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}</#noparse>
- log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
- log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      <#noparse>log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log</#noparse>
- log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
- log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for NodeManager
+<#noparse>nm.audit.logger=${hadoop.root.logger}</#noparse>
+<#noparse>log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}</#noparse>
+log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+<#noparse>log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log</#noparse>
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
]]>
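
The RMAUDIT and NMAUDIT appenders above only receive events once the audit loggers are routed to them, which is what the -Drm.audit.logger=INFO,RMAUDIT and -Dnm.audit.logger=INFO,NMAUDIT exports in yarn-env.sh select. As a rough illustration (not part of this patch), the RMAUDIT block corresponds to something like the following log4j 1.x setup, with /tmp/rm-audit.log standing in for ${yarn.log.dir}/rm-audit.log:

import org.apache.log4j.DailyRollingFileAppender;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class RmAuditAppenderDemo {
    public static void main(String[] args) {
        // Programmatic equivalent of the RMAUDIT properties above.
        DailyRollingFileAppender rmAudit = new DailyRollingFileAppender();
        rmAudit.setName("RMAUDIT");
        rmAudit.setFile("/tmp/rm-audit.log");
        rmAudit.setDatePattern(".yyyy-MM-dd");
        rmAudit.setLayout(new PatternLayout("%d{ISO8601} %p %c{2}: %m%n"));
        rmAudit.activateOptions();

        Logger auditLogger = Logger.getLogger(
                "org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger");
        auditLogger.setLevel(Level.INFO);
        auditLogger.setAdditivity(false); // mirrors log4j.additivity...=false
        auditLogger.addAppender(rmAudit);

        auditLogger.info("audit event written to rm-audit.log only");
    }
}
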
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java
index 4d4aee23..a5afd93f 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hdfs/HdfsParams.java
@@ -62,10 +62,6 @@ public HdfsParams(CommandPayload commandPayload) {
globalParamsMap.put("hadoop_hdfs_home", hdfsHome());
globalParamsMap.put("hadoop_conf_dir", confDir());
globalParamsMap.put("hadoop_libexec_dir", serviceHome() + "/libexec");
- List<String> namenodeList = LocalSettings.hosts("namenode");
- if (!namenodeList.isEmpty()) {
- coreSite().put("fs.defaultFS", MessageFormat.format("hdfs://{0}:8020", namenodeList.get(0)));
- }
}
public String hdfsLimits() {
@@ -85,7 +81,12 @@ public Map<String, Object> hdfsLog4j() {
@GlobalParams
public Map<String, Object> coreSite() {
- return LocalSettings.configurations(serviceName(), "core-site");
+ Map<String, Object> coreSite = LocalSettings.configurations(serviceName(), "core-site");
+ List<String> namenodeList = LocalSettings.hosts("namenode");
+ if (!namenodeList.isEmpty()) {
+ coreSite.put("fs.defaultFS", MessageFormat.format("hdfs://{0}:8020", namenodeList.get(0)));
+ }
+ return coreSite;
}
@GlobalParams
@@ -96,6 +97,18 @@ public Map<String, Object> hadoopPolicy() {
@GlobalParams
public Map<String, Object> hdfsSite() {
Map<String, Object> hdfsSite = LocalSettings.configurations(serviceName(), "hdfs-site");
+ List<String> namenodeList = LocalSettings.hosts("namenode");
+ if (!namenodeList.isEmpty()) {
+ hdfsSite.put("dfs.namenode.rpc-address", MessageFormat.format("{0}:8020", namenodeList.get(0)));
+ hdfsSite.put("dfs.namenode.http-address", MessageFormat.format("{0}:50070", namenodeList.get(0)));
+ hdfsSite.put("dfs.namenode.https-address", MessageFormat.format("{0}:50470", namenodeList.get(0)));
+ }
+ List<String> snamenodeList = LocalSettings.hosts("secondary_namenode");
+ if (!snamenodeList.isEmpty()) {
+ hdfsSite.put("dfs.namenode.secondary.http-address", MessageFormat.format("{0}:50090", snamenodeList.get(0)));
+ }
+
+
dfsDataDir = (String) hdfsSite.get("dfs.datanode.data.dir");
dfsNameNodeDir = (String) hdfsSite.get("dfs.namenode.name.dir");
nameNodeFormattedDirs = Arrays.stream(dfsNameNodeDir.split(","))
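
The HdfsParams change above moves the host-derived defaults out of the constructor and into the @GlobalParams getters, so fs.defaultFS and the NameNode addresses are injected straight into the maps returned by coreSite() and hdfsSite(). A self-contained sketch of the derivation (the hostname is hypothetical and LocalSettings is replaced by a hard-coded list):

import java.text.MessageFormat;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class NameNodeAddressDemo {
    public static void main(String[] args) {
        // Hypothetical host list standing in for LocalSettings.hosts("namenode").
        List<String> namenodeList = List.of("nn-host-1.example.com");

        Map<String, Object> coreSite = new HashMap<>();
        Map<String, Object> hdfsSite = new HashMap<>();
        if (!namenodeList.isEmpty()) {
            String nn = namenodeList.get(0);
            coreSite.put("fs.defaultFS", MessageFormat.format("hdfs://{0}:8020", nn));
            hdfsSite.put("dfs.namenode.rpc-address", MessageFormat.format("{0}:8020", nn));
            hdfsSite.put("dfs.namenode.http-address", MessageFormat.format("{0}:50070", nn));
        }

        // fs.defaultFS             -> hdfs://nn-host-1.example.com:8020
        // dfs.namenode.rpc-address -> nn-host-1.example.com:8020
        System.out.println(coreSite);
        System.out.println(hdfsSite);
    }
}
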
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnParams.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnParams.java
index e3d5bc54..ae246d1a 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnParams.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnParams.java
@@ -38,6 +38,8 @@ public class YarnParams extends BaseParams {
private String yarnPidDir = "/var/run/hadoop-yarn";
private String rmNodesExcludeDir = "/etc/hadoop/conf/yarn.exclude";
private String tmpDir = "/tmp/hadoop-yarn";
+ private String nodemanagerLogDir = "/hadoop/yarn/log";
+ private String nodemanagerLocalDir = "/hadoop/yarn/local";
/* pid file */
private String resourceManagerPidFile = yarnPidDir + "/yarn/hadoop-yarn-resourcemanager.pid";
private String nodeManagerPidFile = yarnPidDir + "/yarn/hadoop-yarn-nodemanager.pid";
@@ -52,10 +54,9 @@ public YarnParams(CommandPayload commandPayload) {
globalParamsMap.put("hadoop_home", serviceHome());
globalParamsMap.put("hadoop_hdfs_home", hdfsHome());
globalParamsMap.put("hadoop_yarn_home", yarnHome());
+ globalParamsMap.put("hadoop_mapred_home", mapredHome());
globalParamsMap.put("hadoop_conf_dir", confDir());
-
globalParamsMap.put("hadoop_libexec_dir", serviceHome() + "/libexec");
-
globalParamsMap.put("exclude_hosts", excludeHosts);
}
@@ -77,8 +78,19 @@ public Map<String, Object> yarnLog4j() {
@GlobalParams
public Map<String, Object> yarnSite() {
Map<String, Object> yarnSite = LocalSettings.configurations(serviceName(), "yarn-site");
- rmNodesExcludeDir = (String) yarnSite.get("yarn.resourcemanager.nodes.exclude-path");
+ List<String> resourcemanagerList = LocalSettings.hosts("resourcemanager");
+ if (!resourcemanagerList.isEmpty()) {
+ yarnSite.put("yarn.resourcemanager.hostname", MessageFormat.format("{0}", resourcemanagerList.get(0)));
+ yarnSite.put("yarn.resourcemanager.resource-tracker.address", MessageFormat.format("{0}:8025", resourcemanagerList.get(0)));
+ yarnSite.put("yarn.resourcemanager.address", MessageFormat.format("{0}:8050", resourcemanagerList.get(0)));
+ yarnSite.put("yarn.resourcemanager.admin.address", MessageFormat.format("{0}:8141", resourcemanagerList.get(0)));
+ yarnSite.put("yarn.resourcemanager.webapp.address", MessageFormat.format("{0}:8088", resourcemanagerList.get(0)));
+ yarnSite.put("yarn.resourcemanager.webapp.https.address", MessageFormat.format("{0}:8090", resourcemanagerList.get(0)));
+ }
+ rmNodesExcludeDir = (String) yarnSite.get("yarn.resourcemanager.nodes.exclude-path");
+ nodemanagerLogDir = (String) yarnSite.get("yarn.nodemanager.log-dirs");
+ nodemanagerLocalDir = (String) yarnSite.get("yarn.nodemanager.local-dirs");
return yarnSite;
}
@@ -115,4 +127,8 @@ public String yarnExec() {
public String yarnHome() {
return stackLibDir() + "/hadoop-yarn";
}
+
+ public String mapredHome() {
+ return stackLibDir() + "/hadoop-mapreduce";
+ }
}
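
YarnParams follows the same pattern: the ResourceManager endpoints are derived from the first resourcemanager host, and the NodeManager log/local directories are read back from yarn-site so the YarnSetup hunk below can create them. A hypothetical-host sketch of the values yarnSite() ends up holding (illustrative only, not the project's code):

import java.text.MessageFormat;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class YarnSiteDefaultsDemo {
    public static void main(String[] args) {
        // Hypothetical host standing in for LocalSettings.hosts("resourcemanager").
        List<String> resourcemanagerList = List.of("rm-host-1.example.com");

        Map<String, Object> yarnSite = new HashMap<>();
        if (!resourcemanagerList.isEmpty()) {
            String rm = resourcemanagerList.get(0);
            yarnSite.put("yarn.resourcemanager.hostname", rm);
            yarnSite.put("yarn.resourcemanager.address", MessageFormat.format("{0}:8050", rm));
            yarnSite.put("yarn.resourcemanager.webapp.address", MessageFormat.format("{0}:8088", rm));
        }

        // Illustrative defaults for the directories YarnSetup creates below.
        yarnSite.putIfAbsent("yarn.nodemanager.log-dirs", "/hadoop/yarn/log");
        yarnSite.putIfAbsent("yarn.nodemanager.local-dirs", "/hadoop/yarn/local");
        String nodemanagerLogDir = (String) yarnSite.get("yarn.nodemanager.log-dirs");
        String nodemanagerLocalDir = (String) yarnSite.get("yarn.nodemanager.local-dirs");

        System.out.println(yarnSite);
        System.out.println(nodemanagerLogDir + " | " + nodemanagerLocalDir);
    }
}
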
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnSetup.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnSetup.java
index 176a2bb4..78240567 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnSetup.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/yarn/YarnSetup.java
@@ -54,9 +54,6 @@ public static ShellResult config(Params params, String componentName) {
if (StringUtils.isNotBlank(componentName)) {
switch (componentName) {
case "resourcemanager": {
- LinuxFileUtils.createDirectories(
- yarnParams.getTmpDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
-
LinuxFileUtils.toFileByTemplate(
yarnParams.excludeNodesContent(),
yarnParams.getRmNodesExcludeDir(),
@@ -65,6 +62,12 @@ public static ShellResult config(Params params, String componentName) {
Constants.PERMISSION_644,
yarnParams.getGlobalParamsMap());
}
+ case "nodemanager": {
+ LinuxFileUtils.createDirectories(
+ yarnParams.getNodemanagerLogDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
+ LinuxFileUtils.createDirectories(
+ yarnParams.getNodemanagerLocalDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
+ }
}
}
@@ -73,6 +76,8 @@ public static ShellResult config(Params params, String componentName) {
yarnParams.getYarnLogDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
LinuxFileUtils.createDirectories(
yarnParams.getYarnPidDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
+ LinuxFileUtils.createDirectories(
+ yarnParams.getTmpDir(), yarnUser, yarnGroup, Constants.PERMISSION_755, true);
// hdfs.limits
LinuxFileUtils.toFileByTemplate(