{
"configuration_attributes": {
"mapred-site": {},
"sqoop-env": {},
"pig-env": {},
"ams-hbase-env": {},
"webhcat-env": {},
"kerberos-env": {},
"hive-log4j": {},
"hcat-env": {},
"tez-site": {},
"hdfs-site": {},
"ams-env": {},
"pig-properties": {},
"ams-site": {},
"ams-hbase-policy": {},
"hadoop-policy": {},
"hdfs-log4j": {},
"hbase-site": {},
"hive-site": {},
"hive-exec-log4j": {},
"mapred-env": {},
"ranger-hive-plugin-properties": {},
"ranger-hdfs-plugin-properties": {},
"zoo.cfg": {},
"tez-env": {},
"ranger-hbase-plugin-properties": {},
"oozie-log4j": {},
"hbase-policy": {},
"core-site": {},
"ams-hbase-site": {},
"yarn-env": {},
"ams-hbase-log4j": {},
"hadoop-env": {},
"zookeeper-log4j": {},
"yarn-site": {},
"hive-env": {},
"capacity-scheduler": {},
"hbase-log4j": {},
"oozie-site": {},
"oozie-env": {},
"webhcat-site": {},
"hbase-env": {},
"krb5-conf": {},
"yarn-log4j": {},
"hiveserver2-site": {},
"ams-log4j": {},
"zookeeper-env": {},
"pig-log4j": {},
"cluster-env": {}
},
"commandParams": {
"restart_type": "rolling_upgrade",
"service_package_folder": "common-services/HDFS/2.1.0.2.0/package",
"script": "scripts/journalnode.py",
"upgrade_direction": "upgrade",
"hooks_folder": "HDP/2.0.6/hooks",
"version": "2.2.1.0-2270",
"command_timeout": "1200",
"script_type": "PYTHON"
},
"roleCommand": "CUSTOM_COMMAND",
"kerberosCommandParams": [],
"clusterName": "c1",
"hostname": "c6406.ambari.apache.org",
"hostLevelParams": {
"jdk_location": "http://c6406.ambari.apache.org:8080/resources/",
"ambari_db_rca_password": "mapred",
"java_home": "/usr/jdk64/jdk1.7.0_67",
"java_version": "8",
"ambari_db_rca_url": "jdbc:postgresql://c6406.ambari.apache.org/ambarirca",
"jce_name": "UnlimitedJCEPolicyJDK7.zip",
"custom_command": "RESTART",
"oracle_jdbc_url": "http://c6406.ambari.apache.org:8080/resources//ojdbc6.jar",
"repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/GA/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/GA/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.2.1.0-2340\",\"baseSaved\":true},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"baseSaved\":true}]",
"group_list": "[\"hadoop\",\"users\",\"hdfs\"]",
"agentCacheDir": "/var/lib/ambari-agent/cache",
"stack_version": "2.2",
"stack_name": "HDP",
"db_name": "ambari",
"ambari_db_rca_driver": "org.postgresql.Driver",
"jdk_name": "jdk-7u67-linux-x64.tar.gz",
"ambari_db_rca_username": "mapred",
"db_driver_filename": "mysql-connector-java.jar",
"user_list": "[\"oozie\",\"hive\",\"mapred\",\"hbase\",\"ambari-qa\",\"zookeeper\",\"tez\",\"hdfs\",\"sqoop\",\"hcat\",\"yarn\",\"ams\"]",
"mysql_jdbc_url": "http://c6406.ambari.apache.org:8080/resources//mysql-connector-java.jar"
},
"commandType": "EXECUTION_COMMAND",
"roleParams": {
"component_category": "SLAVE"
},
"serviceName": "HDFS",
"role": "JOURNALNODE",
"forceRefreshConfigTags": [],
"taskId": 611,
"public_hostname": "c6406.ambari.apache.org",
"configurations": {
"mapred-site": {
"mapreduce.jobhistory.address": "c6407.ambari.apache.org:10020",
"mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
"mapreduce.reduce.input.buffer.percent": "0.0",
"mapreduce.output.fileoutputformat.compress": "false",
"mapreduce.framework.name": "yarn",
"mapreduce.map.speculative": "false",
"mapreduce.reduce.shuffle.merge.percent": "0.66",
"yarn.app.mapreduce.am.resource.mb": "682",
"mapreduce.map.java.opts": "-Xmx546m",
"mapreduce.cluster.administrators": " hadoop",
"mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
"mapreduce.job.reduce.slowstart.completedmaps": "0.05",
"mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
"mapreduce.output.fileoutputformat.compress.type": "BLOCK",
"mapreduce.reduce.speculative": "false",
"mapreduce.reduce.java.opts": "-Xmx546m",
"mapreduce.am.max-attempts": "2",
"yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
"mapreduce.reduce.log.level": "INFO",
"mapreduce.map.sort.spill.percent": "0.7",
"mapreduce.job.emit-timeline-data": "false",
"mapreduce.task.io.sort.mb": "273",
"mapreduce.task.timeout": "300000",
"mapreduce.map.memory.mb": "682",
"mapreduce.task.io.sort.factor": "100",
"mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
"mapreduce.reduce.memory.mb": "682",
"mapreduce.jobhistory.principal": "jhs/_HOST@EXAMPLE.COM",
"yarn.app.mapreduce.am.log.level": "INFO",
"mapreduce.map.log.level": "INFO",
"mapreduce.shuffle.port": "13562",
"mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
"mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
"mapreduce.map.output.compress": "false",
"yarn.app.mapreduce.am.staging-dir": "/user",
"mapreduce.reduce.shuffle.parallelcopies": "30",
"mapreduce.reduce.shuffle.input.buffer.percent": "0.7",
"mapreduce.jobhistory.webapp.address": "c6407.ambari.apache.org:19888",
"mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab",
"mapreduce.jobhistory.done-dir": "/mr-history/done",
"mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
"mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
"mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
"yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
"mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
"mapreduce.jobhistory.bind-host": "0.0.0.0",
"mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}"
},
"sqoop-env": {
"content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-{{hive_home}}}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
"sqoop_user": "sqoop"
},
"pig-env": {
"content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
},
"ams-hbase-env": {
"hbase_pid_dir": "/var/run/ambari-metrics-collector/",
"hbase_regionserver_xmn_max": "512m",
"hbase_regionserver_xmn_ratio": "0.2",
"hbase_user": "ams",
"hbase_master_heapsize": "1024m",
"content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HBASE_HEAPSIZE={{hbase_heapsize}}\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{hbase_log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{hbase_pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}\n\n#Specify native libraries of installed Hadoop paltform\n#_HADOOP_IN_PATH=$(PATH=\"${HADOOP_HOME:-${HADOOP_PREFIX}}/bin:$PATH\" which hadoop 2>/dev/null)\n#_HADOOP_CLASSPATH=\"/usr/lib/ams-hbase/lib/*\"\n#_HADOOP_JAVA_LIBRARY_PATH=$(HADOOP_CLASSPATH=\"$_HADOOP_CLASSPATH\" ${_HADOOP_IN_PATH} org.apache.hadoop.hbase.util.GetJavaProperty java.library.path)\n#use embedded native libs\n_HADOOP_NATIVE_LIB=\"/usr/lib/ams-hbase/lib/hadoop-native/\"\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}\"\n\n#\"Unsetting\" HADOOP_HOME to avoid importing HADOOP installed cluster related configs like: /usr/hdp/2.2.0.0-2041/hadoop/conf/ \nexport HADOOP_HOME=`pwd`",
"hbase_regionserver_heapsize": "1024m",
"hbase_log_dir": "/var/log/ambari-metrics-collector"
},
"webhcat-env": {
"content": "\n# The file containing the running pid\nPID_FILE={{webhcat_pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME={{hadoop_home}}"
},
"kerberos-env": {
"kdc_type": "mit-kdc",
"kdc_host": "c6406.ambari.apache.org",
"admin_server_host": "c6406.ambari.apache.org",
"ldap_url": "",
"ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}\n ",
"container_dn": ""
},
"hive-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\nhive.log.threshold=ALL\nhive.root.logger=INFO,DRFA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.log.file=hive.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshold=${hive.log.threshold}\n\n#\n# Daily Rolling File Appender\n#\n# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files\n# for different CLI session.\n#\n# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n\nlog4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\nlog4j.appender.console.encoding=UTF-8\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,DRFA\nlog4j.category.Datastore=ERROR,DRFA\nlog4j.category.Datastore.Schema=ERROR,DRFA\nlog4j.category.JPOX.Datastore=ERROR,DRFA\nlog4j.category.JPOX.Plugin=ERROR,DRFA\nlog4j.category.JPOX.MetaData=ERROR,DRFA\nlog4j.category.JPOX.Query=ERROR,DRFA\nlog4j.category.JPOX.General=ERROR,DRFA\nlog4j.category.JPOX.Enhancer=ERROR,DRFA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA"
},
"hcat-env": {
"content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}"
},
"tez-site": {
"tez.task.get-task.sleep.interval-ms.max": "200",
"tez.task.max-events-per-heartbeat": "500",
"tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.runtime.compress": "true",
"tez.runtime.io.sort.mb": "272",
"tez.generate.debug.artifacts": "false",
"tez.am.log.level": "INFO",
"tez.counters.max.groups": "1000",
"tez.runtime.unordered.output.buffer.size-mb": "51",
"tez.shuffle-vertex-manager.max-src-fraction": "0.4",
"tez.counters.max": "2000",
"tez.task.resource.memory.mb": "682",
"tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService",
"tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
"tez.task.am.heartbeat.counter.interval-ms.max": "4000",
"tez.am.max.app.attempts": "2",
"tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
"tez.am.container.idle.release-timeout-max.millis": "20000",
"tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
"tez.am.container.idle.release-timeout-min.millis": "10000",
"tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec",
"tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
"tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
"tez.am.container.reuse.enabled": "true",
"tez.session.am.dag.submit.timeout.secs": "300",
"tez.grouping.min-size": "16777216",
"tez.grouping.max-size": "1073741824",
"tez.session.client.timeout.secs": "-1",
"tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
"tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.staging-dir": "/tmp/${user.name}/staging",
"tez.am.am-rm.heartbeat.interval-ms.max": "250",
"tez.am.maxtaskfailures.per.node": "10",
"tez.am.container.reuse.non-local-fallback.enabled": "false",
"tez.am.container.reuse.locality.delay-allocation-millis": "250",
"tez.am.container.reuse.rack-fallback.enabled": "true",
"tez.grouping.split-waves": "1.7",
"tez.shuffle-vertex-manager.min-src-fraction": "0.2",
"tez.am.resource.memory.mb": "1364"
},
"hdfs-site": {
"dfs.namenode.avoid.write.stale.datanode": "true",
"dfs.permissions.superusergroup": "hdfs",
"dfs.ha.namenodes.ha": "nn1,nn2",
"dfs.namenode.avoid.read.stale.datanode": "true",
"dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
"dfs.namenode.checkpoint.txns": "1000000",
"dfs.block.access.token.enable": "true",
"dfs.support.append": "true",
"dfs.datanode.address": "0.0.0.0:1019",
"dfs.cluster.administrators": "hdfs",
"dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
"dfs.datanode.balance.bandwidthPerSec": "6250000",
"dfs.namenode.startup.delay.block.deletion.sec": "3600",
"dfs.namenode.safemode.threshold-pct": "0.99f",
"dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
"dfs.permissions.enabled": "true",
"dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
"dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
"dfs.https.port": "50470",
"dfs.namenode.https-address": "c6406.ambari.apache.org:50470",
"dfs.ha.automatic-failover.enabled": "true",
"dfs.namenode.http-address.ha.nn2": "c6407.ambari.apache.org:50070",
"dfs.blockreport.initialDelay": "120",
"dfs.namenode.http-address.ha.nn1": "c6406.ambari.apache.org:50070",
"dfs.journalnode.edits.dir": "/hadoop/hdfs/journal",
"dfs.blocksize": "134217728",
"dfs.client.read.shortcircuit": "false",
"dfs.datanode.max.transfer.threads": "4096",
"dfs.heartbeat.interval": "3",
"dfs.replication": "3",
"dfs.namenode.handler.count": "100",
"dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
"fs.permissions.umask-mode": "022",
"dfs.namenode.stale.datanode.interval": "30000",
"dfs.datanode.ipc.address": "0.0.0.0:8010",
"dfs.namenode.rpc-address.ha.nn2": "c6407.ambari.apache.org:8020",
"dfs.namenode.rpc-address.ha.nn1": "c6406.ambari.apache.org:8020",
"dfs.nameservices": "ha",
"dfs.datanode.data.dir": "/hadoop/hdfs/data",
"dfs.namenode.http-address": "c6406.ambari.apache.org:50070",
"dfs.webhdfs.enabled": "false",
"dfs.namenode.accesstime.precision": "0",
"dfs.datanode.failed.volumes.tolerated": "0",
"dfs.namenode.https-address.ha.nn2": "c6407.ambari.apache.org:50470",
"dfs.namenode.https-address.ha.nn1": "c6406.ambari.apache.org:50470",
"dfs.datanode.https.address": "0.0.0.0:50475",
"dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
"dfs.namenode.shared.edits.dir": "qjournal://c6406.ambari.apache.org:8485;c6407.ambari.apache.org:8485;c6408.ambari.apache.org:8485/ha",
"dfs.ha.fencing.methods": "shell(/bin/true)",
"dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
"dfs.datanode.http.address": "0.0.0.0:1022",
"dfs.datanode.du.reserved": "1073741824",
"dfs.client.read.shortcircuit.streams.cache.size": "4096",
"dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
"dfs.http.policy": "HTTP_ONLY",
"dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
"dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
"dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
"dfs.journalnode.https-address": "0.0.0.0:8481",
"dfs.journalnode.http-address": "0.0.0.0:8480",
"dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
"dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
"dfs.namenode.write.stale.datanode.ratio": "1.0f",
"dfs.client.failover.proxy.provider.ha": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
"dfs.datanode.data.dir.perm": "750",
"dfs.namenode.name.dir.restore": "true",
"dfs.replication.max": "50",
"dfs.namenode.checkpoint.period": "21600"
},
"ams-env": {
"ams_user": "ams",
"content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# Collector Log directory for log4j\nexport AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}\n\n# Monitor Log directory for outfile\nexport AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}\n\n# Collector pid directory\nexport AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}\n\n# Monitor pid directory\nexport AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}\n\n# AMS HBase pid directory\nexport AMS_HBASE_PID_DIR={{hbase_pid_dir}}",
"ams_monitor_log_dir": "/var/log/ambari-metrics-monitor",
"ams_monitor_pid_dir": "/var/run/ambari-metrics-monitor",
"ams_collector_log_dir": "/var/log/ambari-metrics-collector",
"ams_collector_pid_dir": "/var/run/ambari-metrics-collector"
},
"pig-properties": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.\n# see bin/pig -help\n\n# brief logging (no timestamps)\nbrief=false\n\n# debug level, INFO is default\ndebug=INFO\n\n# verbose print all log messages to screen (default to print only INFO and above to screen)\nverbose=false\n\n# exectype local|mapreduce, mapreduce is default\nexectype=mapreduce\n\n# Enable insertion of information about script into hadoop job conf \npig.script.info.enabled=true\n\n# Do not spill temp files smaller than this size (bytes)\npig.spill.size.threshold=5000000\n\n# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)\n# This should help reduce the number of files being spilled.\npig.spill.gc.activation.size=40000000\n\n# the following two parameters are to help estimate the reducer number\npig.exec.reducers.bytes.per.reducer=1000000000\npig.exec.reducers.max=999\n\n# Temporary location to store the intermediate data.\npig.temp.dir=/tmp/\n\n# Threshold for merging FRJoin fragment files\npig.files.concatenation.threshold=100\npig.optimistic.files.concatenation=false;\n\npig.disable.counter=false\n\n# Avoid pig failures when multiple jobs write to the same location\npig.location.check.strict=false\n\nhcat.bin=/usr/bin/hcat"
},
"ams-site": {
"timeline.metrics.host.aggregator.minute.ttl": "604800",
"timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2",
"timeline.metrics.cluster.aggregator.hourly.disabled": "false",
"timeline.metrics.cluster.aggregator.minute.timeslice.interval": "15",
"timeline.metrics.service.resultset.fetchSize": "2000",
"timeline.metrics.service.checkpointDelay": "60",
"timeline.metrics.cluster.aggregator.hourly.ttl": "31536000",
"timeline.metrics.hbase.compression.scheme": "SNAPPY",
"timeline.metrics.cluster.aggregator.hourly.interval": "3600",
"timeline.metrics.host.aggregator.ttl": "86400",
"timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2",
"timeline.metrics.service.webapp.address": "0.0.0.0:6188",
"timeline.metrics.aggregator.checkpoint.dir": "/var/lib/ambari-metrics-collector/checkpoint",
"timeline.metrics.host.aggregator.minute.disabled": "false",
"timeline.metrics.cluster.aggregator.minute.ttl": "2592000",
"timeline.metrics.service.operation.mode": "embedded",
"timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2",
"timeline.metrics.host.aggregator.hourly.disabled": "false",
"timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2",
"timeline.metrics.service.rpc.address": "0.0.0.0:60200",
"timeline.metrics.cluster.aggregator.minute.disabled": "false",
"timeline.metrics.host.aggregator.hourly.ttl": "2592000",
"timeline.metrics.host.aggregator.minute.interval": "300",
"timeline.metrics.service.default.result.limit": "5760",
"timeline.metrics.hbase.data.block.encoding": "FAST_DIFF",
"timeline.metrics.cluster.aggregator.minute.interval": "120",
"timeline.metrics.host.aggregator.hourly.interval": "3600"
},
"ams-hbase-policy": {
"security.admin.protocol.acl": "*",
"security.masterregion.protocol.acl": "*",
"security.client.protocol.acl": "*"
},
"hadoop-policy": {
"security.job.client.protocol.acl": "*",
"security.job.task.protocol.acl": "*",
"security.datanode.protocol.acl": "*",
"security.namenode.protocol.acl": "*",
"security.client.datanode.protocol.acl": "*",
"security.inter.tracker.protocol.acl": "*",
"security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
"security.client.protocol.acl": "*",
"security.refresh.policy.protocol.acl": "hadoop",
"security.admin.operations.protocol.acl": "hadoop",
"security.inter.datanode.protocol.acl": "*"
},
"hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
},
"hbase-site": {
"hbase.master.info.bindAddress": "0.0.0.0",
"hbase.client.keyvalue.maxsize": "10485760",
"hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
"hbase.hstore.compactionThreshold": "3",
"hbase.hregion.majorcompaction.jitter": "0.50",
"hbase.rootdir": "hdfs://ha/apps/hbase/data",
"hbase.regionserver.handler.count": "60",
"hbase.hregion.majorcompaction": "604800000",
"hbase.master.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
"hbase.hregion.memstore.block.multiplier": "4",
"hbase.hregion.memstore.flush.size": "134217728",
"hbase.superuser": "hbase",
"hbase.regionserver.global.memstore.lowerLimit": "0.38",
"hbase.zookeeper.property.clientPort": "2181",
"hbase.hregion.max.filesize": "10737418240",
"hbase.regionserver.global.memstore.upperLimit": "0.4",
"zookeeper.session.timeout": "30000",
"hbase.client.scanner.caching": "100",
"hbase.tmp.dir": "/hadoop/hbase",
"hbase.regionserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM",
"hfile.block.cache.size": "0.40",
"hbase.security.authentication": "kerberos",
"hbase.defaults.for.version.skip": "true",
"hbase.master.info.port": "60010",
"hbase.zookeeper.quorum": "c6408.ambari.apache.org,c6406.ambari.apache.org,c6407.ambari.apache.org",
"hbase.regionserver.info.port": "60030",
"zookeeper.znode.parent": "/hbase-unsecure",
"hbase.zookeeper.useMulti": "true",
"hbase.hstore.blockingStoreFiles": "10",
"hbase.master.port": "60000",
"hbase.security.authorization": "true",
"hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab",
"hbase.local.dir": "${hbase.tmp.dir}/local",
"hbase.cluster.distributed": "true",
"hbase.hregion.memstore.mslab.enabled": "false",
"dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
"hbase.coprocessor.region.classes": "",
"hbase.coprocessor.master.classes": ""
},
"hive-site": {
"javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
"hive.fetch.task.aggr": "false",
"hive.execution.engine": "mr",
"hive.tez.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
"hive.server2.table.type.mapping": "CLASSIC",
"hive.tez.min.partition.factor": "0.25",
"hive.tez.cpu.vcores": "-1",
"hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
"hive.stats.dbclass": "fs",
"hive.tez.auto.reducer.parallelism": "false",
"hive.auto.convert.sortmerge.join.to.mapjoin": "false",
"hive.server2.thrift.http.path": "cliservice",
"hive.exec.scratchdir": "/tmp/hive",
"hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
"hive.zookeeper.namespace": "hive_zookeeper_namespace",
"hive.cbo.enable": "true",
"hive.optimize.index.filter": "true",
"hive.optimize.bucketmapjoin": "true",
"hive.mapjoin.bucket.cache.size": "10000",
"hive.limit.optimize.enable": "true",
"hive.fetch.task.conversion.threshold": "1073741824",
"hive.exec.max.dynamic.partitions": "5000",
"hive.metastore.sasl.enabled": "true",
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
"hive.optimize.constant.propagation": "true",
"hive.exec.submitviachild": "false",
"hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
"hive.txn.max.open.batch": "1000",
"hive.exec.compress.output": "false",
"hive.merge.size.per.task": "256000000",
"hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
"hive.merge.mapfiles": "true",
"hive.compactor.initiator.on": "false",
"hive.mapjoin.optimized.hashtable": "true",
"hive.optimize.metadataonly": "true",
"hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
"hive.server2.thrift.max.worker.threads": "500",
"hive.optimize.sort.dynamic.partition": "false",
"hive.server2.thrift.http.port": "10001",
"hive.metastore.pre.event.listeners": "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener",
"hive.metastore.failure.retries": "24",
"hive.merge.smallfiles.avgsize": "16000000",
"hive.tez.max.partition.factor": "2.0",
"hive.server2.transport.mode": "binary",
"hive.tez.container.size": "682",
"hive.optimize.bucketmapjoin.sortedmerge": "false",
"hive.compactor.worker.threads": "0",
"hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
"hive.server2.thrift.port": "10000",
"hive.map.aggr.hash.percentmemory": "0.5",
"hive.user.install.directory": "/user/",
"hive.compute.query.using.stats": "true",
"hive.merge.rcfile.block.level": "true",
"hive.map.aggr": "true",
"hive.metastore.client.connect.retry.delay": "5s",
"hive.security.authorization.enabled": "true",
"hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
"hive.server2.tez.default.queues": "default",
"hive.prewarm.enabled": "false",
"hive.exec.reducers.max": "1009",
"hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
"hive.stats.fetch.partition.stats": "true",
"hive.cli.print.header": "false",
"hive.server2.thrift.sasl.qop": "auth",
"hive.server2.support.dynamic.service.discovery": "false",
"hive.fetch.task.conversion": "more",
"hive.exec.reducers.bytes.per.reducer": "67108864",
"hive.compactor.abortedtxn.threshold": "1000",
"hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
"hive.metastore.warehouse.dir": "/apps/hive/warehouse",
"hive.metastore.client.socket.timeout": "1800s",
"hive.server2.zookeeper.namespace": "hiveserver2",
"hive.prewarm.numcontainers": "10",
"hive.vectorized.groupby.flush.percent": "0.1",
"hive.server2.enable.doAs": "true",
"hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
"hive.server2.use.SSL": "false",
"hive.enforce.bucketing": "true",
"hive.server2.authentication.spnego.keytab": "/etc/security/keytabs/spnego.service.keytab",
"hive.mapred.reduce.tasks.speculative.execution": "false",
"javax.jdo.option.ConnectionURL": "jdbc:mysql://c6407.ambari.apache.org/hive?createDatabaseIfNotExist=true",
"hive.exec.dynamic.partition.mode": "nonstrict",
"hive.auto.convert.sortmerge.join": "true",
"hive.zookeeper.quorum": "c6408.ambari.apache.org:2181,c6406.ambari.apache.org:2181,c6407.ambari.apache.org:2181",
"hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
"hive.exec.parallel": "false",
"hive.stats.fetch.column.stats": "false",
"hive.enforce.sorting": "true",
"hive.txn.timeout": "300",
"hive.metastore.authorization.storage.checks": "false",
"hive.exec.orc.default.stripe.size": "67108864",
"hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order",
"hive.server2.logging.operation.enabled": "true",
"hive.merge.tezfiles": "false",
"hive.exec.parallel.thread.number": "8",
"hive.auto.convert.join.noconditionaltask": "true",
"hive.server2.authentication.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
"hive.server2.authentication.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
"hive.compactor.worker.timeout": "86400L",
"hive.optimize.null.scan": "true",
"hive.server2.tez.initialize.default.sessions": "false",
"datanucleus.cache.level2.type": "none",
"hive.stats.autogather": "true",
"hive.auto.convert.join": "true",
"hive.exec.submit.local.task.via.child": "true",
"hive.merge.mapredfiles": "false",
"hive.vectorized.execution.enabled": "true",
"hive.cluster.delegation.token.store.zookeeper.connectString": "c6408.ambari.apache.org:2181,c6406.ambari.apache.org:2181,c6407.ambari.apache.org:2181",
"hive.vectorized.execution.reduce.enabled": "false",
"hive.optimize.reducededuplication": "true",
"hive.server2.tez.sessions.per.default.queue": "1",
"hive.exec.max.dynamic.partitions.pernode": "2000",
"hive.tez.dynamic.partition.pruning": "true",
"hive.limit.pushdown.memory.usage": "0.04",
"hive.security.metastore.authorization.auth.reads": "true",
"ambari.hive.db.schema.name": "hive",
"hive.vectorized.groupby.checkinterval": "4096",
"hive.smbjoin.cache.rows": "10000",
"hive.metastore.execute.setugi": "true",
"hive.zookeeper.client.port": "2181",
"hive.vectorized.groupby.maxentries": "100000",
"hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
"hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
"hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab",
"javax.jdo.option.ConnectionPassword": "admin",
"hive.exec.max.created.files": "100000",
"hive.map.aggr.hash.min.reduction": "0.5",
"hive.orc.splits.include.file.footer": "false",
"hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
"hive.merge.orcfile.stripe.level": "true",
"hive.exec.orc.default.compress": "ZLIB",
"hive.server2.allow.user.substitution": "true",
"hive.metastore.connect.retries": "24",
"hive.metastore.server.max.threads": "100000",
"hive.exec.orc.compression.strategy": "SPEED",
"hive.optimize.reducededuplication.min.reducer": "4",
"hive.metastore.uris": "thrift://c6407.ambari.apache.org:9083",
"hive.enforce.sortmergebucketmapjoin": "true",
"hive.auto.convert.join.noconditionaltask.size": "238026752",
"javax.jdo.option.ConnectionUserName": "hive",
"hive.tez.log.level": "INFO",
"hive.compactor.delta.num.threshold": "10",
"hive.exec.dynamic.partition": "true",
"hive.server2.authentication": "KERBEROS",
"hive.exec.compress.intermediate": "false",
"hive.orc.compute.splits.num.threads": "10",
"hive.tez.smb.number.waves": "0.5",
"hive.convert.join.bucket.mapjoin.tez": "false",
"hive.server2.logging.operation.log.location": "${system:java.io.tmpdir}/${system:user.name}/operation_logs",
"hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat",
"hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
"hive.support.concurrency": "false",
"hive.compactor.check.interval": "300L",
"hive.compactor.delta.pct.threshold": "0.1f"
},
"hive-exec-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\n\nhive.log.threshold=ALL\nhive.root.logger=INFO,FA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.query.id=hadoop\nhive.log.file=${hive.query.id}.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=${hive.log.threshold}\n\n#\n# File Appender\n#\n\nlog4j.appender.FA=org.apache.log4j.FileAppender\nlog4j.appender.FA.File=${hive.log.dir}/${hive.log.file}\nlog4j.appender.FA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,FA\nlog4j.category.Datastore=ERROR,FA\nlog4j.category.Datastore.Schema=ERROR,FA\nlog4j.category.JPOX.Datastore=ERROR,FA\nlog4j.category.JPOX.Plugin=ERROR,FA\nlog4j.category.JPOX.MetaData=ERROR,FA\nlog4j.category.JPOX.Query=ERROR,FA\nlog4j.category.JPOX.General=ERROR,FA\nlog4j.category.JPOX.Enhancer=ERROR,FA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA"
},
"mapred-env": {
"content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
"mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
"mapred_user": "mapred",
"jobhistory_heapsize": "900",
"mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
},
"ranger-hive-plugin-properties": {
"XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
"XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
"XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
"common.name.for.certificate": "-",
"XAAUDIT.HDFS.IS_ENABLED": "false",
"XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
"SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
"XAAUDIT.DB.IS_ENABLED": "true",
"XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
"SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
"XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
"UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
"XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
"enable_hdfs_plugin": "false",
"XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
"XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
"SSL_TRUSTSTORE_PASSWORD": "changeit",
"XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
"ranger-hive-plugin-enabled": "No",
"REPOSITORY_CONFIG_USERNAME": "hive",
"jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
"XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
"SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
"REPOSITORY_CONFIG_PASSWORD": "hive"
},
"ranger-hdfs-plugin-properties": {
"XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
"XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
"XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
"common.name.for.certificate": "-",
"XAAUDIT.HDFS.IS_ENABLED": "false",
"XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
"SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
"XAAUDIT.DB.IS_ENABLED": "false",
"XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
"hadoop.rpc.protection": "-",
"ranger-hdfs-plugin-enabled": "No",
"SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
"XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
"XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
"enable_hdfs_plugin": "false",
"XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
"XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
"SSL_TRUSTSTORE_PASSWORD": "changeit",
"XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
"REPOSITORY_CONFIG_USERNAME": "hadoop",
"XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
"SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
"REPOSITORY_CONFIG_PASSWORD": "hadoop"
},
"zoo.cfg": {
"clientPort": "2181",
"autopurge.purgeInterval": "24",
"syncLimit": "5",
"dataDir": "/hadoop/zookeeper",
"initLimit": "10",
"tickTime": "2000",
"autopurge.snapRetainCount": "30"
},
"tez-env": {
"content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}",
"tez_user": "tez"
},
"ranger-hbase-plugin-properties": {
"XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
"XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
"XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
"XAAUDIT.HDFS.IS_ENABLED": "false",
"XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
"ranger-hbase-plugin-enabled": "No",
"SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
"XAAUDIT.DB.IS_ENABLED": "true",
"XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
"SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
"XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
"UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
"XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
"enable_hdfs_plugin": "false",
"XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
"XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
"SSL_TRUSTSTORE_PASSWORD": "changeit",
"XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
"REPOSITORY_CONFIG_USERNAME": "hbase",
"XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
"SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
"REPOSITORY_CONFIG_PASSWORD": "hbase"
},
"oozie-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time\n# XLogService sets its value to '${oozie.home}/logs'\n\nlog4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.oozie.File=${oozie.log.dir}/oozie.log\nlog4j.appender.oozie.Append=true\nlog4j.appender.oozie.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n\n\nlog4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieops.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log\nlog4j.appender.oozieops.Append=true\nlog4j.appender.oozieops.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log\nlog4j.appender.oozieinstrumentation.Append=true\nlog4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log\nlog4j.appender.oozieaudit.Append=true\nlog4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.openjpa.DatePattern='.'yyyy-MM-dd\nlog4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log\nlog4j.appender.openjpa.Append=true\nlog4j.appender.openjpa.layout=org.apache.log4j.PatternLayout\nlog4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.logger.openjpa=INFO, openjpa\nlog4j.logger.oozieops=INFO, oozieops\nlog4j.logger.oozieinstrumentation=ALL, oozieinstrumentation\nlog4j.logger.oozieaudit=ALL, oozieaudit\nlog4j.logger.org.apache.oozie=INFO, oozie\nlog4j.logger.org.apache.hadoop=WARN, oozie\nlog4j.logger.org.mortbay=WARN, oozie\nlog4j.logger.org.hsqldb=WARN, oozie\nlog4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie"
},
"hbase-policy": {
"security.admin.protocol.acl": "*",
"security.masterregion.protocol.acl": "*",
"security.client.protocol.acl": "*"
},
"core-site": {
"proxyuser_group": "users",
"hadoop.proxyuser.hcat.hosts": "c6407.ambari.apache.org",
"hadoop.proxyuser.hcat.groups": "users",
"fs.trash.interval": "360",
"hadoop.proxyuser.hive.groups": "users",
"ipc.server.tcpnodelay": "true",
"io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
"ipc.client.idlethreshold": "8000",
"io.file.buffer.size": "131072",
"ha.zookeeper.quorum": "c6406.ambari.apache.org:2181,c6407.ambari.apache.org:2181,c6408.ambari.apache.org:2181",
"io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
"hadoop.rpc.protection": "authentication",
"hadoop.security.authentication": "kerberos",
"mapreduce.jobtracker.webinterface.trusted": "false",
"hadoop.proxyuser.hive.hosts": "c6407.ambari.apache.org",
"fs.defaultFS": "hdfs://ha",
"hadoop.proxyuser.oozie.groups": "*",
"ha.failover-controller.active-standby-elector.zk.op.retries": "120",
"hadoop.security.authorization": "true",
"hadoop.http.authentication.simple.anonymous.allowed": "true",
"ipc.client.connect.max.retries": "50",
"hadoop.security.auth_to_local": "RULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hive@EXAMPLE.COM)s/.*/hive/\nRULE:[2:$1@$0](jhs@EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](oozie@EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](rm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\nRULE:[1:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT",
"hadoop.proxyuser.oozie.hosts": "c6407.ambari.apache.org",
"ipc.client.connection.maxidletime": "30000"
},
"ams-hbase-site": {
"hbase.master.info.bindAddress": "0.0.0.0",
"hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper",
"hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase",
"hbase.replication": "false",
"hbase.hregion.majorcompaction": "0",
"hbase.hregion.memstore.block.multiplier": "4",
"hbase.hregion.memstore.flush.size": "134217728",
"hbase.regionserver.global.memstore.lowerLimit": "0.4",
"hbase.zookeeper.property.clientPort": "61181",
"hbase.client.scanner.timeout.period": "900000",
"phoenix.groupby.maxCacheSize": "307200000",
"hbase.snapshot.enabled": "false",
"hbase.master.wait.on.regionservers.mintostart": "1",
"hbase.regionserver.global.memstore.upperLimit": "0.5",
"phoenix.query.spoolThresholdBytes": "12582912",
"zookeeper.session.timeout": "120000",
"hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp",
"hfile.block.cache.size": "0.3",
"hbase.regionserver.port": "61320",
"hbase.regionserver.thread.compaction.small": "3",
"hbase.master.info.port": "61310",
"hbase.zookeeper.quorum": "localhost",
"hbase.regionserver.info.port": "61330",
"hbase.hstore.blockingStoreFiles": "200",
"hbase.master.port": "61300",
"hbase.zookeeper.leaderport": "61388",
"hbase.regionserver.thread.compaction.large": "2",
"phoenix.query.timeoutMs": "1200000",
"hbase.local.dir": "${hbase.tmp.dir}/local",
"hbase.cluster.distributed": "false",
"hbase.client.scanner.caching": "10000",
"phoenix.sequence.saltBuckets": "2",
"hbase.hstore.flusher.count": "2",
"hbase.zookeeper.peerport": "61288"
},
"yarn-env": {
"yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
"apptimelineserver_heapsize": "1024",
"nodemanager_heapsize": "1024",
"content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n #echo \"run java in $JAVA_HOME\"\n JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n echo \"Error: JAVA_HOME is not set.\"\n exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n JAVA_HEAP_MAX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. 
For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
"yarn_heapsize": "1024",
"min_user_id": "500",
"yarn_user": "yarn",
"resourcemanager_heapsize": "1024",
"yarn_log_dir_prefix": "/var/log/hadoop-yarn"
},
"ams-hbase-log4j": {
"content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n\n # Define some default values that can be overridden by system properties\n hbase.root.logger=INFO,console\n hbase.security.logger=INFO,console\n hbase.log.dir=.\n hbase.log.file=hbase.log\n\n # Define the root logger to the system property \"hbase.root.logger\".\n log4j.rootLogger=${hbase.root.logger}\n\n # Logging Threshold\n log4j.threshold=ALL\n\n #\n # Daily Rolling File Appender\n #\n log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n # Rollver at midnight\n log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n # 30-day backup\n #log4j.appender.DRFA.MaxBackupIndex=30\n log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n # Pattern format: Date LogLevel LoggerName LogMessage\n log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n # Rolling File Appender properties\n hbase.log.maxfilesize=256MB\n hbase.log.maxbackupindex=20\n\n # Rolling File Appender\n log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\n log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\n log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\n log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n #\n # Security audit appender\n #\n hbase.security.log.file=SecurityAuth.audit\n hbase.security.log.maxfilesize=256MB\n hbase.security.log.maxbackupindex=20\n log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\n log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\n log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\n log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n log4j.category.SecurityLogger=${hbase.security.logger}\n log4j.additivity.SecurityLogger=false\n #log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n #\n # Null Appender\n #\n log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n #\n # console\n # Add \"console\" to rootlogger above if you want to use this\n #\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n # Custom Logging levels\n\n log4j.logger.org.apache.zookeeper=INFO\n #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n log4j.logger.org.apache.hadoop.hbase=INFO\n # Make 
these two classes INFO-level. Make them DEBUG to see more zk debug.\n log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\n log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n #log4j.logger.org.apache.hadoop.dfs=DEBUG\n # Set this class to log INFO only otherwise its OTT\n # Enable this to get detailed connection error/retry logging.\n # log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n # Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n #log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n # Uncomment the below if you want to remove logging of client region caching'\n # and scan of .META. messages\n # log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n # log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO"
},
"hadoop-env": {
"dtnode_heapsize": "1024m",
"namenode_opt_maxnewsize": "256m",
"hdfs_log_dir_prefix": "/var/log/hadoop",
"namenode_heapsize": "1024m",
"proxyuser_group": "users",
"hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
"hadoop_pid_dir_prefix": "/var/run/hadoop",
"content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. 
$HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
"hdfs_user": "hdfs",
"namenode_opt_newsize": "256m",
"hadoop_root_logger": "INFO,RFA",
"hadoop_heapsize": "1024",
"namenode_opt_maxpermsize": "256m",
"namenode_opt_permsize": "128m",
"hdfs_principal_name": "hdfs@EXAMPLE.COM"
},
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
},
"yarn-site": {
"yarn.timeline-service.http-authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"yarn.resourcemanager.webapp.address": "c6407.ambari.apache.org:8088",
"yarn.resourcemanager.zk-num-retries": "1000",
"yarn.timeline-service.bind-host": "0.0.0.0",
"yarn.resourcemanager.ha.enabled": "false",
"yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
"yarn.timeline-service.webapp.address": "c6407.ambari.apache.org:8188",
"yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM",
"yarn.timeline-service.enabled": "true",
"yarn.nodemanager.recovery.enabled": "true",
"yarn.timeline-service.http-authentication.type": "kerberos",
"yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab",
"yarn.resourcemanager.hostname": "c6407.ambari.apache.org",
"yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
"yarn.resourcemanager.am.max-attempts": "2",
"yarn.nodemanager.log-aggregation.debug-enabled": "false",
"yarn.resourcemanager.system-metrics-publisher.enabled": "true",
"yarn.nodemanager.vmem-pmem-ratio": "2.1",
"yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
"yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
"yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
"yarn.log.server.url": "http://c6407.ambari.apache.org:19888/jobhistory/logs",
"yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM",
"yarn.timeline-service.keytab": "/etc/security/keytabs/yarn.service.keytab",
"yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
"yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
"yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab",
"yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM",
"yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
"yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
"yarn.nodemanager.remote-app-log-dir-suffix": "logs",
"yarn.resourcemanager.connect.max-wait.ms": "900000",
"yarn.resourcemanager.address": "c6407.ambari.apache.org:8050",
"yarn.scheduler.maximum-allocation-mb": "2048",
"yarn.nodemanager.container-monitor.interval-ms": "3000",
"yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
"yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
"yarn.timeline-service.address": "c6407.ambari.apache.org:10200",
"yarn.log-aggregation-enable": "false",
"yarn.nodemanager.delete.debug-delay-sec": "0",
"yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore",
"yarn.timeline-service.client.retry-interval-ms": "1000",
"hadoop.registry.zk.quorum": "c6408.ambari.apache.org:2181,c6406.ambari.apache.org:2181,c6407.ambari.apache.org:2181",
"yarn.nodemanager.aux-services": "mapreduce_shuffle",
"yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
"yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
"yarn.resourcemanager.zk-timeout-ms": "10000",
"yarn.resourcemanager.fs.state-store.uri": " ",
"yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.remote-app-log-dir": "/app-logs",
"yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
"yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
"yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
"yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
"yarn.resourcemanager.work-preserving-recovery.enabled": "true",
"yarn.resourcemanager.resource-tracker.address": "c6407.ambari.apache.org:8025",
"yarn.nodemanager.health-checker.script.timeout-ms": "60000",
"yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
"yarn.nodemanager.resource.memory-mb": "2048",
"yarn.nodemanager.resource.cpu-vcores": "1",
"yarn.timeline-service.ttl-ms": "2678400000",
"yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
"yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
"yarn.nodemanager.log.retain-second": "604800",
"yarn.timeline-service.principal": "yarn/_HOST@EXAMPLE.COM",
"yarn.nodemanager.log-dirs": "/hadoop/yarn/log",
"yarn.timeline-service.client.max-retries": "30",
"yarn.nodemanager.health-checker.interval-ms": "135000",
"yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
"yarn.nodemanager.vmem-check-enabled": "false",
"yarn.acl.enable": "true",
"yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
"yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
"yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
"yarn.client.nodemanager-connect.max-wait-ms": "60000",
"yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
"yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
"yarn.resourcemanager.bind-host": "0.0.0.0",
"yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
"yarn.resourcemanager.scheduler.address": "c6407.ambari.apache.org:8030",
"yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state",
"yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
"yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
"yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab",
"yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
"yarn.scheduler.minimum-allocation-mb": "682",
"yarn.timeline-service.ttl-enable": "false",
"yarn.resourcemanager.zk-address": "c6408.ambari.apache.org:2181",
"yarn.log-aggregation.retain-seconds": "2592000",
"yarn.nodemanager.address": "0.0.0.0:45454",
"hadoop.registry.rm.enabled": "false",
"yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000",
"yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
"yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
"yarn.nodemanager.log-aggregation.compression-type": "gz",
"yarn.timeline-service.http-authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
"yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
"yarn.resourcemanager.recovery.enabled": "true",
"yarn.nodemanager.bind-host": "0.0.0.0",
"yarn.resourcemanager.zk-retry-interval-ms": "1000",
"yarn.admin.acl": "",
"yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
"yarn.client.nodemanager-connect.retry-interval-ms": "10000",
"yarn.resourcemanager.admin.address": "c6407.ambari.apache.org:8141",
"yarn.timeline-service.webapp.https.address": "c6407.ambari.apache.org:8190",
"yarn.resourcemanager.connect.retry-interval.ms": "30000",
"yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000"
},
"hive-env": {
"hive_existing_postgresql_host": "c6407.ambari.apache.org",
"hcat_pid_dir": "/var/run/webhcat",
"hcat_user": "hcat",
"hive_ambari_database": "MySQL",
"hive_hostname": "c6407.ambari.apache.org",
"hive_ambari_host": "c6407.ambari.apache.org",
"hive_existing_mssql_server_2_host": "c6407.ambari.apache.org",
"hive_metastore_port": "9083",
"hive_existing_mssql_server_host": "c6407.ambari.apache.org",
"webhcat_user": "hcat",
"content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can be controlled by:\nexport HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog\nfi\n\nexport METASTORE_PORT={{hive_metastore_port}}",
"hive_database_name": "hive",
"hive_database_type": "mysql",
"hive_pid_dir": "/var/run/hive",
"hive_log_dir": "/var/log/hive",
"hive_existing_mysql_host": "c6407.ambari.apache.org",
"hive_user": "hive",
"hcat_log_dir": "/var/log/webhcat",
"hive_existing_oracle_host": "c6407.ambari.apache.org",
"hive_database": "New MySQL Database"
},
"capacity-scheduler": {
"yarn.scheduler.capacity.default.minimum-user-limit-percent": "100",
"yarn.scheduler.capacity.root.default.maximum-capacity": "100",
"yarn.scheduler.capacity.root.default.user-limit-factor": "1",
"yarn.scheduler.capacity.root.accessible-node-labels": "*",
"yarn.scheduler.capacity.root.default.state": "RUNNING",
"yarn.scheduler.capacity.root.capacity": "100",
"yarn.scheduler.capacity.root.default.capacity": "100",
"yarn.scheduler.capacity.root.queues": "default",
"yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1",
"yarn.scheduler.capacity.root.default-node-label-expression": " ",
"yarn.scheduler.capacity.node-locality-delay": "40",
"yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1",
"yarn.scheduler.capacity.root.default.acl_submit_applications": "*",
"yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
"yarn.scheduler.capacity.root.acl_administer_queue": "*",
"yarn.scheduler.capacity.maximum-applications": "10000",
"yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
"yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator"
},
"hbase-log4j": {
"content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=DEBUG\n# Make these two classes INFO-level. 
Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO"
},
"oozie-site": {
"oozie.service.coord.check.maximum.frequency": "false",
"oozie.service.PurgeService.purge.interval": "3600",
"oozie.service.CallableQueueService.queue.size": "1000",
"oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
"oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
"oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver",
"local.realm": "EXAMPLE.COM",
"use.system.libpath.for.mapreduce.and.pig.jobs": "false",
"oozie.service.HadoopAccessorService.kerberos.enabled": "true",
"oozie.db.schema.name": "oozie",
"oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
"oozie.service.JPAService.create.db.schema": "false",
"oozie.authentication.kerberos.name.rules": "RULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hive@EXAMPLE.COM)s/.*/hive/\nRULE:[2:$1@$0](jhs@EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](oozie@EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](rm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\nRULE:[1:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT",
"oozie.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"oozie.service.ActionService.executor.ext.classes": "\n org.apache.oozie.action.email.EmailActionExecutor,\n org.apache.oozie.action.hadoop.HiveActionExecutor,\n org.apache.oozie.action.hadoop.ShellActionExecutor,\n org.apache.oozie.action.hadoop.SqoopActionExecutor,\n org.apache.oozie.action.hadoop.DistcpActionExecutor",
"oozie.authentication.simple.anonymous.allowed": "true",
"oozie.service.HadoopAccessorService.kerberos.principal": "oozie/_HOST@EXAMPLE.COM",
"oozie.service.AuthorizationService.authorization.enabled": "true",
"oozie.base.url": "http://c6407.ambari.apache.org:11000/oozie",
"oozie.service.JPAService.jdbc.password": "admin",
"oozie.service.coord.normal.default.timeout": "120",
"oozie.service.AuthorizationService.security.enabled": "true",
"oozie.service.JPAService.pool.max.active.conn": "10",
"oozie.service.PurgeService.older.than": "30",
"oozie.service.coord.push.check.requeue.interval": "30000",
"oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf",
"oozie.service.CallableQueueService.callable.concurrency": "3",
"oozie.service.JPAService.jdbc.username": "oozie",
"oozie.service.CallableQueueService.threads": "10",
"oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
"oozie.systemmode": "NORMAL",
"oozie.service.HadoopAccessorService.keytab.file": "/etc/security/keytabs/oozie.service.keytab",
"oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib",
"oozie.services": "\n org.apache.oozie.service.SchedulerService,\n org.apache.oozie.service.InstrumentationService,\n org.apache.oozie.service.MemoryLocksService,\n org.apache.oozie.service.UUIDService,\n org.apache.oozie.service.ELService,\n org.apache.oozie.service.AuthorizationService,\n org.apache.oozie.service.UserGroupInformationService,\n org.apache.oozie.service.HadoopAccessorService,\n org.apache.oozie.service.JobsConcurrencyService,\n org.apache.oozie.service.URIHandlerService,\n org.apache.oozie.service.DagXLogInfoService,\n org.apache.oozie.service.SchemaService,\n org.apache.oozie.service.LiteWorkflowAppService,\n org.apache.oozie.service.JPAService,\n org.apache.oozie.service.StoreService,\n org.apache.oozie.service.CoordinatorStoreService,\n org.apache.oozie.service.SLAStoreService,\n org.apache.oozie.service.DBLiteWorkflowStoreService,\n org.apache.oozie.service.CallbackService,\n org.apache.oozie.service.ShareLibService,\n org.apache.oozie.service.CallableQueueService,\n org.apache.oozie.service.ActionService,\n org.apache.oozie.service.ActionCheckerService,\n org.apache.oozie.service.RecoveryService,\n org.apache.oozie.service.PurgeService,\n org.apache.oozie.service.CoordinatorEngineService,\n org.apache.oozie.service.BundleEngineService,\n org.apache.oozie.service.DagEngineService,\n org.apache.oozie.service.CoordMaterializeTriggerService,\n org.apache.oozie.service.StatusTransitService,\n org.apache.oozie.service.PauseTransitService,\n org.apache.oozie.service.GroupsService,\n org.apache.oozie.service.ProxyUserService,\n org.apache.oozie.service.XLogStreamingService,\n org.apache.oozie.service.JvmPauseMonitorService",
"oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
"oozie.authentication.type": "kerberos",
"oozie.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
"oozie.system.id": "oozie-${user.name}"
},
"oozie-env": {
"oozie_heapsize": "2048m",
"oozie_existing_mysql_host": "c6407.ambari.apache.org",
"oozie_admin_port": "11001",
"oozie_hostname": "c6407.ambari.apache.org",
"oozie_pid_dir": "/var/run/oozie",
"content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n\n# This is needed so that Oozie does not run into OOM or GC Overhead limit\n# exceeded exceptions. If the oozie server is handling large number of\n# workflows/coordinator jobs, the memory settings may need to be revised\nexport CATALINA_OPTS=\"${CATALINA_OPTS} -Xmx2048m -XX:MaxPermSize=256m \"",
"oozie_user": "oozie",
"oozie_derby_database": "Derby",
"oozie_database": "New Derby Database",
"oozie_existing_oracle_host": "c6407.ambari.apache.org",
"oozie_data_dir": "/hadoop/oozie/data",
"oozie_permsize": "256m",
"oozie_log_dir": "/var/log/oozie",
"oozie_existing_postgresql_host": "c6407.ambari.apache.org"
},
"webhcat-site": {
"templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6407.ambari.apache.org:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@EXAMPLE.COM",
"templeton.hadoop.conf.dir": "/etc/hadoop/conf",
"templeton.kerberos.secret": "secret",
"templeton.port": "50111",
"templeton.hive.home": "hive.tar.gz/hive",
"templeton.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"templeton.libjars": "/usr/hdp/current/zookeeper-client/zookeeper.jar",
"templeton.exec.timeout": "60000",
"templeton.hcat.home": "hive.tar.gz/hive/hcatalog",
"templeton.sqoop.home": "sqoop.tar.gz/sqoop",
"templeton.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
"templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz",
"templeton.hcat": "/usr/hdp/current/hive-client/bin/hcat",
"templeton.hadoop": "/usr/hdp/current/hadoop-client/bin/hadoop",
"templeton.override.enabled": "false",
"templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
"templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
"templeton.hive.path": "hive.tar.gz/hive/bin/hive",
"templeton.pig.path": "pig.tar.gz/pig/bin/pig",
"templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop",
"templeton.zookeeper.hosts": "c6408.ambari.apache.org:2181,c6406.ambari.apache.org:2181,c6407.ambari.apache.org:2181",
"templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
"templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar",
"templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz"
},
"hbase-env": {
"hbase_pid_dir": "/var/run/hbase",
"hbase_regionserver_xmn_max": "512",
"hbase_regionserver_xmn_ratio": "0.2",
"hbase_user": "hbase",
"hbase_master_heapsize": "1024m",
"content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}",
"hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab",
"hbase_regionserver_heapsize": "1024m",
"hbase_log_dir": "/var/log/hbase",
"hbase_principal_name": "hbase@EXAMPLE.COM"
},
"krb5-conf": {
"realm": "EXAMPLE.COM",
"conf_dir": "/etc",
"content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm|upper()}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n\n{% if domains %}\n[domain_realm]\n{% for domain in domains.split(',') %}\n {{domain}} = {{realm|upper()}}\n{% endfor %}\n{% endif %}\n\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n admin_server = {{admin_server_host|default(kdc_host, True)}}\n kdc = {{kdc_host}}\n }\n\n{# Append additional realm declarations below #}\n ",
"domains": ""
},
"yarn-log4j": {
"content": "\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the following properties to be set\n# - hadoop.log.dir (Hadoop Log directory)\n# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false"
},
"hiveserver2-site": {
"hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
"hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
},
"ams-log4j": {
"content": "\n #\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n\n # Define some default values that can be overridden by system properties\n ams.log.dir=.\n ams.log.file=ambari-metrics-collector.log\n\n # Root logger option\n log4j.rootLogger=INFO,file\n\n # Direct log messages to a log file\n log4j.appender.file=org.apache.log4j.RollingFileAppender\n log4j.appender.file.File=${ams.log.dir}/${ams.log.file}\n log4j.appender.file.MaxFileSize=80MB\n log4j.appender.file.MaxBackupIndex=60\n log4j.appender.file.layout=org.apache.log4j.PatternLayout\n log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n"
},
"zookeeper-env": {
"zk_user": "zookeeper",
"zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab",
"zk_log_dir": "/var/log/zookeeper",
"content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
"zk_pid_dir": "/var/run/zookeeper",
"zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
},
"pig-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n"
},
"cluster-env": {
"security_enabled": "false",
"ignore_groupsusers_create": "false",
"kerberos_domain": "EXAMPLE.COM",
"user_group": "hadoop",
"smokeuser": "ambari-qa"
}
},
"configurationTags": {
"mapred-site": {
"tag": "version1422665855990"
},
"sqoop-env": {
"tag": "version1422653902159"
},
"ams-hbase-site": {
"tag": "version1"
},
"ranger-hdfs-plugin-properties": {
"tag": "version1"
},
"ams-hbase-env": {
"tag": "version1"
},
"webhcat-env": {
"tag": "version1422653902159"
},
"kerberos-env": {
"tag": "version1422664276771"
},
"oozie-env": {
"tag": "version1422653902159"
},
"hcat-env": {
"tag": "version1422653902159"
},
"tez-site": {
"tag": "version1422653902159"
},
"hdfs-site": {
"tag": "version1422665856194"
},
"yarn-log4j": {
"tag": "version1422653902159"
},
"ams-env": {
"tag": "version1"
},
"tez-env": {
"tag": "version1422653902159"
},
"ams-site": {
"tag": "version1"
},
"ams-hbase-policy": {
"tag": "version1"
},
"hadoop-policy": {
"tag": "version1"
},
"hdfs-log4j": {
"tag": "version1"
},
"hbase-site": {
"tag": "version1422665856153"
},
"ranger-hbase-plugin-properties": {
"tag": "version1422653902159"
},
"hive-exec-log4j": {
"tag": "version1422653902159"
},
"mapred-env": {
"tag": "version1422653902159"
},
"yarn-site": {
"tag": "version1422665856251"
},
"ranger-hive-plugin-properties": {
"tag": "version1422653902159"
},
"krb5-conf": {
"tag": "version1422664276771"
},
"zoo.cfg": {
"tag": "version1"
},
"pig-properties": {
"tag": "version1422653902159"
},
"oozie-log4j": {
"tag": "version1422653902159"
},
"hbase-policy": {
"tag": "version1422653902159"
},
"core-site": {
"tag": "version1422665856532"
},
"pig-env": {
"tag": "version1422653902159"
},
"hive-env": {
"tag": "version1422653902159"
},
"ams-hbase-log4j": {
"tag": "version1"
},
"hadoop-env": {
"tag": "version1422665856464"
},
"zookeeper-log4j": {
"tag": "version1"
},
"hive-site": {
"tag": "version1422665856416"
},
"yarn-env": {
"tag": "version1422653902159"
},
"capacity-scheduler": {
"tag": "version1422653902159"
},
"hbase-log4j": {
"tag": "version1422653902159"
},
"oozie-site": {
"tag": "version1422665856372"
},
"webhcat-site": {
"tag": "version1422665856314"
},
"hbase-env": {
"tag": "version1422665856074"
},
"hive-log4j": {
"tag": "version1422653902159"
},
"hiveserver2-site": {
"tag": "version1422653902159"
},
"ams-log4j": {
"tag": "version1"
},
"zookeeper-env": {
"tag": "version1422665856042"
},
"pig-log4j": {
"tag": "version1422653902159"
},
"cluster-env": {
"tag": "version1422665856107"
}
},
"commandId": "53-3",
"clusterHostInfo": {
"nm_hosts": [
"c6406.ambari.apache.org",
"c6408.ambari.apache.org"
],
"app_timeline_server_hosts": [
"c6407.ambari.apache.org"
],
"hive_metastore_host": [
"c6407.ambari.apache.org"
],
"zkfc_hosts": [
"c6407.ambari.apache.org",
"c6406.ambari.apache.org"
],
"all_ping_ports": [
"8670",
"8670",
"8670",
"8670"
],
"journalnode_hosts": [
"c6408.ambari.apache.org",
"c6407.ambari.apache.org",
"c6406.ambari.apache.org"
],
"rm_host": [
"c6407.ambari.apache.org"
],
"all_hosts": [
"c6408.ambari.apache.org",
"c6407.ambari.apache.org",
"c6406.ambari.apache.org",
"c6409.ambari.apache.org"
],
"hbase_rs_hosts": [
"c6407.ambari.apache.org",
"c6406.ambari.apache.org"
],
"slave_hosts": [
"c6406.ambari.apache.org"
],
"metric_monitor_hosts": [
"c6408.ambari.apache.org",
"c6407.ambari.apache.org",
"c6406.ambari.apache.org",
"c6409.ambari.apache.org"
],
"hbase_master_hosts": [
"c6406.ambari.apache.org"
],
"hive_mysql_host": [
"c6407.ambari.apache.org"
],
"oozie_server": [
"c6407.ambari.apache.org"
],
"webhcat_server_host": [
"c6407.ambari.apache.org"
],
"metric_collector_hosts": [
"c6408.ambari.apache.org"
],
"ambari_server_host": [
"c6406.ambari.apache.org"
],
"zookeeper_hosts": [
"c6408.ambari.apache.org",
"c6407.ambari.apache.org",
"c6406.ambari.apache.org"
],
"hs_host": [
"c6407.ambari.apache.org"
],
"hive_server_host": [
"c6407.ambari.apache.org"
],
"namenode_host": [
"c6407.ambari.apache.org",
"c6406.ambari.apache.org"
]
}
}