{
    "configuration_attributes": {},
    "roleCommand": "ACTIONEXECUTE",
    "kerberosCommandParams": [],
    "clusterName": "c1",
    "componentName": "",
    "hostname": "u1202.ambari.apache.org",
    "hostLevelParams": {
        "jdk_location": "http://u1201.ambari.apache.org:8080/resources/",
        "ambari_db_rca_password": "mapred",
        "java_home": "/usr/jdk64/jdk1.7.0_67",
        "ambari_db_rca_url": "jdbc:postgresql://u1201.ambari.apache.org/ambarirca",
        "jce_name": "UnlimitedJCEPolicyJDK7.zip",
        "oracle_jdbc_url": "http://u1201.ambari.apache.org:8080/resources//ojdbc6.jar",
        "stack_version": "2.2",
        "stack_name": "HDP",
        "db_name": "ambari",
        "ambari_db_rca_driver": "org.postgresql.Driver",
        "jdk_name": "jdk-7u67-linux-x64.tar.gz",
        "ambari_db_rca_username": "mapred",
        "db_driver_filename": "mysql-connector-java.jar",
        "agentCacheDir": "/var/lib/ambari-agent/cache",
        "mysql_jdbc_url": "http://u1201.ambari.apache.org:8080/resources//mysql-connector-java.jar"
    },
    "commandType": "EXECUTION_COMMAND",
    "roleParams": {
        "version": "2.2.1.0-2260",
        "tasks": "[{\"type\":\"EXECUTE\",\"hosts\":\"master\",\"script\":\"scripts/namenode.py\",\"function\":\"prepare_rolling_upgrade\"}]",
        "service_package_folder": "common-services/HDFS/2.1.0.2.0/package",
        "upgrade_direction": "upgrade",
        "hooks_folder": "HDP/2.0.6/hooks"
    },
    "serviceName": "",
    "role": "ru_execute_tasks",
    "forceRefreshConfigTags": [],
    "taskId": 90,
    "public_hostname": "u1202.ambari.apache.org",
    "configurations": {
        "ranger-hdfs-plugin-properties": {
            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
            "common.name.for.certificate": "-",
            "XAAUDIT.HDFS.IS_ENABLED": "false",
            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
            "XAAUDIT.DB.IS_ENABLED": "false",
            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
            "hadoop.rpc.protection": "-",
            "ranger-hdfs-plugin-enabled": "No",
            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
            "SSL_TRUSTSTORE_PASSWORD": "changeit",
            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
            "REPOSITORY_CONFIG_USERNAME": "hadoop",
            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
        },
        "hdfs-site": {
            "dfs.namenode.avoid.write.stale.datanode": "true",
            "dfs.namenode.shared.edits.dir": "qjournal://u1201.ambari.apache.org:8485;u1202.ambari.apache.org:8485;u1203.ambari.apache.org:8485/ha",
            "dfs.namenode.startup.delay.block.deletion.sec": "3600",
            "dfs.namenode.checkpoint.txns": "1000000",
            "dfs.block.access.token.enable": "true",
            "dfs.support.append": "true",
            "dfs.datanode.address": "0.0.0.0:50010",
            "dfs.cluster.administrators": "hdfs",
            "dfs.datanode.balance.bandwidthPerSec": "6250000",
            "dfs.namenode.safemode.threshold-pct": "0.99f",
            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
            "dfs.permissions.enabled": "true",
            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
            "dfs.https.port": "50470",
            "dfs.namenode.https-address": "u1201.ambari.apache.org:50470",
            "dfs.ha.automatic-failover.enabled": "true",
            "dfs.namenode.http-address.ha.nn2": "u1202.ambari.apache.org:50070",
            "dfs.blockreport.initialDelay": "120",
            "dfs.namenode.http-address.ha.nn1": "u1201.ambari.apache.org:50070",
            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journal",
            "dfs.blocksize": "134217728",
            "dfs.datanode.max.transfer.threads": "4096",
            "dfs.heartbeat.interval": "3",
            "dfs.replication": "3",
            "dfs.namenode.handler.count": "100",
            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
            "fs.permissions.umask-mode": "022",
            "dfs.namenode.stale.datanode.interval": "30000",
            "dfs.datanode.ipc.address": "0.0.0.0:8010",
            "dfs.namenode.rpc-address.ha.nn2": "u1202.ambari.apache.org:8020",
            "dfs.namenode.rpc-address.ha.nn1": "u1201.ambari.apache.org:8020",
            "dfs.nameservices": "ha",
            "dfs.datanode.data.dir": "/hadoop/hdfs/data",
            "dfs.namenode.http-address": "u1201.ambari.apache.org:50070",
            "dfs.webhdfs.enabled": "false",
            "dfs.namenode.accesstime.precision": "0",
            "dfs.datanode.failed.volumes.tolerated": "0",
            "dfs.namenode.https-address.ha.nn2": "u1202.ambari.apache.org:50470",
            "dfs.namenode.https-address.ha.nn1": "u1201.ambari.apache.org:50470",
            "dfs.datanode.https.address": "0.0.0.0:50475",
            "dfs.namenode.write.stale.datanode.ratio": "1.0f",
            "dfs.client.read.shortcircuit": "false",
            "dfs.ha.fencing.methods": "shell(/bin/true)",
            "dfs.datanode.http.address": "0.0.0.0:50075",
            "dfs.datanode.du.reserved": "1073741824",
            "dfs.client.read.shortcircuit.streams.cache.size": "4096",
            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
            "dfs.http.policy": "HTTP_ONLY",
            "dfs.ha.namenodes.ha": "nn1,nn2",
            "dfs.permissions.superusergroup": "hdfs",
            "dfs.journalnode.https-address": "0.0.0.0:8481",
            "dfs.journalnode.http-address": "0.0.0.0:8480",
            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
            "dfs.namenode.avoid.read.stale.datanode": "true",
            "dfs.client.failover.proxy.provider.ha": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
            "dfs.datanode.data.dir.perm": "750",
            "dfs.namenode.name.dir.restore": "true",
            "dfs.replication.max": "50",
            "dfs.namenode.checkpoint.period": "21600"
        },
        "zoo.cfg": {
            "clientPort": "2181",
            "autopurge.purgeInterval": "24",
            "syncLimit": "5",
            "dataDir": "/hadoop/zookeeper",
            "initLimit": "10",
            "tickTime": "2000",
            "autopurge.snapRetainCount": "30"
        },
        "core-site": {
            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
            "hadoop.security.auth_to_local": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT",
            "proxyuser_group": "users",
            "fs.trash.interval": "360",
            "ha.failover-controller.active-standby-elector.zk.op.retries": "120",
            "hadoop.security.authentication": "simple",
            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
            "mapreduce.jobtracker.webinterface.trusted": "false",
            "hadoop.security.authorization": "false",
            "fs.defaultFS": "hdfs://ha",
            "hadoop.http.authentication.simple.anonymous.allowed": "true",
            "ipc.client.connect.max.retries": "50",
            "ipc.client.idlethreshold": "8000",
            "io.file.buffer.size": "131072",
            "ipc.server.tcpnodelay": "true",
            "ha.zookeeper.quorum": "u1201.ambari.apache.org:2181,u1202.ambari.apache.org:2181,u1203.ambari.apache.org:2181",
            "ipc.client.connection.maxidletime": "30000"
        },
        "hadoop-policy": {
            "security.job.client.protocol.acl": "*",
            "security.job.task.protocol.acl": "*",
            "security.datanode.protocol.acl": "*",
            "security.namenode.protocol.acl": "*",
            "security.client.datanode.protocol.acl": "*",
            "security.inter.tracker.protocol.acl": "*",
            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
            "security.client.protocol.acl": "*",
            "security.refresh.policy.protocol.acl": "hadoop",
            "security.admin.operations.protocol.acl": "hadoop",
            "security.inter.datanode.protocol.acl": "*"
        },
        "hdfs-log4j": {
"content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
        },
        "hadoop-env": {
            "dtnode_heapsize": "1024m",
            "namenode_opt_maxnewsize": "256m",
            "hdfs_log_dir_prefix": "/var/log/hadoop",
            "namenode_heapsize": "1024m",
            "proxyuser_group": "users",
            "hadoop_pid_dir_prefix": "/var/run/hadoop",
"content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. 
$HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
"hdfs_user": "hdfs",
"namenode_opt_newsize": "256m",
"hadoop_root_logger": "INFO,RFA",
"hadoop_heapsize": "1024",
"namenode_opt_maxpermsize": "256m",
"namenode_opt_permsize": "128m"
},
"zookeeper-env": {
"zk_log_dir": "/var/log/zookeeper",
"content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}",
"zk_pid_dir": "/var/run/zookeeper",
"zk_user": "zookeeper"
},
"zookeeper-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
},
"cluster-env": {
"security_enabled": "false",
"ignore_groupsusers_create": "false",
"kerberos_domain": "EXAMPLE.COM",
"user_group": "hadoop",
"smokeuser": "ambari-qa",
"smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab"
}
},
"commandParams": {
"tasks": "[{\"type\":\"EXECUTE\",\"hosts\":\"master\",\"script\":\"scripts/namenode.py\",\"function\":\"prepare_rolling_upgrade\"}]",
"service_package_folder": "common-services/HDFS/2.1.0.2.0/package",
"hooks_folder": "HDP/2.0.6/hooks",
"upgrade_direction": "upgrade",
"script": "ru_execute_tasks.py",
"version": "2.2.1.0-2260",
"command_timeout": "60",
"script_type": "PYTHON"
},
"commandId": "21-11",
"clusterHostInfo": {
"zkfc_hosts": [
"u1202.ambari.apache.org",
"u1201.ambari.apache.org"
],
"all_ping_ports": [
"8670",
"8670",
"8670"
],
"journalnode_hosts": [
"u1202.ambari.apache.org",
"u1201.ambari.apache.org",
"u1203.ambari.apache.org"
],
"all_hosts": [
"u1202.ambari.apache.org",
"u1201.ambari.apache.org",
"u1203.ambari.apache.org"
],
"slave_hosts": [
"u1202.ambari.apache.org",
"u1201.ambari.apache.org",
"u1203.ambari.apache.org"
],
"namenode_host": [
"u1202.ambari.apache.org",
"u1201.ambari.apache.org"
],
"ambari_server_host": [
"u1201.ambari.apache.org"
],
"zookeeper_hosts": [
"u1202.ambari.apache.org",
"u1201.ambari.apache.org",
"u1203.ambari.apache.org"
]
}
}