{
"roleCommand": "SERVICE_CHECK",
"clusterName": "c1",
"hostname": "c6401.ambari.apache.org",
"hostLevelParams": {
"not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
"agent_stack_retry_count": "5",
"agent_stack_retry_on_unavailability": "false",
"jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
"ambari_db_rca_password": "mapred",
"ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
"jce_name": "UnlimitedJCEPolicyJDK7.zip",
"stack_version": "2.5",
"stack_name": "HDP",
"ambari_db_rca_driver": "org.postgresql.Driver",
"jdk_name": "jdk-7u67-linux-x64.tar.gz",
"ambari_db_rca_username": "mapred",
"java_home": "/usr/jdk64/jdk1.7.0_45",
"db_name": "ambari"
},
"commandType": "EXECUTION_COMMAND",
"roleParams": {},
"serviceName": "SLIDER",
"role": "SLIDER",
"commandParams": {
"version": "2.5.0.0-1235",
"command_timeout": "300",
"service_package_folder": "OOZIE",
"script_type": "PYTHON",
"script": "scripts/service_check.py",
"excluded_hosts": "host1,host2"
},
"taskId": 152,
"public_hostname": "c6401.ambari.apache.org",
"configurations": {
"slider-client": {
"slider.yarn.queue": "default"
},
"sqoop-site": {
"atlas.cluster.name": "c1",
"sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
},
"mahout-env": {
"mahout_user": "mahout"
},
"yarn-env": {
"yarn_user": "yarn"
},
"mahout-log4j": {
"content": "\n #\n #\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing,\n # software distributed under the License is distributed on an\n # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n # KIND, either express or implied. See the License for the\n # specific language governing permissions and limitations\n # under the License.\n #\n #\n #\n\n # Set everything to be logged to the console\n log4j.rootCategory=WARN, console\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n # Settings to quiet third party logs that are too verbose\n log4j.logger.org.eclipse.jetty=WARN\n log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN\n log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN"
},
"hadoop-env": {
"hdfs_user": "hdfs"
},
"core-site": {
"fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
},
"hdfs-site": {
"a": "b"
},
"yarn-site": {
"yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
"yarn.resourcemanager.address": "c6401.ambari.apache.org:8050",
"yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
},
"cluster-env": {
"managed_hdfs_resource_property_names": "",
"security_enabled": "false",
"ignore_groupsusers_create": "false",
"smokeuser": "ambari-qa",
"kerberos_domain": "EXAMPLE.COM",
"user_group": "hadoop"
},
"webhcat-site": {
"templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
"templeton.pig.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/pig.tar.gz",
"templeton.hive.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/hive.tar.gz",
"templeton.sqoop.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/sqoop.tar.gz",
"templeton.streaming.jar": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mr/hadoop-streaming.jar"
},
"slider-log4j": {
"content": "log4jproperties\nline2"
},
"slider-env": {
"content": "envproperties\nline2"
},
"gateway-site": {
"java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf",
"gateway.hadoop.kerberos.secured": "false",
"gateway.gateway.conf.dir": "deployments",
"gateway.path": "gateway",
"sun.security.krb5.debug": "true",
"java.security.krb5.conf": "/etc/knox/conf/krb5.conf",
"gateway.port": "8443"
},
"users-ldif": {
"content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n version: 1\n\n # Please replace with site specific values\n dn: dc=hadoop,dc=apache,dc=org\n objectclass: organization\n objectclass: dcObject\n o: Hadoop\n dc: hadoop\n\n # Entry for a sample people container\n # Please replace with site specific values\n dn: ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:organizationalUnit\n ou: people\n\n # Entry for a sample end user\n # Please replace with site specific values\n dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: Guest\n sn: User\n uid: guest\n userPassword:guest-password\n\n # entry for sample user admin\n dn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: Admin\n sn: Admin\n uid: admin\n userPassword:admin-password\n\n # entry for sample user sam\n dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: sam\n sn: sam\n uid: sam\n userPassword:sam-password\n\n # entry for sample user tom\n dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:person\n objectclass:organizationalPerson\n objectclass:inetOrgPerson\n cn: tom\n sn: tom\n uid: tom\n userPassword:tom-password\n\n # create FIRST Level groups branch\n dn: ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass:organizationalUnit\n ou: groups\n description: generic groups branch\n\n # create the analyst group under groups\n dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass: groupofnames\n cn: analyst\n description:analyst group\n member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n # create the scientist group under groups\n dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\n objectclass:top\n objectclass: groupofnames\n cn: scientist\n description: scientist group\n member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
},
"topology": {
"content": "\n <topology>\n\n <gateway>\n\n <provider>\n <role>authentication</role>\n <name>ShiroProvider</name>\n <enabled>true</enabled>\n <param>\n <name>sessionTimeout</name>\n <value>30</value>\n </param>\n <param>\n <name>main.ldapRealm</name>\n <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n </param>\n <param>\n <name>main.ldapRealm.userDnTemplate</name>\n <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.url</name>\n <value>ldap://{{knox_host_name}}:33389</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n <value>simple</value>\n </param>\n <param>\n <name>urls./**</name>\n <value>authcBasic</value>\n </param>\n </provider>\n\n <provider>\n <role>identity-assertion</role>\n <name>Default</name>\n <enabled>true</enabled>\n </provider>\n\n </gateway>\n\n <service>\n <role>NAMENODE</role>\n <url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n </service>\n\n <service>\n <role>JOBTRACKER</role>\n <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n </service>\n\n <service>\n <role>WEBHDFS</role>\n <url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n </service>\n\n <service>\n <role>WEBHCAT</role>\n <url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n </service>\n\n <service>\n <role>OOZIE</role>\n <url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n </service>\n\n <service>\n <role>WEBHBASE</role>\n <url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n </service>\n\n <service>\n <role>HIVE</role>\n <url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n </service>\n\n <service>\n <role>RESOURCEMANAGER</role>\n <url>http://{{rm_host}}:{{rm_port}}/ws</url>\n </service>\n </topology>"
},
"ldap-log4j": {
"content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #testing\n\n app.log.dir=${launcher.dir}/../logs\n app.log.file=${launcher.name}.log\n\n log4j.rootLogger=ERROR, drfa\n log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n log4j.logger.org.apache.directory=WARN\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n"
},
"gateway-log4j": {
"content": "\n\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n app.log.dir=${launcher.dir}/../logs\n app.log.file=${launcher.name}.log\n app.audit.file=${launcher.name}-audit.log\n\n log4j.rootLogger=ERROR, drfa\n\n log4j.logger.org.apache.hadoop.gateway=INFO\n #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n #log4j.logger.org.eclipse.jetty=DEBUG\n #log4j.logger.org.apache.shiro=DEBUG\n #log4j.logger.org.apache.http=DEBUG\n #log4j.logger.org.apache.http.client=DEBUG\n #log4j.logger.org.apache.http.headers=DEBUG\n #log4j.logger.org.apache.http.wire=DEBUG\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n log4j.logger.audit=INFO, auditfile\n log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n log4j.appender.auditfile.Append = true\n log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout"
},
"knox-env": {
"knox_master_secret": "sa",
"knox_group": "knox",
"knox_pid_dir": "/var/run/knox",
"knox_user": "knox"
},
"kafka-env": {
"content": "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin",
"kafka_user": "kafka",
"kafka_log_dir": "/var/log/kafka",
"kafka_pid_dir": "/var/run/kafka"
},
"kafka-log4j": {
"content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, 
requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false"
},
"kafka-broker": {
"log.segment.bytes": "1073741824",
"socket.send.buffer.bytes": "102400",
"num.network.threads": "3",
"log.flush.scheduler.interval.ms": "3000",
"kafka.ganglia.metrics.host": "localhost",
"zookeeper.session.timeout.ms": "6000",
"replica.lag.time.max.ms": "10000",
"num.io.threads": "8",
"kafka.ganglia.metrics.group": "kafka",
"replica.lag.max.messages": "4000",
"port": "6667",
"log.retention.bytes": "-1",
"fetch.purgatory.purge.interval.requests": "10000",
"producer.purgatory.purge.interval.requests": "10000",
"default.replication.factor": "1",
"replica.high.watermark.checkpoint.interval.ms": "5000",
"zookeeper.connect": "c6402.ambari.apache.org:2181",
"controlled.shutdown.retry.backoff.ms": "5000",
"num.partitions": "1",
"log.flush.interval.messages": "10000",
"replica.fetch.min.bytes": "1",
"queued.max.requests": "500",
"controlled.shutdown.max.retries": "3",
"replica.fetch.wait.max.ms": "500",
"controlled.shutdown.enable": "false",
"log.roll.hours": "168",
"log.cleanup.interval.mins": "10",
"replica.socket.receive.buffer.bytes": "65536",
"zookeeper.connection.timeout.ms": "6000",
"replica.fetch.max.bytes": "1048576",
"num.replica.fetchers": "1",
"socket.request.max.bytes": "104857600",
"message.max.bytes": "1000000",
"zookeeper.sync.time.ms": "2000",
"socket.receive.buffer.bytes": "102400",
"controller.message.queue.size": "10",
"log.flush.interval.ms": "3000",
"log.dirs": "/tmp/log/dir",
"controller.socket.timeout.ms": "30000",
"replica.socket.timeout.ms": "30000",
"auto.create.topics.enable": "true",
"log.index.size.max.bytes": "10485760",
"kafka.ganglia.metrics.port": "8649",
"log.index.interval.bytes": "4096",
"log.retention.hours": "168"
},
"spark-defaults": {
"spark.yarn.applicationMaster.waitTries": "10",
"spark.history.kerberos.keytab": "none",
"spark.yarn.preserve.staging.files": "false",
"spark.yarn.submit.file.replication": "3",
"spark.history.kerberos.principal": "none",
"spark.yarn.driver.memoryOverhead": "384",
"spark.yarn.queue": "default",
"spark.yarn.containerLauncherMaxThreads": "25",
"spark.yarn.scheduler.heartbeat.interval-ms": "5000",
"spark.history.ui.port": "18080",
"spark.yarn.max.executor.failures": "3",
"spark.driver.extraJavaOptions": "",
"spark.history.provider": "org.apache.spark.deploy.yarn.history.YarnHistoryProvider",
"spark.yarn.am.extraJavaOptions": "",
"spark.yarn.executor.memoryOverhead": "384"
},
"spark-javaopts-properties": {
"content": " "
},
"spark-log4j-properties": {
"content": "\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO"
},
"spark-env": {
"content": "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alternate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\nif [ -d \"/etc/tez/conf/\" ]; then\n export TEZ_CONF_DIR=/etc/tez/conf\nelse\n export TEZ_CONF_DIR=\nfi",
"spark_pid_dir": "/var/run/spark",
"spark_log_dir": "/var/log/spark",
"spark_group": "spark",
"spark_user": "spark"
},
"spark-metrics-properties": {
"content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. 
MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n## Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
},
"spark-metrics-properties": {
"content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. 
MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n## Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
},
"livy-log4j-properties": {
"content": "\n # Set everything to be logged to the console\n log4j.rootCategory=INFO, console\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n log4j.logger.org.eclipse.jetty=WARN"
},
"livy-conf": {
"livy.server.port": "8998",
"livy.server.csrf_protection.enabled": "true",
"livy.environment": "production",
"livy.impersonation.enabled": "true",
"livy.server.session.timeout": "3600000"
},
"livy-spark-blacklist": {
"content": "\n #\n # Configuration override / blacklist. Defines a list of properties that users are not allowed\n # to override when starting Spark sessions.\n #\n # This file takes a list of property names (one per line). Empty lines and lines starting with \"#\"\n # are ignored.\n #"
},
"livy-env": {
"livy_group": "livy",
"spark_home": "/usr/hdp/current/spark-client",
"content": "\n #!/usr/bin/env bash\n\n # - SPARK_HOME Spark which you would like to use in livy\n # - HADOOP_CONF_DIR Directory containing the Hadoop / YARN configuration to use.\n # - LIVY_LOG_DIR Where log files are stored. (Default: ${LIVY_HOME}/logs)\n # - LIVY_PID_DIR Where the pid file is stored. (Default: /tmp)\n # - LIVY_SERVER_JAVA_OPTS Java Opts for running livy server (You can set jvm related setting here, like jvm memory/gc algorithm and etc.)\n export SPARK_HOME=/usr/hdp/current/spark-client\n export HADOOP_CONF_DIR=/etc/hadoop/conf\n export LIVY_LOG_DIR={{livy_log_dir}}\n export LIVY_PID_DIR={{livy_pid_dir}}\n export LIVY_SERVER_JAVA_OPTS=\"-Xmx2g\"",
"livy_pid_dir": "/var/run/livy",
"livy_log_dir": "/var/log/livy",
"livy_user": "livy"
},
"logsearch-solr-env": {
"logsearch_solr_znode": "/logsearch",
"logsearch_solr_user": "solr",
"logsearch_solr_client_log_dir" :"/var/log/ambari-logsearch-solr-client"
},
"logsearch-solr-client-log4j" : {
"content" : "content"
},
"application-properties": {
"atlas.cluster.name" : "c2",
"atlas.rest.address": "http://c6401.ambari.apache.org:21000",
"atlas.graph.storage.backend": "berkeleyje",
"atlas.graph.storage.directory": "data/berkley",
"atlas.graph.index.search.backend": "solr5",
"atlas.graph.index.search.directory": "data/es",
"atlas.graph.index.search.elasticsearch.client-only": false,
"atlas.graph.index.search.elasticsearch.local-mode": true,
"atlas.lineage.hive.table.type.name": "Table",
"atlas.lineage.hive.column.type.name": "Column",
"atlas.lineage.hive.table.column.name": "columns",
"atlas.lineage.hive.process.type.name": "LoadProcess",
"atlas.lineage.hive.process.inputs.name": "inputTables",
"atlas.lineage.hive.process.outputs.name": "outputTables",
"atlas.enableTLS": false,
"atlas.authentication.method": "simple",
"atlas.authentication.principal": "atlas",
"atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab",
"atlas.http.authentication.enabled": false,
"atlas.http.authentication.type": "simple",
"atlas.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
"atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
"atlas.http.authentication.kerberos.name.rules": "DEFAULT",
"atlas.server.http.port" : "21000",
"atlas.notification.embedded" : false,
"atlas.kafka.bootstrap.servers" : "c6401.ambari.apache.org:6667",
"atlas.kafka.data" : "/usr/hdp/current/atlas-server/data/kafka",
"atlas.kafka.entities.group.id" : "entities",
"atlas.kafka.hook.group.id" : "atlas",
"atlas.kafka.zookeeper.connect" : "c6401.ambari.apache.org:2181"
},
"atlas-env": {
"content": "# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java64_home}}\n# any additional java opts you want to set. This will apply to both client and server operations\nexport METADATA_OPTS={{metadata_opts}}\n# metadata configuration directory\nexport METADATA_CONF={{conf_dir}}\n# Where log files are stored. Defatult is logs directory under the base install location\nexport METADATA_LOG_DIR={{log_dir}}\n# additional classpath entries\nexport METADATACPPATH={{metadata_classpath}}\n# data dir\nexport METADATA_DATA_DIR={{data_dir}}\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\nexport METADATA_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}",
"metadata_user": "atlas",
"metadata_port": 21000,
"metadata_pid_dir": "/var/run/atlas",
"metadata_log_dir": "/var/log/atlas",
"metadata_data_dir": "/var/lib/atlas/data",
"metadata_expanded_war_dir": "/var/lib/atlas/server/webapp"
},
"atlas-log4j": {
"content": "<property><name>content</name><description>Custom log4j.properties</description><value></value></property>",
"atlas_log_level": "debug",
"audit_log_level": "OFF"
},
"atlas-solrconfig": {
"content": "<property><name>content</name><description>Custom solrconfig properties</description><value></value></property>"
},
"zoo.cfg": {
"clientPort": "2181"
},
"ranger-hbase-plugin-properties": {
"ranger-hbase-plugin-enabled":"yes"
},
"ranger-hive-plugin-properties": {
"ranger-hive-plugin-enabled":"yes"
},
"ranger-env": {
"xml_configurations_supported" : "true"
},
"tagsync-application-properties": {
"atlas.kafka.hook.group.id": "atlas",
"atlas.kafka.zookeeper.connect": "os-mv-31-dev-4.novalocal:2181",
"atlas.kafka.acks": "1",
"atlas.kafka.entities.group.id": "ranger_entities_consumer",
"atlas.kafka.data": "/usr/hdp/current/atlas-server/data/kafka",
"atlas.kafka.bootstrap.servers": "localhost:2181",
"atlas.notification.embedded": "false"
},
"ranger-tagsync-site": {
"ranger.tagsync.sink.impl.class": "org.apache.ranger.tagsync.sink.tagadmin.TagAdminRESTSink",
"ranger.tagsync.atlasrestsource.endpoint": "",
"ranger.tagsync.tagadmin.rest.ssl.config.file": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
"ranger.tagsync.logdir": "/var/log/ranger/tagsync",
"ranger.tagsync.filesource.filename": "/usr/hdp/current/ranger-tagsync/conf/etc/ranger/data/tags.json",
"ranger.tagsync.enabled": "true",
"ranger.tagsync.tagadmin.rest.url": "{{ranger_external_url}}",
"ranger.tagsync.atlasrestsource.download.interval": "",
"ranger.tagsync.filesource.modtime.check.interval": "60000",
"ranger.tagsync.tagadmin.password": "rangertagsync",
"ranger.tagsync.source.impl.class": "file",
"ranger.tagsync.source.atlas.custom.resource.mappers": "",
"ranger.tagsync.tagadmin.alias": "tagsync.tagadmin",
"ranger.tagsync.tagadmin.keystore": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
"ranger.tagsync.atlas.to.service.mapping": ""
}
},
"configuration_attributes": {
"sqoop-site": {},
"yarn-site": {
"final": {
"yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
"yarn.nodemanager.container-executor.class": "true",
"yarn.nodemanager.local-dirs": "true"
}
},
"yarn-site": {
"final": {
"is_supported_yarn_ranger": "true"
}
},
"hdfs-site": {
"final": {
"dfs.web.ugi": "true",
"dfs.support.append": "true",
"dfs.cluster.administrators": "true"
}
},
"core-site": {
"final": {
"hadoop.proxyuser.hive.groups": "true",
"webinterface.private.actions": "true",
"hadoop.proxyuser.oozie.hosts": "true"
}
},
"knox-env": {},
"gateway-site": {},
"users-ldif": {},
"kafka-env": {},
"kafka-log4j": {},
"kafka-broker": {},
"metadata-env": {},
"atlas-hbase-site": {},
"tagsync-application-properties": {},
"ranger-tagsync-site": {}
},
"configurationTags": {
"slider-client": {
"tag": "version1"
},
"slider-log4j": {
"tag": "version1"
},
"slider-env": {
"tag": "version1"
},
"core-site": {
"tag": "version1"
},
"hdfs-site": {
"tag": "version1"
},
"yarn-site": {
"tag": "version1"
},
"gateway-site": {
"tag": "version1"
},
"topology": {
"tag": "version1"
},
"users-ldif": {
"tag": "version1"
},
"kafka-env": {
"tag": "version1"
},
"kafka-log4j": {
"tag": "version1"
},
"kafka-broker": {
"tag": "version1"
},
"metadata-env": {
"tag": "version1"
},
"tagsync-application-properties": {
"tag": "version1"
},
"ranger-tagsync-site": {
"tag": "version1"
}
},
"commandId": "7-1",
"clusterHostInfo": {
"ambari_server_host": [
"c6401.ambari.apache.org"
],
"all_ping_ports": [
"8670",
"8670"
],
"rm_host": [
"c6402.ambari.apache.org"
],
"all_hosts": [
"c6401.ambari.apache.org",
"c6402.ambari.apache.org"
],
"knox_gateway_hosts": [
"jaimin-knox-1.c.pramod-thangali.internal"
],
"kafka_broker_hosts": [
"c6401.ambari.apache.org"
],
"logsearch_solr_hosts": [
"c6401.ambari.apache.org"
],
"zookeeper_hosts": [
"c6401.ambari.apache.org"
],
"ranger_tagsync_hosts": [
"c6401.ambari.apache.org"
],
"atlas_server_hosts": [
"c6401.ambari.apache.org"
]
}
}