| { |
| "configuration_attributes": { |
| "ranger-knox-plugin-properties": {}, |
| "gateway-log4j": {}, |
| "ranger-hdfs-plugin-properties": {}, |
| "hbase-policy": {}, |
| "kerberos-env": {}, |
| "storm-site": {}, |
| "hdfs-site": {}, |
| "storm-env": {}, |
| "hbase-site": {}, |
| "knox-env": {}, |
| "hadoop-policy": {}, |
| "hdfs-log4j": {}, |
| "ranger-hbase-plugin-properties": {}, |
| "krb5-conf": {}, |
| "ldap-log4j": {}, |
| "core-site": {}, |
| "hadoop-env": {}, |
| "zookeeper-log4j": {}, |
| "topology": {}, |
| "hbase-log4j": {}, |
| "oozie-site": {}, |
| "gateway-site": {}, |
| "hbase-env": {}, |
| "zookeeper-env": {}, |
| "zoo.cfg": {}, |
| "ranger-storm-plugin-properties": {}, |
| "webhcat-site": {}, |
| "users-ldif": {}, |
| "cluster-env": {} |
| }, |
| "commandParams": { |
| "service_package_folder": "common-services/HBASE/0.96.0.2.0/package", |
| "script": "scripts/hbase_regionserver.py", |
| "hooks_folder": "HDP/2.0.6/hooks", |
| "version": "2.3.0.0-1606", |
| "excluded_hosts": "host1", |
| "command_timeout": "900", |
| "script_type": "PYTHON" |
| }, |
| "roleCommand": "CUSTOM_COMMAND", |
| "kerberosCommandParams": [], |
| "clusterName": "c1", |
| "hostname": "c6405.ambari.apache.org", |
| "hostLevelParams": { |
| "jdk_location": "http://c6405.ambari.apache.org:8080/resources/", |
| "ambari_db_rca_password": "mapred", |
| "java_home": "/usr/jdk64/jdk1.8.0_40", |
| "ambari_db_rca_url": "jdbc:postgresql://c6405.ambari.apache.org/ambarirca", |
| "jce_name": "jce_policy-8.zip", |
| "custom_command": "RESTART", |
| "oracle_jdbc_url": "http://c6405.ambari.apache.org:8080/resources//ojdbc6.jar", |
| "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.3.0.0-1606\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.3\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.3.0.0-1606\",\"baseSaved\":true},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"baseSaved\":true}]", |
| "group_list": "[\"hadoop\",\"users\",\"knox\"]", |
| "agentCacheDir": "/var/lib/ambari-agent/cache", |
| "stack_version": "2.3", |
| "stack_name": "HDP", |
| "db_name": "ambari", |
| "jdk_name": "jdk-8u40-linux-x64.tar.gz", |
| "ambari_db_rca_driver": "org.postgresql.Driver", |
| "java_version": "8", |
| "ambari_db_rca_username": "mapred", |
| "db_driver_filename": "mysql-connector-java.jar", |
| "user_list": "[\"storm\",\"zookeeper\",\"ambari-qa\",\"hdfs\",\"hbase\",\"knox\"]", |
| "mysql_jdbc_url": "http://c6405.ambari.apache.org:8080/resources//mysql-connector-java.jar", |
| "clientsToUpdateConfigs": "[\"*\"]" |
| }, |
| "commandType": "EXECUTION_COMMAND", |
| "roleParams": { |
| "component_category": "SLAVE" |
| }, |
| "serviceName": "HBASE", |
| "role": "HBASE_REGIONSERVER", |
| "forceRefreshConfigTags": [], |
| "taskId": 115, |
| "public_hostname": "c6405.ambari.apache.org", |
| "configurations": { |
| "ranger-knox-plugin-properties": { |
| "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", |
| "KNOX_HOME": "/usr/hdp/current/knox-server", |
| "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", |
| "common.name.for.certificate": "-", |
| "XAAUDIT.HDFS.IS_ENABLED": "false", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", |
| "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", |
| "XAAUDIT.DB.IS_ENABLED": "true", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", |
| "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", |
| "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", |
| "ranger-knox-plugin-enabled": "No", |
| "policy_user": "ambari-qa", |
| "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", |
| "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", |
| "SSL_TRUSTSTORE_PASSWORD": "changeit", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", |
| "REPOSITORY_CONFIG_USERNAME": "admin", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", |
| "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", |
| "REPOSITORY_CONFIG_PASSWORD": "admin-password" |
| }, |
| "gateway-log4j": { |
| "content": "\n\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n app.log.dir=${launcher.dir}/../logs\n app.log.file=${launcher.name}.log\n app.audit.file=${launcher.name}-audit.log\n\n log4j.rootLogger=ERROR, drfa\n\n log4j.logger.org.apache.hadoop.gateway=INFO\n #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n #log4j.logger.org.eclipse.jetty=DEBUG\n #log4j.logger.org.apache.shiro=DEBUG\n #log4j.logger.org.apache.http=DEBUG\n #log4j.logger.org.apache.http.client=DEBUG\n #log4j.logger.org.apache.http.headers=DEBUG\n #log4j.logger.org.apache.http.wire=DEBUG\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n log4j.logger.audit=INFO, auditfile\n log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n log4j.appender.auditfile.Append = true\n log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout" |
| }, |
| "ranger-hdfs-plugin-properties": { |
| "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", |
| "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", |
| "common.name.for.certificate": "-", |
| "XAAUDIT.HDFS.IS_ENABLED": "false", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", |
| "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", |
| "XAAUDIT.DB.IS_ENABLED": "true", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", |
| "hadoop.rpc.protection": "-", |
| "ranger-hdfs-plugin-enabled": "No", |
| "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", |
| "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", |
| "policy_user": "ambari-qa", |
| "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", |
| "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", |
| "SSL_TRUSTSTORE_PASSWORD": "changeit", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", |
| "REPOSITORY_CONFIG_USERNAME": "hadoop", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", |
| "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", |
| "REPOSITORY_CONFIG_PASSWORD": "hadoop" |
| }, |
| "hbase-policy": { |
| "security.masterregion.protocol.acl": "*", |
| "security.admin.protocol.acl": "*", |
| "security.client.protocol.acl": "*" |
| }, |
| "kerberos-env": { |
| "kdc_host": "c6405.ambari.apache.org", |
| "ad_create_attributes_template": "\n{\n \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n \"cn\": \"$principal_name\",\n #if( $is_service )\n \"servicePrincipalName\": \"$principal_name\",\n #end\n \"userPrincipalName\": \"$normalized_principal\",\n \"unicodePwd\": \"$password\",\n \"accountExpires\": \"0\",\n \"userAccountControl\": \"66048\"\n}\n ", |
| "realm": "EXAMPLE.COM", |
| "container_dn": "", |
| "ldap_url": "", |
| "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", |
| "admin_server_host": "c6405.ambari.apache.org", |
| "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", |
| "kdc_type": "mit-kdc", |
| "manage_identities": "true" |
| }, |
| "storm-site": { |
| "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", |
| "topology.workers": "1", |
| "drpc.worker.threads": "64", |
| "storm.messaging.netty.client_worker_threads": "1", |
| "supervisor.heartbeat.frequency.secs": "5", |
| "topology.executor.send.buffer.size": "1024", |
| "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER", |
| "nimbus.thrift.port": "6627", |
| "storm.zookeeper.retry.intervalceiling.millis": "30000", |
| "storm.local.dir": "/hadoop/storm", |
| "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}", |
| "topology.receiver.buffer.size": "8", |
| "storm.zookeeper.servers": "['c6405.ambari.apache.org']", |
| "transactional.zookeeper.root": "/transactional", |
| "drpc.authorizer": "backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer", |
| "_storm.min.ruid": "null", |
| "drpc.request.timeout.secs": "600", |
| "topology.skip.missing.kryo.registrations": "false", |
| "worker.heartbeat.frequency.secs": "1", |
| "zmq.hwm": "0", |
| "storm.zookeeper.connection.timeout": "15000", |
| "_storm.thrift.secure.transport": "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin", |
| "storm.messaging.netty.server_worker_threads": "1", |
| "supervisor.worker.start.timeout.secs": "120", |
| "zmq.threads": "1", |
| "topology.acker.executors": "null", |
| "storm.local.mode.zmq": "false", |
| "topology.max.task.parallelism": "null", |
| "topology.max.error.report.per.interval": "5", |
| "topology.debug": "false", |
| "drpc.queue.size": "128", |
| "storm.principal.tolocal": "backtype.storm.security.auth.KerberosPrincipalToLocal", |
| "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM", |
| "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf", |
| "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM", |
| "storm.zookeeper.retry.times": "5", |
| "nimbus.monitor.freq.secs": "10", |
| "storm.cluster.mode": "distributed", |
| "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", |
| "drpc.invocations.port": "3773", |
| "_storm.thrift.nonsecure.transport": "backtype.storm.security.auth.SimpleTransportPlugin", |
| "storm.zookeeper.root": "/storm", |
| "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER", |
| "transactional.zookeeper.port": "null", |
| "topology.worker.childopts": "null", |
| "topology.max.spout.pending": "null", |
| "nimbus.cleanup.inbox.freq.secs": "600", |
| "storm.messaging.netty.min_wait_ms": "100", |
| "nimbus.task.timeout.secs": "30", |
| "nimbus.thrift.max_buffer_size": "1048576", |
| "topology.sleep.spout.wait.strategy.time.ms": "1", |
| "topology.optimize": "true", |
| "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}", |
| "nimbus.reassign": "true", |
| "storm.messaging.transport": "backtype.storm.messaging.netty.Context", |
| "logviewer.appender.name": "A1", |
| "nimbus.supervisor.users": "['{{storm_user}}']", |
| "nimbus.host": "c6405.ambari.apache.org", |
| "ui.port": "8744", |
| "supervisor.slots.ports": "[6700, 6701]", |
| "nimbus.file.copy.expiration.secs": "600", |
| "supervisor.monitor.frequency.secs": "3", |
| "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", |
| "transactional.zookeeper.servers": "null", |
| "zmq.linger.millis": "5000", |
| "topology.error.throttle.interval.secs": "10", |
| "topology.worker.shared.thread.pool.size": "4", |
| "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib", |
| "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", |
| "task.heartbeat.frequency.secs": "3", |
| "topology.transfer.buffer.size": "1024", |
| "storm.zookeeper.session.timeout": "20000", |
| "nimbus.admins": "['{{storm_user}}']", |
| "topology.executor.receive.buffer.size": "1024", |
| "topology.stats.sample.rate": "0.05", |
| "topology.fall.back.on.java.serialization": "true", |
| "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM", |
| "topology.enable.message.timeouts": "true", |
| "storm.messaging.netty.max_wait_ms": "1000", |
| "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", |
| "nimbus.supervisor.timeout.secs": "60", |
| "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", |
| "storm.messaging.netty.buffer_size": "5242880", |
| "drpc.port": "3772", |
| "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", |
| "storm.zookeeper.retry.interval": "1000", |
| "nimbus.authorizer": "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer", |
| "storm.messaging.netty.max_retries": "30", |
| "topology.tick.tuple.freq.secs": "null", |
| "supervisor.enable": "true", |
| "nimbus.task.launch.secs": "120", |
| "task.refresh.poll.secs": "10", |
| "topology.message.timeout.secs": "30", |
| "nimbus.inbox.jar.expiration.secs": "3600", |
| "topology.state.synchronization.timeout.secs": "60", |
| "supervisor.worker.timeout.secs": "30", |
| "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter", |
| "topology.trident.batch.emit.interval.millis": "500", |
| "topology.builtin.metrics.bucket.size.secs": "60", |
| "storm.thrift.transport": "{{storm_thrift_transport}}", |
| "logviewer.port": "8000", |
| "storm.log.dir": "{{log_dir}}", |
| "storm.zookeeper.port": "2181" |
| }, |
| "hdfs-site": { |
| "dfs.namenode.checkpoint.period": "21600", |
| "dfs.namenode.avoid.write.stale.datanode": "true", |
| "dfs.permissions.superusergroup": "hdfs", |
| "nfs.file.dump.dir": "/tmp/.hdfs-nfs", |
| "dfs.namenode.startup.delay.block.deletion.sec": "3600", |
| "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", |
| "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", |
| "dfs.heartbeat.interval": "3", |
| "dfs.block.access.token.enable": "true", |
| "dfs.support.append": "true", |
| "dfs.datanode.address": "0.0.0.0:1019", |
| "dfs.cluster.administrators": " hdfs", |
| "dfs.datanode.balance.bandwidthPerSec": "6250000", |
| "dfs.namenode.safemode.threshold-pct": "1.0f", |
| "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", |
| "dfs.namenode.rpc-address": "c6405.ambari.apache.org:8020", |
| "dfs.permissions.enabled": "true", |
| "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", |
| "dfs.client.read.shortcircuit": "true", |
| "dfs.https.port": "50470", |
| "dfs.namenode.https-address": "c6405.ambari.apache.org:50470", |
| "dfs.blockreport.initialDelay": "120", |
| "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", |
| "dfs.blocksize": "134217728", |
| "dfs.datanode.max.transfer.threads": "16384", |
| "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", |
| "dfs.replication": "3", |
| "dfs.namenode.handler.count": "25", |
| "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", |
| "fs.permissions.umask-mode": "022", |
| "dfs.namenode.stale.datanode.interval": "30000", |
| "dfs.datanode.ipc.address": "0.0.0.0:8010", |
| "dfs.datanode.data.dir": "/hadoop/hdfs/data", |
| "dfs.namenode.http-address": "c6405.ambari.apache.org:50070", |
| "dfs.webhdfs.enabled": "true", |
| "dfs.datanode.failed.volumes.tolerated": "0", |
| "dfs.namenode.accesstime.precision": "0", |
| "dfs.datanode.https.address": "0.0.0.0:50475", |
| "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", |
| "dfs.namenode.secondary.http-address": "c6405.ambari.apache.org:50090", |
| "nfs.exports.allowed.hosts": "* rw", |
| "dfs.namenode.checkpoint.txns": "1000000", |
| "dfs.datanode.http.address": "0.0.0.0:1022", |
| "dfs.datanode.du.reserved": "1073741824", |
| "dfs.client.read.shortcircuit.streams.cache.size": "4096", |
| "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", |
| "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", |
| "dfs.http.policy": "HTTP_ONLY", |
| "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", |
| "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", |
| "dfs.namenode.name.dir.restore": "true", |
| "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", |
| "dfs.journalnode.https-address": "0.0.0.0:8481", |
| "dfs.journalnode.http-address": "0.0.0.0:8480", |
| "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", |
| "dfs.namenode.avoid.read.stale.datanode": "true", |
| "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", |
| "dfs.datanode.data.dir.perm": "750", |
| "dfs.namenode.write.stale.datanode.ratio": "1.0f", |
| "dfs.replication.max": "50", |
| "dfs.namenode.name.dir": "/hadoop/hdfs/namenode" |
| }, |
| "storm-env": { |
| "jmxremote_port": "56431", |
| "storm_ui_keytab": "/etc/security/keytabs/spnego.service.keytab", |
| "storm_log_dir": "/var/log/storm", |
| "storm_principal_name": "storm@EXAMPLE.COM", |
| "storm_pid_dir": "/var/run/storm", |
| "storm_ui_principal_name": "HTTP/_HOST@EXAMPLE.COM", |
| "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM", |
| "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab", |
| "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"\nexport STORM_HOME=/usr/hdp/current/storm-client", |
| "storm_keytab": "/etc/security/keytabs/storm.headless.keytab", |
| "storm_user": "storm" |
| }, |
| "hbase-site": { |
| "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec", |
| "hbase.master.info.bindAddress": "0.0.0.0", |
| "hbase.regionserver.port": "16020", |
| "hbase.client.keyvalue.maxsize": "1048576", |
| "hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab", |
| "phoenix.queryserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab", |
| "hbase.hstore.compactionThreshold": "3", |
| "hbase.hregion.majorcompaction.jitter": "0.50", |
| "hbase.client.retries.number": "35", |
| "hbase.bulkload.staging.dir": "/apps/hbase/staging", |
| "hbase.rootdir": "hdfs://c6405.ambari.apache.org:8020/apps/hbase/data", |
| "hbase.rpc.timeout": "60000", |
| "hbase.regionserver.handler.count": "30", |
| "hbase.hregion.majorcompaction": "604800000", |
| "hbase.rpc.protection": "authentication", |
| "hbase.bucketcache.size": "", |
| "hbase.master.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", |
| "hbase.bucketcache.percentage.in.combinedcache": "", |
| "hbase.hregion.memstore.flush.size": "134217728", |
| "hbase.superuser": "hbase", |
| "hbase.regionserver.global.memstore.lowerLimit": "0.38", |
| "hbase.zookeeper.property.clientPort": "2181", |
| "hbase.hregion.max.filesize": "1073741824", |
| "hbase.regionserver.global.memstore.upperLimit": "0.4", |
| "hbase.bucketcache.ioengine": "", |
| "zookeeper.session.timeout": "90000", |
| "hbase.regionserver.global.memstore.size": "${hbase.regionserver.global.memstore.upperLimit}", |
| "hbase.tmp.dir": "/hadoop/hbase", |
| "hfile.block.cache.size": "0.40", |
| "hbase.regionserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", |
| "phoenix.queryserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", |
| "hbase.client.scanner.caching": "100", |
| "hbase.security.authentication": "kerberos", |
| "hbase.defaults.for.version.skip": "true", |
| "hbase.master.info.port": "60010", |
| "hbase.zookeeper.quorum": "c6405.ambari.apache.org", |
| "hbase.regionserver.info.port": "16030", |
| "zookeeper.znode.parent": "/hbase-secure", |
| "hbase.zookeeper.useMulti": "true", |
| "hbase.hstore.blockingStoreFiles": "10", |
| "hbase.master.port": "16000", |
| "hbase.security.authorization": "true", |
| "hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab", |
| "phoenix.query.timeoutMs": "60000", |
| "hbase.local.dir": "${hbase.tmp.dir}/local", |
| "hbase.cluster.distributed": "true", |
| "hbase.hregion.memstore.mslab.enabled": "true", |
| "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", |
| "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController", |
| "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController", |
| "hbase.hregion.memstore.block.multiplier": "4" |
| }, |
| "knox-env": { |
| "knox_master_secret": "password", |
| "knox_pid_dir": "/var/run/knox", |
| "knox_keytab_path": "/etc/security/keytabs/knox.service.keytab", |
| "knox_group": "knox", |
| "knox_user": "knox", |
| "knox_principal_name": "knox/_HOST@EXAMPLE.COM" |
| }, |
| "hadoop-policy": { |
| "security.job.client.protocol.acl": "*", |
| "security.job.task.protocol.acl": "*", |
| "security.datanode.protocol.acl": "*", |
| "security.namenode.protocol.acl": "*", |
| "security.client.datanode.protocol.acl": "*", |
| "security.inter.tracker.protocol.acl": "*", |
| "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", |
| "security.client.protocol.acl": "*", |
| "security.refresh.policy.protocol.acl": "hadoop", |
| "security.admin.operations.protocol.acl": "hadoop", |
| "security.inter.datanode.protocol.acl": "*" |
| }, |
| "hdfs-log4j": { |
| "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN" |
| }, |
| "ranger-hbase-plugin-properties": { |
| "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", |
| "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", |
| "common.name.for.certificate": "-", |
| "XAAUDIT.HDFS.IS_ENABLED": "false", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", |
| "ranger-hbase-plugin-enabled": "No", |
| "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", |
| "XAAUDIT.DB.IS_ENABLED": "true", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", |
| "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", |
| "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", |
| "policy_user": "ambari-qa", |
| "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true", |
| "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", |
| "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", |
| "SSL_TRUSTSTORE_PASSWORD": "changeit", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", |
| "REPOSITORY_CONFIG_USERNAME": "hbase", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", |
| "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", |
| "REPOSITORY_CONFIG_PASSWORD": "hbase" |
| }, |
| "krb5-conf": { |
| "domains": "EXAMPLE.COM", |
| "manage_krb5_conf": "true", |
| "content": "\n[libdefaults]\n renew_lifetime = 7d\n forwardable = true\n default_realm = {{realm|upper()}}\n ticket_lifetime = 24h\n dns_lookup_realm = false\n dns_lookup_kdc = false\n #default_tgs_enctypes = {{encryption_types}}\n #default_tkt_enctypes = {{encryption_types}}\n\n{% if domains %}\n[domain_realm]\n{% for domain in domains.split(',') %}\n {{domain}} = {{realm|upper()}}\n{% endfor %}\n{% endif %}\n\n[logging]\n default = FILE:/var/log/krb5kdc.log\n admin_server = FILE:/var/log/kadmind.log\n kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n {{realm}} = {\n admin_server = {{admin_server_host|default(kdc_host, True)}}\n kdc = {{kdc_host}}\n }\n\n{# Append additional realm declarations below #}\n ", |
| "conf_dir": "/etc" |
| }, |
| "ldap-log4j": { |
| "content": "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n app.log.dir=${launcher.dir}/../logs\n app.log.file=${launcher.name}.log\n\n log4j.rootLogger=ERROR, drfa\n log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n log4j.logger.org.apache.directory=WARN\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n" |
| }, |
| "core-site": { |
| "hadoop.http.authentication.cookie.domain": "", |
| "proxyuser_group": "users", |
| "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", |
| "hadoop.proxyuser.hdfs.groups": "*", |
| "fs.trash.interval": "360", |
| "hadoop.http.authentication.signer.secret.provider.object": "", |
| "ipc.server.tcpnodelay": "true", |
| "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", |
| "hadoop.proxyuser.knox.hosts": "c6405.ambari.apache.org", |
| "ipc.client.idlethreshold": "8000", |
| "hadoop.http.authentication.cookie.path": "", |
| "hadoop.http.authentication.signer.secret.provider": "", |
| "hadoop.http.authentication.signature.secret": "", |
| "hadoop.rpc.protection": "authentication", |
| "io.file.buffer.size": "131072", |
| "hadoop.security.authentication": "kerberos", |
| "hadoop.http.filter.initializers": "", |
| "mapreduce.jobtracker.webinterface.trusted": "false", |
| "hadoop.proxyuser.knox.groups": "users", |
| "hadoop.proxyuser.hdfs.hosts": "*", |
| "hadoop.proxyuser.HTTP.groups": "users", |
| "fs.defaultFS": "hdfs://c6405.ambari.apache.org:8020", |
| "ha.failover-controller.active-standby-elector.zk.op.retries": "120", |
| "hadoop.http.authentication.signature.secret.file": "", |
| "hadoop.http.authentication.token.validity": "", |
| "hadoop.http.authentication.type": "simple", |
| "hadoop.security.authorization": "true", |
| "hadoop.http.authentication.simple.anonymous.allowed": "true", |
| "ipc.client.connect.max.retries": "50", |
| "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[1:$1@$0](hdfs@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](jn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](knox@EXAMPLE.COM)s/.*/knox/\nRULE:[2:$1@$0](nfs@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nDEFAULT", |
| "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", |
| "hadoop.http.authentication.kerberos.name.rules": "", |
| "ipc.client.connection.maxidletime": "30000" |
| }, |
| "hadoop-env": { |
| "proxyuser_group": "users", |
| "hdfs_log_dir_prefix": "/var/log/hadoop", |
| "hdfs_user": "hdfs", |
| "namenode_opt_maxnewsize": "256m", |
| "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", |
| "namenode_opt_maxpermsize": "256m", |
| "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. 
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"", |
| "namenode_heapsize": "1024m", |
| "namenode_opt_newsize": "256m", |
| "nfsgateway_heapsize": "1024", |
| "dtnode_heapsize": "1024m", |
| "hadoop_root_logger": "INFO,RFA", |
| "hadoop_heapsize": "1024", |
| "hadoop_pid_dir_prefix": "/var/run/hadoop", |
| "namenode_opt_permsize": "128m", |
| "hdfs_principal_name": "hdfs@EXAMPLE.COM" |
| }, |
| "zookeeper-log4j": { |
| "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n" |
| }, |
| "topology": { |
| "content": "\n <topology>\n\n <gateway>\n\n <provider>\n <role>authentication</role>\n <name>ShiroProvider</name>\n <enabled>true</enabled>\n <param>\n <name>sessionTimeout</name>\n <value>30</value>\n </param>\n <param>\n <name>main.ldapRealm</name>\n <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n </param>\n <param>\n <name>main.ldapRealm.userDnTemplate</name>\n <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.url</name>\n <value>ldap://{{knox_host_name}}:33389</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n <value>simple</value>\n </param>\n <param>\n <name>urls./**</name>\n <value>authcBasic</value>\n </param>\n </provider>\n\n <provider>\n <role>identity-assertion</role>\n <name>Default</name>\n <enabled>true</enabled>\n </provider>\n\n <provider>\n <role>authorization</role>\n <name>AclsAuthz</name>\n <enabled>true</enabled>\n </provider>\n\n </gateway>\n\n <service>\n <role>NAMENODE</role>\n <url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n </service>\n\n <service>\n <role>JOBTRACKER</role>\n <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n </service>\n\n <service>\n <role>WEBHDFS</role>\n <url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n </service>\n\n <service>\n <role>WEBHCAT</role>\n <url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n </service>\n\n <service>\n <role>OOZIE</role>\n <url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n </service>\n\n <service>\n <role>WEBHBASE</role>\n <url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n </service>\n\n <service>\n <role>HIVE</role>\n <url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n </service>\n\n <service>\n <role>RESOURCEMANAGER</role>\n <url>http://{{rm_host}}:{{rm_port}}/ws</url>\n </service>\n </topology>" |
| }, |
| "hbase-log4j": { |
| "content": "log4jproperties\nline2" |
| }, |
| "oozie-site": { |
| "oozie.service.ProxyUserService.proxyuser.knox.hosts": "c6405.ambari.apache.org", |
| "oozie.service.ProxyUserService.proxyuser.knox.groups": "users" |
| }, |
| "gateway-site": { |
| "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf", |
| "gateway.hadoop.kerberos.secured": "true", |
| "gateway.gateway.conf.dir": "deployments", |
| "gateway.path": "gateway", |
| "sun.security.krb5.debug": "true", |
| "gateway.port": "8443", |
| "java.security.krb5.conf": "/etc/krb5.conf" |
| }, |
| "hbase-env": { |
| "hbase_pid_dir": "/var/run/hbase", |
| "hbase_regionserver_xmn_max": "512", |
| "hbase_regionserver_xmn_ratio": "0.2", |
| "hbase_user": "hbase", |
| "hbase_master_heapsize": "1024m", |
| "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"\n{% endif %}", |
| "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", |
| "hbase_regionserver_heapsize": "1024m", |
| "hbase_log_dir": "/var/log/hbase", |
| "hbase_max_direct_memory_size": "", |
| "hbase_principal_name": "hbase@EXAMPLE.COM" |
| }, |
| "zookeeper-env": { |
| "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", |
| "zk_user": "zookeeper", |
| "zk_log_dir": "/var/log/zookeeper", |
| "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", |
| "zk_pid_dir": "/var/run/zookeeper", |
| "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM" |
| }, |
| "zoo.cfg": { |
| "clientPort": "2181", |
| "autopurge.purgeInterval": "24", |
| "syncLimit": "5", |
| "dataDir": "/hadoop/zookeeper", |
| "initLimit": "10", |
| "tickTime": "2000", |
| "autopurge.snapRetainCount": "30" |
| }, |
| "ranger-storm-plugin-properties": { |
| "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", |
| "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", |
| "common.name.for.certificate": "-", |
| "XAAUDIT.HDFS.IS_ENABLED": "false", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", |
| "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", |
| "XAAUDIT.DB.IS_ENABLED": "true", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", |
| "ranger-storm-plugin-enabled": "No", |
| "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", |
| "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", |
| "policy_user": "storm", |
| "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", |
| "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", |
| "SSL_TRUSTSTORE_PASSWORD": "changeit", |
| "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", |
| "REPOSITORY_CONFIG_USERNAME": "stormtestuser@EXAMPLE.COM", |
| "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", |
| "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", |
| "REPOSITORY_CONFIG_PASSWORD": "stormtestuser" |
| }, |
| "webhcat-site": { |
| "webhcat.proxyuser.knox.hosts": "c6405.ambari.apache.org", |
| "webhcat.proxyuser.knox.groups": "users" |
| }, |
| "users-ldif": { |
| "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nversion: 1\n\n# Please replace with site specific values\ndn: dc=hadoop,dc=apache,dc=org\nobjectclass: organization\nobjectclass: dcObject\no: Hadoop\ndc: hadoop\n\n# Entry for a sample people container\n# Please replace with site specific values\ndn: ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: people\n\n# Entry for a sample end user\n# Please replace with site specific values\ndn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Guest\nsn: User\nuid: guest\nuserPassword:guest-password\n\n# entry for sample user admin\ndn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Admin\nsn: Admin\nuid: admin\nuserPassword:admin-password\n\n# entry for sample user sam\ndn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: sam\nsn: sam\nuid: sam\nuserPassword:sam-password\n\n# entry for sample user tom\ndn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: tom\nsn: tom\nuid: tom\nuserPassword:tom-password\n\n# create FIRST Level groups branch\ndn: ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: groups\ndescription: generic groups branch\n\n# create the analyst group under groups\ndn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: analyst\ndescription:analyst group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nmember: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n# create the scientist group under groups\ndn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: scientist\ndescription: scientist group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org" |
| }, |
| "cluster-env": { |
| "security_enabled": "true", |
| "ignore_groupsusers_create": "false", |
| "kerberos_domain": "EXAMPLE.COM", |
| "user_group": "hadoop", |
| "smokeuser": "ambari-qa", |
| "smokeuser_principal_name": "ambari-qa@EXAMPLE.COM", |
| "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab" |
| } |
| }, |
| "configurationTags": { |
| "ranger-knox-plugin-properties": { |
| "tag": "version1429063570674" |
| }, |
| "gateway-log4j": { |
| "tag": "version1429063570674" |
| }, |
| "ranger-hdfs-plugin-properties": { |
| "tag": "version1" |
| }, |
| "hbase-policy": { |
| "tag": "version1" |
| }, |
| "kerberos-env": { |
| "tag": "version1429065728624" |
| }, |
| "storm-site": { |
| "tag": "version1429066050500" |
| }, |
| "hdfs-site": { |
| "tag": "version1429066050308" |
| }, |
| "storm-env": { |
| "tag": "version1429066050460" |
| }, |
| "hbase-site": { |
| "tag": "version1429066050414" |
| }, |
| "knox-env": { |
| "tag": "version1429066050383" |
| }, |
| "hadoop-policy": { |
| "tag": "version1" |
| }, |
| "hdfs-log4j": { |
| "tag": "version1" |
| }, |
| "ranger-hbase-plugin-properties": { |
| "tag": "version1" |
| }, |
| "krb5-conf": { |
| "tag": "version1429065728624" |
| }, |
| "ldap-log4j": { |
| "tag": "version1429063570674" |
| }, |
| "core-site": { |
| "tag": "version1429066050580" |
| }, |
| "hadoop-env": { |
| "tag": "version1429066050476" |
| }, |
| "zookeeper-log4j": { |
| "tag": "version1" |
| }, |
| "topology": { |
| "tag": "version1429063570674" |
| }, |
| "hbase-log4j": { |
| "tag": "version1" |
| }, |
| "oozie-site": { |
| "tag": "version1" |
| }, |
| "gateway-site": { |
| "tag": "version1429066050442" |
| }, |
| "hbase-env": { |
| "tag": "version1429066050343" |
| }, |
| "zookeeper-env": { |
| "tag": "version1429066050522" |
| }, |
| "zoo.cfg": { |
| "tag": "version1" |
| }, |
| "ranger-storm-plugin-properties": { |
| "tag": "version1429063043219" |
| }, |
| "webhcat-site": { |
| "tag": "version1" |
| }, |
| "users-ldif": { |
| "tag": "version1429063570674" |
| }, |
| "cluster-env": { |
| "tag": "version1429066050554" |
| } |
| }, |
| "commandId": "17-0", |
| "clusterHostInfo": { |
| "snamenode_host": [ |
| "c6405.ambari.apache.org" |
| ], |
| "drpc_server_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "nimbus_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "all_ping_ports": [ |
| "8670" |
| ], |
| "knox_gateway_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "all_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "hbase_rs_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "slave_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "namenode_host": [ |
| "c6405.ambari.apache.org" |
| ], |
| "hbase_master_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "storm_ui_server_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "all_racks": [ |
| "/default-rack" |
| ], |
| "all_ipv4_ips": [ |
| "192.168.64.105" |
| ], |
| "ambari_server_host": [ |
| "c6405.ambari.apache.org" |
| ], |
| "zookeeper_hosts": [ |
| "c6405.ambari.apache.org" |
| ], |
| "supervisor_hosts": [ |
| "c6405.ambari.apache.org" |
| ] |
| } |
| } |
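
For context: this payload is the execution command that the Ambari agent hands to the component script named in `commandParams` (`scripts/hbase_regionserver.py`, `script_type` `PYTHON`). Below is a minimal sketch of loading and querying such a payload using only the standard `json` module rather than Ambari's `resource_management` wrappers; the file name `command.json` is an assumption for illustration, not a path the fixture defines.

```python
import json

# Load a command payload shaped like the fixture above.
# "command.json" is an assumed local copy, not a path the fixture defines.
with open("command.json") as f:
    command = json.load(f)

# Top-level routing fields: which service/component and which custom command.
print(command["serviceName"])                        # -> HBASE
print(command["role"])                               # -> HBASE_REGIONSERVER
print(command["hostLevelParams"]["custom_command"])  # -> RESTART

# "configurations" is keyed by config type, then by property name.
hbase_site = command["configurations"]["hbase-site"]
print(hbase_site["hbase.rootdir"])  # -> hdfs://c6405.ambari.apache.org:8020/apps/hbase/data

# "clusterHostInfo" maps component roles to host lists.
zk_quorum = ",".join(command["clusterHostInfo"]["zookeeper_hosts"])

# Every property value is a string, even numeric or JSON-encoded ones;
# e.g. "user_list" must itself be parsed as JSON.
timeout = int(command["commandParams"]["command_timeout"])   # 900
users = json.loads(command["hostLevelParams"]["user_list"])  # ["storm", ...]
```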