#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
---
# vars file for single_node_vm blueprint
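# Ambari service component lists, grouped by service role. Every component
# named here ends up on the single host group defined at the bottom of this
# file (via metron_components).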
hadoop_master: [NAMENODE, SECONDARY_NAMENODE, RESOURCEMANAGER, HISTORYSERVER]
hadoop_slave: [APP_TIMELINE_SERVER, DATANODE, HDFS_CLIENT, NODEMANAGER, YARN_CLIENT, MAPREDUCE2_CLIENT]
spark_master: [SPARK_JOBHISTORYSERVER]
spark_slave: [SPARK_CLIENT]
storm_master: [NIMBUS, STORM_UI_SERVER, DRPC_SERVER]
storm_slave: [SUPERVISOR]
kafka_broker: [KAFKA_BROKER]
zookeeper_master: [ZOOKEEPER_SERVER]
zookeeper_slave: [ZOOKEEPER_CLIENT]
hbase_master: [HBASE_MASTER, HBASE_CLIENT]
hbase_slave: [HBASE_REGIONSERVER]
es_master: [ES_MASTER]
kibana_master: [KIBANA_MASTER]
metron_indexing: [METRON_INDEXING]
metron_enrichment_master: [METRON_ENRICHMENT_MASTER]
metron_parsers: [METRON_PARSERS]
metron_rest: [METRON_REST]
metron_management_ui: [METRON_MANAGEMENT_UI]
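# metron_components folds all of the component lists above into one list so
# that the entire stack is installed on the one-node host group.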
metron_components: >
  {{ hadoop_master | union(zookeeper_master) | union(storm_master) | union(hbase_master) | union(hadoop_slave) | union(zookeeper_slave) |
  union(storm_slave) | union(kafka_broker) | union(hbase_slave) | union(kibana_master) | union(metron_indexing) |
  union(metron_enrichment_master) | union(metron_parsers) | union(metron_rest) | union(metron_management_ui) | union(es_master) }}
cluster_name: "metron_cluster"
blueprint_name: "metron_blueprint"
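# Ambari configuration overrides applied through the blueprint. Heap sizes and
# data directories are kept small enough to fit a single VM.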
configurations:
  - zoo.cfg:
      dataDir: '{{ zookeeper_data_dir }}'
  - hadoop-env:
      hadoop_heapsize: 1024
      namenode_heapsize: 2048
      dtnode_heapsize: 512
      namenode_opt_permsize: 128m
  - hbase-env:
      hbase_regionserver_heapsize: 512
      hbase_master_heapsize: 512
      hbase_regionserver_xmn_max: 512
  - hdfs-site:
      dfs.replication: 1
      dfs.namenode.checkpoint.dir: '{{ namenode_checkpoint_dir }}'
      dfs.namenode.name.dir: '{{ namenode_name_dir }}'
      dfs.datanode.data.dir: '{{ datanode_data_dir }}'
      dfs.journalnode.edits.dir: '{{ journalnode_edits_dir }}'
  - yarn-env:
      nodemanager_heapsize: 512
      yarn_heapsize: 512
      apptimelineserver_heapsize: 512
      resourcemanager_heapsize: 1024
  - mapred-env:
      jobhistory_heapsize: 256
  - mapred-site:
      mapreduce.jobhistory.recovery.store.leveldb.path: '{{ jhs_recovery_store_ldb_path }}'
      mapreduce.map.java.opts: '{{ mapred_map_java_opts }}'
      mapreduce.reduce.java.opts: '{{ mapred_reduce_java_opts }}'
      mapreduce.map.memory.mb: '{{ mapred_map_mem_mb }}'
      mapreduce.reduce.memory.mb: '{{ mapred_reduce_mem_mb }}'
  - yarn-site:
      yarn.nodemanager.local-dirs: '{{ nodemanager_local_dirs }}'
      yarn.timeline-service.leveldb-timeline-store.path: '{{ timeline_ldb_store_path }}'
      yarn.timeline-service.leveldb-state-store.path: '{{ timeline_ldb_state_path }}'
      yarn.nodemanager.log-dirs: '{{ nodemanager_log_dirs }}'
      yarn.nodemanager.resource.memory-mb: '{{ nodemanager_mem_mb }}'
  - storm-site:
      supervisor.slots.ports: "[6700, 6701, 6702, 6703]"
      storm.local.dir: '{{ storm_local_dir }}'
      topology.classpath: '{{ topology_classpath }}'
  - kafka-env:
      content: "{% raw %}\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport KAFKA_HEAP_OPTS=\"-Xms256M -Xmx256M\"\nexport KAFKA_JVM_PERFORMANCE_OPTS=\"-server -XX:+UseG1GC -XX:+DisableExplicitGC -Djava.awt.headless=true\"\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin\nexport PID_DIR={{kafka_pid_dir}}\nexport LOG_DIR={{kafka_log_dir}}\nexport KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}\n# Add kafka sink to classpath and related dependencies\nif [ -e \"/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\" ]; then\n export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\n export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/lib/*\nfi\nif [ -f /etc/kafka/conf/kafka-ranger-env.sh ]; then\n . /etc/kafka/conf/kafka-ranger-env.sh\nfi{% endraw %}"
  - kafka-broker:
      log.dirs: '{{ kafka_log_dirs }}'
      delete.topic.enable: "true"
  - metron-env:
      parsers: "bro,snort"
      metron_spring_profiles_active: "dev"
  - elastic-site:
      index_number_of_shards: 1
      index_number_of_replicas: 0
      zen_discovery_ping_unicast_hosts: "[ {{ groups.search | join(', ') }} ]"
      gateway_recover_after_data_nodes: 1
      network_host: "[ _local_, {{ elasticsearch_network_interface }} ]"
      masters_also_are_datanodes: "1"
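# Values injected into the blueprint as required_configurations: Metron service
# endpoints, the embedded H2 JDBC connection, and Kibana settings.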
required_configurations:
  - metron-env:
      storm_rest_addr: "http://{{ groups.ambari_slave[0] }}:8744"
      es_hosts: "{{ groups.search | join(',') }}"
      zeppelin_server_url: "{{ groups.zeppelin[0] }}:9995"
      metron_jdbc_driver: "org.h2.Driver"
      metron_jdbc_url: "jdbc:h2:file:~/metrondb"
      metron_jdbc_username: "root"
      metron_jdbc_password: "root"
      metron_jdbc_platform: "h2"
  - kibana-env:
      kibana_pid_dir: /var/run/kibana
      kibana_es_url: http://{{ groups.search[0] }}:9200
      kibana_log_dir: /var/log/kibana
      kibana_server_port: 5000
      kibana_default_application: "dashboard/Metron-Dashboard"
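# Blueprint definition: a single host group (cardinality 1) carrying every
# component from metron_components on the HDP stack version selected above.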
blueprint:
  stack_name: HDP
  stack_version: "{{ hdp_stack }}"
  required_configurations: "{{ required_configurations }}"
  groups:
    - name: host_group_1
      cardinality: 1
      configurations: []
      components: "{{ metron_components }}"
      hosts: "{{ hdp_host_group }}"