Merge branch 'master' into feature/METRON-1416-upgrade-solr
diff --git a/dependencies_with_url.csv b/dependencies_with_url.csv
index 438ce3e..6ac1f23 100644
--- a/dependencies_with_url.csv
+++ b/dependencies_with_url.csv
@@ -22,6 +22,7 @@
 com.flipkart.zjsonpatch:zjsonpatch:jar:0.3.4:compile,Apache v2, https://github.com/flipkart-incubator/zjsonpatch
 com.google.protobuf:protobuf-java:jar:2.5.0:compile,New BSD license,http://code.google.com/p/protobuf
 com.google.protobuf:protobuf-java:jar:2.6.1:compile,New BSD license,http://code.google.com/p/protobuf
+com.google.protobuf:protobuf-java:jar:3.1.0:compile,New BSD license,http://code.google.com/p/protobuf
 com.jcraft:jsch:jar:0.1.42:compile,BSD,http://www.jcraft.com/jsch/
 com.jayway.jsonpath:json-path:jar:2.3.0:compile,Apache v2,https://github.com/json-path/JsonPath
 com.jayway.jsonpath:json-path:jar:2.4.0:compile,Apache v2,https://github.com/json-path/JsonPath
@@ -85,6 +86,7 @@
 org.krakenapps:kraken-api:jar:2.1.1:compile, Apache v2,
 org.krakenapps:kraken-pcap:jar:1.7.1:compile, Apache v2,
 org.ow2.asm:asm:jar:4.0:compile,BSD,http://asm.ow2.org/
+org.ow2.asm:asm:jar:5.1:compile,BSD,http://asm.ow2.org/
 org.slf4j:slf4j-api:jar:1.6.1:compile,MIT,http://www.slf4j.org
 org.slf4j:slf4j-api:jar:1.7.10:compile,MIT,http://www.slf4j.org
 org.slf4j:slf4j-api:jar:1.7.5:compile,MIT,http://www.slf4j.org
@@ -100,6 +102,7 @@
 org.slf4j:slf4j-log4j12:jar:1.7.5:compile,MIT,http://www.slf4j.org
 org.slf4j:slf4j-log4j12:jar:1.7.7:compile,MIT,http://www.slf4j.org
 org.slf4j:slf4j-simple:jar:1.7.7:compile,MIT,http://www.slf4j.org
+org.slf4j:jcl-over-slf4j:jar:1.7.7:compile,MIT,http://www.slf4j.org
 org.slf4j:jcl-over-slf4j:jar:1.7.21:compile,MIT,http://www.slf4j.org
 org.slf4j:jul-to-slf4j:jar:1.7.21:compile,MIT,http://www.slf4j.org
 org.slf4j:jul-to-slf4j:jar:1.7.25:compile,MIT,http://www.slf4j.org
diff --git a/metron-analytics/metron-profiler/.gitignore b/metron-analytics/metron-profiler/.gitignore
new file mode 100644
index 0000000..df1a13b
--- /dev/null
+++ b/metron-analytics/metron-profiler/.gitignore
@@ -0,0 +1 @@
+/logs
\ No newline at end of file
diff --git a/metron-deployment/ansible/playbooks/metron_full_install.yml b/metron-deployment/ansible/playbooks/metron_full_install.yml
index 099d810..020cb50 100644
--- a/metron-deployment/ansible/playbooks/metron_full_install.yml
+++ b/metron-deployment/ansible/playbooks/metron_full_install.yml
@@ -25,6 +25,10 @@
   tags:
     - build
 
+- include: solr_install.yml
+  tags:
+    - solr
+
 - include: ambari_install.yml
   tags:
     - ambari
diff --git a/metron-deployment/ansible/playbooks/solr_install.yml b/metron-deployment/ansible/playbooks/solr_install.yml
new file mode 100644
index 0000000..464ec80
--- /dev/null
+++ b/metron-deployment/ansible/playbooks/solr_install.yml
@@ -0,0 +1,40 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+---
+- hosts: ec2
+  become: true
+  tasks:
+    - include_vars: ../../amazon-ec2/conf/defaults.yml
+  tags:
+    - ec2
+
+- hosts: packer
+  become: true
+  tasks:
+    - include_vars: ../../development/centos6/ansible/inventory/group_vars/all
+  tags:
+    - packer
+
+#
+# Solr
+#
+- hosts: search
+  become: true
+  roles:
+    - { role: solr, tags: ['solr'] }
+  tags:
+    - solr
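
For reference, the new playbook can be run on its own via the `solr` tag. A minimal sketch of the invocation; the inventory path is an assumption, not part of this patch.

```python
# Hypothetical driver for running only the Solr install; the inventory
# path below is an assumption and must match your deployment.
import subprocess

def run_solr_install(inventory="hosts.ini"):
    subprocess.check_call([
        "ansible-playbook",
        "-i", inventory,
        "metron-deployment/ansible/playbooks/solr_install.yml",
        "--tags", "solr",  # limit the run to the Solr role wired in above
    ])

if __name__ == "__main__":
    run_solr_install()
```
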
diff --git a/metron-deployment/ansible/roles/ambari_config/vars/single_node_vm.yml b/metron-deployment/ansible/roles/ambari_config/vars/single_node_vm.yml
index 3f11ef8..6b67719 100644
--- a/metron-deployment/ansible/roles/ambari_config/vars/single_node_vm.yml
+++ b/metron-deployment/ansible/roles/ambari_config/vars/single_node_vm.yml
@@ -117,6 +117,7 @@
       storm_rest_addr: "http://{{ groups.ambari_slave[0] }}:8744"
       es_hosts: "{{ groups.search | join(',') }}"
       zeppelin_server_url: "{{ groups.zeppelin[0] }}:9995"
+      solr_zookeeper_url: "{{ groups.search[0] }}:9983"
   - metron-rest-env:
       metron_jdbc_driver: "org.h2.Driver"
       metron_jdbc_url: "jdbc:h2:file:~/metrondb"
diff --git a/metron-deployment/ansible/roles/solr/defaults/main.yml b/metron-deployment/ansible/roles/solr/defaults/main.yml
new file mode 100644
index 0000000..61562f8
--- /dev/null
+++ b/metron-deployment/ansible/roles/solr/defaults/main.yml
@@ -0,0 +1,23 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+---
+solr_version: 6.6.2
+solr_installation_user: "solr"
+solr_user_home: /var/solr
+solr_home: "{{ solr_user_home }}/solr-{{ solr_version }}"
+solr_url: http://archive.apache.org/dist/lucene/solr/{{ solr_version }}/solr-{{ solr_version }}.tgz
+
diff --git a/metron-deployment/ansible/roles/solr/meta/main.yml b/metron-deployment/ansible/roles/solr/meta/main.yml
new file mode 100644
index 0000000..ddf6aa9
--- /dev/null
+++ b/metron-deployment/ansible/roles/solr/meta/main.yml
@@ -0,0 +1,19 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+---
+dependencies:
+  - java_jdk
diff --git a/metron-deployment/ansible/roles/solr/tasks/main.yml b/metron-deployment/ansible/roles/solr/tasks/main.yml
new file mode 100644
index 0000000..cf47177
--- /dev/null
+++ b/metron-deployment/ansible/roles/solr/tasks/main.yml
@@ -0,0 +1,36 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+---
+- name: Create Solr user
+  user:
+    name: solr
+    home: "{{ solr_user_home }}"
+    system: yes
+
+- name: Download Solr
+  get_url:
+    url: "{{ solr_url }}"
+    dest: "{{ solr_user_home }}/solr-{{ solr_version }}.tar.gz"
+
+- name: Extract Solr tarball
+  unarchive:
+    src: "{{ solr_user_home }}/solr-{{ solr_version }}.tar.gz"
+    dest: "{{ solr_user_home }}"
+    copy: no
+    creates: "{{ solr_home }}"
+    owner: "{{ solr_installation_user }}"
+    group: "{{ solr_installation_user }}"
diff --git a/metron-deployment/packaging/ambari/.gitignore b/metron-deployment/packaging/ambari/.gitignore
index 2f93166..d2cc43c 100644
--- a/metron-deployment/packaging/ambari/.gitignore
+++ b/metron-deployment/packaging/ambari/.gitignore
@@ -1,6 +1,7 @@
 archive.zip
 *.hash
 elasticsearch.properties.j2
+solr.properties.j2
 hdfs.properties.j2
 enrichment.properties.j2
 enrichment-splitjoin.properties.j2
diff --git a/metron-deployment/packaging/ambari/metron-mpack/pom.xml b/metron-deployment/packaging/ambari/metron-mpack/pom.xml
index 711d058..507b63e 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/pom.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/pom.xml
@@ -129,7 +129,13 @@
                                     </includes>
                                     <filtering>false</filtering>
                                 </resource>
-
+                                <resource>
+                                    <directory>${basedir}/../../../../metron-platform/metron-solr/src/main/config</directory>
+                                    <includes>
+                                        <include>solr.properties.j2</include>
+                                    </includes>
+                                    <filtering>false</filtering>
+                                </resource>
                             </resources>
                         </configuration>
                     </execution>
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
index 5c49799..cdef7cf 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
@@ -81,24 +81,54 @@
         <value>metron</value>
         <description>Name of Elasticsearch Cluster</description>
         <display-name>Elasticsearch Cluster Name</display-name>
+        <value-attributes>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
     </property>
-    <property require-input="true">
+    <property>
         <name>es_hosts</name>
         <value></value>
         <description>Comma delimited list of Elasticsearch Master Hosts: eshost1,eshost2</description>
         <display-name>Elasticsearch Hosts</display-name>
+        <value-attributes>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
     </property>
     <property>
         <name>es_binary_port</name>
         <value>9300</value>
         <description>Elasticsearch binary port. (9300)</description>
         <display-name>Elasticsearch Binary Port</display-name>
+        <value-attributes>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
     </property>
     <property>
         <name>es_http_port</name>
         <value>9200</value>
         <description>Elasticsearch HTTP port. (9200)</description>
         <display-name>Elasticsearch HTTP port</display-name>
+        <value-attributes>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
+    </property>
+    <property>
+        <name>es_date_format</name>
+        <description>Elasticsearch Date Format</description>
+        <value>yyyy.MM.dd.HH</value>
+        <display-name>Elasticsearch Date Format</display-name>
+        <value-attributes>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
+    </property>
+    <property>
+        <name>solr_zookeeper_url</name>
+        <value>{{zookeeper_quorum}}</value>
+        <description>Comma delimited list of ZooKeeper URLs: zkhost1:2181,zkhost2:2181</description>
+        <display-name>Solr ZooKeeper URLs</display-name>
+        <value-attributes>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
     </property>
     <property require-input = "true">
         <name>storm_rest_addr</name>
@@ -131,10 +161,4 @@
             <empty-value-valid>true</empty-value-valid>
         </value-attributes>
     </property>
-    <property>
-        <name>es_date_format</name>
-        <description>Elasticsearch Date Format</description>
-        <value>yyyy.MM.dd.HH</value>
-        <display-name>Elasticsearch Date Format</display-name>
-    </property>
 </configuration>
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-indexing-env.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-indexing-env.xml
index b960536..3d268b7 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-indexing-env.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-indexing-env.xml
@@ -22,7 +22,7 @@
         <name>ra_indexing_kafka_start</name>
         <description>Indexing Topology Spout Offset</description>
         <value>UNCOMMITTED_EARLIEST</value>
-        <display-name>Elasticsearch Indexing Offset</display-name>
+        <display-name>Random Access Indexing Offset</display-name>
         <value-attributes>
             <type>value-list</type>
             <entries>
@@ -80,10 +80,22 @@
         <display-name>Indexing Error Topic</display-name>
     </property>
     <property>
-        <name>ra_indexing_writer_class_name</name>
-        <description>Indexing Writer Class Name</description>
-        <value>org.apache.metron.elasticsearch.writer.ElasticsearchWriter</value>
-        <display-name>Indexing Writer Class Name</display-name>
+        <name>ra_indexing_writer</name>
+        <description>Search Engine Used for Random Access</description>
+        <value>Elasticsearch</value>
+        <display-name>Random Access Search Engine</display-name>
+        <value-attributes>
+            <type>value-list</type>
+            <entries>
+                <entry>
+                    <value>Elasticsearch</value>
+                </entry>
+                <entry>
+                    <value>Solr</value>
+                </entry>
+            </entries>
+            <selection-cardinality>1</selection-cardinality>
+        </value-attributes>
     </property>
     <property>
         <name>update_hbase_table</name>
@@ -101,7 +113,7 @@
         <name>ra_indexing_workers</name>
         <description>Number of Indexing Topology Workers</description>
         <value>1</value>
-        <display-name>Indexing Workers for Elasticsearch</display-name>
+        <display-name>Indexing Workers for Random Access</display-name>
     </property>
     <property>
         <name>batch_indexing_workers</name>
@@ -113,7 +125,7 @@
         <name>ra_indexing_acker_executors</name>
         <description>Number of Indexing Topology Ackers</description>
         <value>1</value>
-        <display-name>Enrichment Ackers for Elasticsearch</display-name>
+        <display-name>Enrichment Ackers for Random Access</display-name>
     </property>
     <property>
         <name>batch_indexing_acker_executors</name>
@@ -135,7 +147,7 @@
         <name>ra_indexing_topology_max_spout_pending</name>
         <description>Indexing Topology Spout Max Pending Tuples</description>
         <value/>
-        <display-name>Indexing Max Pending for Elasticsearch</display-name>
+        <display-name>Indexing Max Pending for Random Access</display-name>
         <value-attributes>
             <empty-value-valid>true</empty-value-valid>
         </value-attributes>
@@ -152,7 +164,7 @@
 
     <property>
         <name>ra_indexing_kafka_spout_parallelism</name>
-        <description>Indexing Topology Kafka Spout Parallelism for Elasticsearch</description>
+        <description>Indexing Topology Kafka Spout Parallelism for Random Access</description>
         <value>1</value>
         <display-name>Indexing Spout Parallelism</display-name>
     </property>
@@ -164,7 +176,7 @@
     </property>
     <property>
         <name>ra_indexing_writer_parallelism</name>
-        <description>Indexing Topology Writer Bolt Parallelism for Elasticsearch</description>
+        <description>Indexing Topology Writer Bolt Parallelism for Random Access</description>
         <value>1</value>
         <display-name>Indexing Writer Parallelism</display-name>
     </property>
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
index 38bd94a..f83d93b 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
@@ -283,6 +283,7 @@
             <scriptType>PYTHON</scriptType>
           </commandScript>
           <configuration-dependencies>
+            <config-type>metron-indexing-env</config-type>
             <config-type>metron-rest-env</config-type>
           </configuration-dependencies>
         </component>
@@ -412,6 +413,9 @@
               <name>metron-elasticsearch</name>
             </package>
             <package>
+              <name>metron-solr</name>
+            </package>
+            <package>
               <name>metron-pcap</name>
             </package>
             <package>
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
index fd78119..4802add 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
@@ -25,11 +25,11 @@
 from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute, File
 from resource_management.libraries.functions import format as ambari_format
+from resource_management.libraries.functions.format import format
 
 import metron_service
 import metron_security
 
-
 # Wrap major operations and functionality in this class
 class IndexingCommands:
     __params = None
@@ -54,6 +54,7 @@
         self.__hbase_configured = os.path.isfile(self.__params.indexing_hbase_configured_flag_file)
         self.__hbase_acl_configured = os.path.isfile(self.__params.indexing_hbase_acl_configured_flag_file)
         self.__elasticsearch_template_installed = os.path.isfile(self.__params.elasticsearch_template_installed_flag_file)
+        self.__solr_schema_installed = os.path.isfile(self.__params.solr_schema_installed_flag_file)
         self.__hdfs_perm_configured = os.path.isfile(self.__params.indexing_hdfs_perm_configured_flag_file)
 
     def __get_topics(self):
@@ -78,6 +79,20 @@
             "metaalert_index": params.meta_index_path
         }
 
+    def get_solr_schemas(self):
+        """
+        Defines the Solr collections that need a schema installed.
+        :return: A list of collection names; the schema definition for
+          each lives under the Metron home's config/schema/<name> directory.
+        """
+        return [
+            "bro",
+            "yaf",
+            "snort",
+            "error",
+            "metaalert"
+        ]
+
     def is_configured(self):
         return self.__configured
 
@@ -96,6 +111,9 @@
     def is_elasticsearch_template_installed(self):
         return self.__elasticsearch_template_installed
 
+    def is_solr_schema_installed(self):
+        return self.__solr_schema_installed
+
     def set_configured(self):
         metron_service.set_configured(self.__params.metron_user, self.__params.indexing_configured_flag_file, "Setting Indexing configured to True")
 
@@ -114,6 +132,9 @@
     def set_elasticsearch_template_installed(self):
         metron_service.set_configured(self.__params.metron_user, self.__params.elasticsearch_template_installed_flag_file, "Setting Elasticsearch template installed to True")
 
+    def set_solr_schema_installed(self):
+        metron_service.set_configured(self.__params.metron_user, self.__params.solr_schema_installed_flag_file, "Setting Solr schema installed to True")
+
     def create_hbase_tables(self):
         Logger.info("Creating HBase Tables for indexing")
         metron_service.create_hbase_table(self.__params,
@@ -178,6 +199,53 @@
               user=self.__params.metron_user,
               err_msg=err_msg.format(template_name))
 
+    def solr_schema_install(self, env):
+        from params import params
+        env.set_params(params)
+        Logger.info("Installing Solr schemas")
+        if self.__params.security_enabled:
+            metron_security.kinit(self.__params.kinit_path_local,
+                                  self.__params.solr_keytab_path,
+                                  self.__params.solr_principal_name,
+                                  self.__params.solr_user)
+
+        commands = IndexingCommands(params)
+        for collection_name in commands.get_solr_schemas():
+
+            # install the schema
+            cmd = format((
+                "export ZOOKEEPER={solr_zookeeper_url};"
+                "export SECURITY_ENABLED={security_enabled};"
+            ))
+            cmd += "{0}/bin/create_collection.sh {1};"
+
+            Execute(
+                cmd.format(params.metron_home, collection_name),
+                user=self.__params.solr_user)
+
+    def solr_schema_delete(self, env):
+        from params import params
+        env.set_params(params)
+        Logger.info("Deleting Solr schemas")
+        if self.__params.security_enabled:
+            metron_security.kinit(self.__params.kinit_path_local,
+                                  self.__params.solr_keytab_path,
+                                  self.__params.solr_principal_name,
+                                  self.__params.solr_user)
+
+        commands = IndexingCommands(params)
+        for collection_name in commands.get_solr_schemas():
+            # delete the schema
+            cmd = format((
+                "export ZOOKEEPER={solr_zookeeper_url};"
+                "export SECURITY_ENABLED={security_enabled};"
+            ))
+            cmd += "{0}/bin/delete_collection.sh {1};"
+
+            Execute(
+                cmd.format(params.metron_home, collection_name),
+                user=self.__params.solr_user)
+
     def start_batch_indexing_topology(self, env):
         Logger.info('Starting ' + self.__batch_indexing_topology)
 
@@ -206,8 +274,9 @@
                                       self.__params.metron_keytab_path,
                                       self.__params.metron_principal_name,
                                       execute_user=self.__params.metron_user)
-
             start_cmd_template = """{0}/bin/start_elasticsearch_topology.sh"""
+            if self.__params.ra_indexing_writer == 'Solr':
+                start_cmd_template = """{0}/bin/start_solr_topology.sh"""
             start_cmd = start_cmd_template.format(self.__params.metron_home)
             Execute(start_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
 
@@ -324,7 +393,6 @@
             is_random_access_running = topologies[self.__random_access_indexing_topology] in ['ACTIVE', 'REBALANCING']
         return is_random_access_running
 
-
     def is_topology_active(self, env):
         return self.is_batch_topology_active(env) and self.is_random_access_topology_active(env)
 
@@ -333,6 +401,8 @@
         Performs a service check for Indexing.
         :param env: Environment
         """
+        metron_service.check_indexer_parameters()
+
         Logger.info('Checking Kafka topics for Indexing')
         metron_service.check_kafka_topics(self.__params, self.__get_topics())
 
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
index 1cd6f4c..9f9ab87 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
@@ -48,11 +48,17 @@
         env.set_params(params)
 
         Logger.info("Running indexing configure")
+        metron_service.check_indexer_parameters()
         File(format("{metron_config_path}/elasticsearch.properties"),
              content=Template("elasticsearch.properties.j2"),
              owner=params.metron_user,
              group=params.metron_group
              )
+        File(format("{metron_config_path}/solr.properties"),
+             content=Template("solr.properties.j2"),
+             owner=params.metron_user,
+             group=params.metron_group
+             )
         File(format("{metron_config_path}/hdfs.properties"),
              content=Template("hdfs.properties.j2"),
              owner=params.metron_user,
@@ -91,17 +97,28 @@
         env.set_params(params)
         self.configure(env)
         commands = IndexingCommands(params)
+        if params.ra_indexing_writer == 'Solr':
+            # Install Solr schemas
+            try:
+                if not commands.is_solr_schema_installed():
+                    commands.solr_schema_install(env)
+                    commands.set_solr_schema_installed()
 
-        # Install elasticsearch templates
-        try:
-            if not commands.is_elasticsearch_template_installed():
-                self.elasticsearch_template_install(env)
-                commands.set_elasticsearch_template_installed()
+            except Exception as e:
+                msg = "WARNING: Solr schemas could not be installed.  " \
+                      "Is Solr running?  Will reattempt install on next start.  error={0}"
+                Logger.warning(msg.format(e))
+        else:
+            # Install elasticsearch templates
+            try:
+                if not commands.is_elasticsearch_template_installed():
+                    self.elasticsearch_template_install(env)
+                    commands.set_elasticsearch_template_installed()
 
-        except Exception as e:
-            msg = "WARNING: Elasticsearch index templates could not be installed.  " \
-                  "Is Elasticsearch running?  Will reattempt install on next start.  error={0}"
-            Logger.warning(msg.format(e))
+            except Exception as e:
+                msg = "WARNING: Elasticsearch index templates could not be installed.  " \
+                      "Is Elasticsearch running?  Will reattempt install on next start.  error={0}"
+                Logger.warning(msg.format(e))
 
         commands.start_indexing_topology(env)
 
@@ -121,6 +138,7 @@
     def restart(self, env):
         from params import params
         env.set_params(params)
+
         self.configure(env)
         commands = IndexingCommands(params)
         commands.restart_indexing_topology(env)
@@ -129,6 +147,7 @@
         from params import params
         env.set_params(params)
         Logger.info("Installing Elasticsearch index templates")
+        metron_service.check_indexer_parameters()
 
         commands = IndexingCommands(params)
         for template_name, template_path in commands.get_templates().iteritems():
@@ -144,9 +163,11 @@
         from params import params
         env.set_params(params)
         Logger.info("Deleting Elasticsearch index templates")
+        metron_service.check_indexer_parameters()
 
         commands = IndexingCommands(params)
         for template_name in commands.get_templates():
+
             # delete the index template
             cmd = "curl -s -XDELETE \"http://{0}/_template/{1}\""
             Execute(
@@ -157,9 +178,9 @@
     def kibana_dashboard_install(self, env):
       from params import params
       env.set_params(params)
+      metron_service.check_indexer_parameters()
 
       Logger.info("Connecting to Elasticsearch on: %s" % (params.es_http_url))
-
       kibanaTemplate = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'kibana.template')
       if not os.path.isfile(kibanaTemplate):
         raise IOError(
@@ -184,8 +205,9 @@
     def zeppelin_notebook_import(self, env):
         from params import params
         env.set_params(params)
-        commands = IndexingCommands(params)
+        metron_service.check_indexer_parameters()
 
+        commands = IndexingCommands(params)
         Logger.info(ambari_format('Searching for Zeppelin Notebooks in {metron_config_zeppelin_path}'))
 
         # Check if authentication is configured on Zeppelin server, and fetch details if enabled.
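
Topology startup now keys off `ra_indexing_writer` in two places: `start()` installs Solr schemas instead of Elasticsearch templates, and `start_random_access_indexing_topology` in indexing_commands.py picks the matching start script. A distilled sketch of that selection (the metron_home path is illustrative):

```python
# Distilled version of the start-script selection on ra_indexing_writer;
# the metron_home path is illustrative.
def random_access_start_cmd(metron_home, ra_indexing_writer):
    template = "{0}/bin/start_elasticsearch_topology.sh"
    if ra_indexing_writer == 'Solr':
        template = "{0}/bin/start_solr_topology.sh"
    return template.format(metron_home)

assert random_access_start_cmd("/usr/metron/0.4.3", "Solr") \
    == "/usr/metron/0.4.3/bin/start_solr_topology.sh"
```
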
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/metron_service.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/metron_service.py
index e3e48ab..9d15e93 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/metron_service.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/metron_service.py
@@ -27,6 +27,7 @@
 from resource_management.core.source import InlineTemplate
 from resource_management.libraries.functions import format as ambari_format
 from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.libraries.script import Script
 
 from metron_security import kinit
 
@@ -51,10 +52,23 @@
 def set_zk_configured(params):
   set_configured(params.metron_user, params.zk_configured_flag_file, "Setting Zookeeper configured to true")
 
-def build_global_config_patch(params, patch_file):
-  # see RFC 6902 at https://tools.ietf.org/html/rfc6902
-  patch_template = """
-  [
+def solr_global_config_patches():
+  """
+  Builds the global configuration patches required for Solr.
+  """
+  return """
+    {
+        "op": "add",
+        "path": "/solr.zookeeper",
+        "value": "{{solr_zookeeper_url}}"
+    }
+  """
+
+def elasticsearch_global_config_patches():
+  """
+  Builds the global configuration patches required for Elasticsearch.
+  """
+  return """
     {
         "op": "add",
         "path": "/es.clustername",
@@ -69,6 +83,31 @@
         "op": "add",
         "path": "/es.date.format",
         "value": "{{es_date_format}}"
+    }
+  """
+
+def build_global_config_patch(params, patch_file):
+  """
+  Build the file used to patch the global configuration.
+  See RFC 6902 at https://tools.ietf.org/html/rfc6902
+
+  :param params:
+  :param patch_file: The path where the patch file will be created.
+  """
+  if params.ra_indexing_writer == 'Solr':
+      indexing_patches = solr_global_config_patches()
+  else:
+      indexing_patches = elasticsearch_global_config_patches()
+  other_patches = """
+    {
+        "op": "add",
+        "path": "/profiler.client.period.duration",
+        "value": "{{profiler_period_duration}}"
+    },
+    {
+        "op": "add",
+        "path": "/profiler.client.period.duration.units",
+        "value": "{{profiler_period_units}}"
     },
     {
         "op": "add",
@@ -87,16 +126,6 @@
     },
     {
         "op": "add",
-        "path": "/profiler.client.period.duration",
-        "value": "{{profiler_period_duration}}"
-    },
-    {
-        "op": "add",
-        "path": "/profiler.client.period.duration.units",
-        "value": "{{profiler_period_units}}"
-    },
-    {
-        "op": "add",
         "path": "/user.settings.hbase.table",
         "value": "{{user_settings_hbase_table}}"
     },
@@ -140,8 +169,14 @@
         "path": "/profiler.writer.batchTimeout",
         "value": "{{profiler_kafka_writer_batch_timeout}}"
     }
-  ]
   """
+  patch_template = ambari_format(
+  """
+  [
+    {indexing_patches},
+    {other_patches}
+  ]
+  """)
   File(patch_file,
        content=InlineTemplate(patch_template),
        owner=params.metron_user,
@@ -157,6 +192,7 @@
       "{metron_home}/bin/zk_load_configs.sh --zk_quorum {zookeeper_quorum} --mode PATCH --config_type GLOBAL --patch_file " + patch_file),
       path=ambari_format("{java_home}/bin")
   )
+  Logger.info("Done patching global config")
 
 def pull_config(params):
   Logger.info('Pulling all Metron configs down from ZooKeeper to local file system')
@@ -166,17 +202,12 @@
       path=ambari_format("{java_home}/bin")
   )
 
-# pushes json patches to zookeeper based on Ambari parameters that are configurable by the user
 def refresh_configs(params):
   if not is_zk_configured(params):
     Logger.warning("The expected flag file '" + params.zk_configured_flag_file + "'indicating that Zookeeper has been configured does not exist. Skipping patching. An administrator should look into this.")
     return
-
-  Logger.info("Patch global config in Zookeeper")
+  check_indexer_parameters()
   patch_global_config(params)
-  Logger.info("Done patching global config")
-
-  Logger.info("Pull zookeeper config locally")
   pull_config(params)
 
 def get_running_topologies(params):
@@ -530,3 +561,32 @@
       Execute(cmd, tries=3, try_sleep=5, logoutput=False, user=user)
     except:
       raise ComponentIsNotRunning()
+
+def check_indexer_parameters():
+    """
+    Ensure that all required parameters have been defined for the chosen
+    indexer, either Solr or Elasticsearch.
+    """
+    missing = []
+    config = Script.get_config()
+    indexer = config['configurations']['metron-indexing-env']['ra_indexing_writer']
+    Logger.info('Checking parameters for indexer = ' + indexer)
+
+    if indexer == 'Solr':
+      # check for all required solr parameters
+      if not config['configurations']['metron-env']['solr_zookeeper_url']:
+        missing.append("metron-env/solr_zookeeper_url")
+
+    else:
+      # check for all required elasticsearch parameters
+      if not config['configurations']['metron-env']['es_cluster_name']:
+        missing.append("metron-env/es_cluster_name")
+      if not config['configurations']['metron-env']['es_hosts']:
+        missing.append("metron-env/es_hosts")
+      if not config['configurations']['metron-env']['es_binary_port']:
+        missing.append("metron-env/es_binary_port")
+      if not config['configurations']['metron-env']['es_date_format']:
+        missing.append("metron-env/es_date_format")
+
+    if len(missing) > 0:
+      raise Fail("Missing required indexing parameters(s): indexer={0}, missing={1}".format(indexer, missing))
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
index ec01292..07132a0 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
@@ -83,6 +83,7 @@
 indexing_hbase_acl_configured_flag_file = status_params.indexing_hbase_acl_configured_flag_file
 indexing_hdfs_perm_configured_flag_file = status_params.indexing_hdfs_perm_configured_flag_file
 elasticsearch_template_installed_flag_file = status_params.elasticsearch_template_installed_flag_file
+solr_schema_installed_flag_file = status_params.solr_schema_installed_flag_file
 rest_kafka_configured_flag_file = status_params.rest_kafka_configured_flag_file
 rest_kafka_acl_configured_flag_file = status_params.rest_kafka_acl_configured_flag_file
 rest_hbase_configured_flag_file = status_params.rest_hbase_configured_flag_file
@@ -123,6 +124,14 @@
     # last port config
     zookeeper_quorum += ':' + zookeeper_clientPort
 
+# Solr params
+solr_version = '6.6.2'
+solr_home = '/var/solr/solr-' + solr_version
+solr_zookeeper_url = format(format(config['configurations']['metron-env']['solr_zookeeper_url']))
+solr_user = config['configurations']['solr-config-env']['solr_config_user']
+solr_principal_name = config['configurations']['solr-config-env']['solr_principal_name']
+solr_keytab_path = config['configurations']['solr-config-env']['solr_keytab_path']
+
 # Storm
 storm_rest_addr = status_params.storm_rest_addr
 
@@ -203,6 +212,13 @@
 error_index_path = tmp_dir + "/error_index.template"
 meta_index_path = tmp_dir + "/metaalert_index.template"
 
+# Solr Schemas
+bro_schema_path = metron_home + "/config/schema/bro"
+snort_schema_path = metron_home + "/config/schema/snort"
+yaf_schema_path = metron_home + "/config/schema/yaf"
+error_schema_path = metron_home + "/config/schema/error"
+meta_schema_path = metron_home + "/config/schema/metaalert"
+
 # Zeppelin Notebooks
 metron_config_zeppelin_path = format("{metron_config_path}/zeppelin")
 zeppelin_shiro_ini_content = status_params.zeppelin_shiro_ini_content
@@ -243,6 +259,8 @@
 
     nimbus_seeds = config['configurations']['storm-site']['nimbus.seeds']
 
+    solr_principal_name = solr_principal_name.replace('_HOST', hostname_lowercase)
+
 # Management UI
 metron_rest_host = default("/clusterHostInfo/metron_rest_hosts", [hostname])[0]
 
@@ -340,7 +358,7 @@
 indexing_error_topic = config['configurations']['metron-indexing-env']['indexing_error_topic']
 metron_random_access_indexing_topology = status_params.metron_random_access_indexing_topology
 metron_batch_indexing_topology = status_params.metron_batch_indexing_topology
-ra_indexing_writer_class_name = config['configurations']['metron-indexing-env']['ra_indexing_writer_class_name']
+ra_indexing_writer = config['configurations']['metron-indexing-env']['ra_indexing_writer']
 batch_indexing_writer_class_name = config['configurations']['metron-indexing-env']['batch_indexing_writer_class_name']
 ra_indexing_workers = config['configurations']['metron-indexing-env']['ra_indexing_workers']
 batch_indexing_workers = config['configurations']['metron-indexing-env']['batch_indexing_workers']
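
The doubled `format(format(...))` on `solr_zookeeper_url` appears deliberate: the Ambari default for that property is the literal string `{{zookeeper_quorum}}`, so the first pass unescapes the doubled braces and the second substitutes the quorum. The same two-pass behavior shown with plain `str.format` as a stand-in for Ambari's `format`:

```python
# Stand-in for format(format(...)) on solr_zookeeper_url; the quorum
# value is an assumed example.
raw = "{{zookeeper_quorum}}"          # default stored in metron-env.xml
once = raw.format()                   # -> "{zookeeper_quorum}"
twice = once.format(zookeeper_quorum="zk1:2181,zk2:2181")
print(once, "->", twice)              # {zookeeper_quorum} -> zk1:2181,zk2:2181
```
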
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/status_params.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/status_params.py
index ed2edfb..7eb9fab 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/status_params.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/status_params.py
@@ -78,6 +78,9 @@
 # Elasticsearch
 elasticsearch_template_installed_flag_file = metron_zookeeper_config_path + '/../metron_elasticsearch_template_installed_flag_file'
 
+# Solr
+solr_schema_installed_flag_file = metron_zookeeper_config_path + '/../metron_solr_schema_installed_flag_file'
+
 # REST
 metron_rest_port = config['configurations']['metron-rest-env']['metron_rest_port']
 rest_kafka_configured_flag_file = metron_zookeeper_config_path + '/../metron_rest_kafka_configured'
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py
index e97af05..0ff6fb1 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py
@@ -125,6 +125,7 @@
           "export METRON_INDEX_CP={metron_indexing_classpath};"
           "export METRON_LOG_DIR={metron_log_dir};"
           "export METRON_PID_FILE={pid_file};"
+          "export METRON_RA_INDEXING_WRITER={ra_indexing_writer};"
           "{metron_home}/bin/metron-rest.sh;"
           "unset METRON_JDBC_PASSWORD;"
         ))
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
index d757ec6..17b4460 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
@@ -221,7 +221,7 @@
                   "subsections": [
                     {
                       "name": "subsection-indexing-storm",
-                      "display-name": "Index Writer - Elasticsearch",
+                      "display-name": "Index Writer - Random Access",
                       "row-index": "0",
                       "column-index": "0",
                       "row-span": "1",
@@ -448,6 +448,10 @@
           "subsection-name": "subsection-index-settings"
         },
         {
+          "config": "metron-env/solr_zookeeper_url",
+          "subsection-name": "subsection-index-settings"
+        },
+        {
           "config": "metron-parsers-env/parsers",
           "subsection-name": "subsection-parsers"
         },
@@ -608,7 +612,7 @@
           "subsection-name": "subsection-indexing-update"
         },
         {
-          "config": "metron-indexing-env/ra_indexing_writer_class_name",
+          "config": "metron-indexing-env/ra_indexing_writer",
           "subsection-name": "subsection-indexing-storm"
         },
         {
@@ -857,6 +861,12 @@
         }
       },
       {
+        "config": "metron-env/solr_zookeeper_url",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
         "config": "metron-rest-env/source_type_field",
         "widget": {
           "type": "text-field"
@@ -1110,9 +1120,9 @@
         }
       },
       {
-        "config": "metron-indexing-env/ra_indexing_writer_class_name",
+        "config": "metron-indexing-env/ra_indexing_writer",
         "widget": {
-          "type": "text-field"
+          "type": "combo"
         }
       },
       {
diff --git a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
index 15469d9..4b88fd0 100644
--- a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
+++ b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
@@ -248,8 +248,23 @@
 %dir %{metron_home}/bin
 %dir %{metron_home}/config
 %dir %{metron_home}/lib
+%{metron_home}/bin/create_collection.sh
+%{metron_home}/bin/delete_collection.sh
+%{metron_home}/bin/install_solr.sh
+%{metron_home}/bin/start_solr.sh
 %{metron_home}/bin/start_solr_topology.sh
+%{metron_home}/bin/stop_solr.sh
 %{metron_home}/config/solr.properties
+%{metron_home}/config/schema/bro/schema.xml
+%{metron_home}/config/schema/bro/solrconfig.xml
+%{metron_home}/config/schema/error/schema.xml
+%{metron_home}/config/schema/error/solrconfig.xml
+%{metron_home}/config/schema/metaalert/schema.xml
+%{metron_home}/config/schema/metaalert/solrconfig.xml
+%{metron_home}/config/schema/snort/schema.xml
+%{metron_home}/config/schema/snort/solrconfig.xml
+%{metron_home}/config/schema/yaf/schema.xml
+%{metron_home}/config/schema/yaf/solrconfig.xml
 %attr(0644,root,root) %{metron_home}/lib/metron-solr-%{full_version}-uber.jar
 
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -529,6 +544,8 @@
 chkconfig --del metron-alerts-ui
 
 %changelog
+* Thu Feb 1 2018 Apache Metron <dev@metron.apache.org> - 0.4.3
+- Add Solr install script to Solr RPM
 * Tue Sep 25 2017 Apache Metron <dev@metron.apache.org> - 0.4.2
 - Add Alerts UI
 * Tue Sep 19 2017 Apache Metron <dev@metron.apache.org> - 0.4.2
diff --git a/metron-interface/metron-alerts/README.md b/metron-interface/metron-alerts/README.md
index ca6dcb7..528878c 100644
--- a/metron-interface/metron-alerts/README.md
+++ b/metron-interface/metron-alerts/README.md
@@ -40,7 +40,8 @@
 Alerts that are contained in a meta alert are generally excluded from search results, because a user has already grouped them in a meaningful way.
 
 ## Prerequisites
-* The Metron REST application should be up and running and Elasticsearch should have some alerts populated by Metron topologies
+* The Metron REST application should be up and running
+* Elasticsearch or Solr should have some alerts populated by Metron topologies, depending on which real-time store is enabled
 * The Management UI should be installed (which includes [Express](https://expressjs.com/))
 * The alerts can be populated using Full Dev or any other setup
 * UI is developed using angular4 and uses angular-cli
diff --git a/metron-interface/metron-alerts/src/app/alerts/alert-details/alert-details.component.ts b/metron-interface/metron-alerts/src/app/alerts/alert-details/alert-details.component.ts
index efc7e84..e68a8e0 100644
--- a/metron-interface/metron-alerts/src/app/alerts/alert-details/alert-details.component.ts
+++ b/metron-interface/metron-alerts/src/app/alerts/alert-details/alert-details.component.ts
@@ -15,10 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-import { Component, OnInit } from '@angular/core';
-import {Router, ActivatedRoute} from '@angular/router';
+import {Component, OnInit} from '@angular/core';
+import {ActivatedRoute, Router} from '@angular/router';
 import * as moment from 'moment/moment';
-import {Observable, Subscription} from 'rxjs/Rx';
+import {Subscription} from 'rxjs/Rx';
 
 import {SearchService} from '../../service/search.service';
 import {UpdateService} from '../../service/update.service';
@@ -30,13 +30,13 @@
 import {AlertComment} from './alert-comment';
 import {AuthenticationService} from '../../service/authentication.service';
 import {MetronDialogBox} from '../../shared/metron-dialog-box';
-import {META_ALERTS_INDEX, META_ALERTS_SENSOR_TYPE} from '../../utils/constants';
-import { GlobalConfigService } from '../../service/global-config.service';
+import {CommentAddRemoveRequest} from "../../model/comment-add-remove-request";
+import {META_ALERTS_SENSOR_TYPE} from '../../utils/constants';
+import {GlobalConfigService} from '../../service/global-config.service';
 
 export enum AlertState {
   NEW, OPEN, ESCALATE, DISMISS, RESOLVE
 }
-
 export enum Tabs {
   DETAILS, COMMENTS
 }
@@ -135,7 +135,7 @@
       this.alertId = params['guid'];
       this.alertSourceType = params['source.type.field'];
       this.alertIndex = params['index'];
-      this.isMetaAlert = (this.alertIndex === META_ALERTS_INDEX && this.alertSourceType !== META_ALERTS_SENSOR_TYPE) ? true : false;
+      this.isMetaAlert = this.alertSourceType === META_ALERTS_SENSOR_TYPE;
       this.getData();
     });
   };
@@ -211,7 +211,6 @@
       let patchRequest = new PatchRequest();
       patchRequest.guid = this.alertId;
       patchRequest.sensorType = 'metaalert';
-      patchRequest.index = META_ALERTS_INDEX;
       patchRequest.patch = [new Patch('add', '/name', this.alertName)];
 
       this.updateService.patch(patchRequest).subscribe(rep => {
@@ -225,11 +224,19 @@
     let alertComments = this.alertCommentsWrapper.map(alertsWrapper => alertsWrapper.alertComment);
     alertComments.unshift(newComment);
     this.setComments(alertComments);
-    this.patchAlert(new Patch('add', '/comments', alertComments), () => {
-      let previousComments = this.alertCommentsWrapper.map(alertsWrapper => alertsWrapper.alertComment)
-              .filter(alertComment => alertComment !== newComment);
-      this.setComments(previousComments);
-    });
+    let commentRequest = new CommentAddRemoveRequest();
+    commentRequest.guid = this.alertSource.guid;
+    commentRequest.comment = this.alertCommentStr;
+    commentRequest.username = this.authenticationService.getCurrentUserName();
+    commentRequest.timestamp = new Date().getTime();
+    commentRequest.sensorType = this.alertSourceType;
+    this.updateService.addComment(commentRequest).subscribe(
+        () => {},
+        () => {
+          let previousComments = this.alertCommentsWrapper.map(alertsWrapper => alertsWrapper.alertComment)
+          .filter(alertComment => alertComment !== newComment);
+          this.setComments(previousComments);
+        });
   }
 
   patchAlert(patch: Patch, onPatchError) {
@@ -253,14 +260,22 @@
     this.metronDialogBox.showConfirmationMessage(commentText).subscribe(response => {
       if (response) {
         let deletedCommentWrapper = this.alertCommentsWrapper.splice(index, 1)[0];
-        this.patchAlert(new Patch('add', '/comments', this.alertCommentsWrapper.map(alertsWrapper => alertsWrapper.alertComment)), () => {
-          // add the deleted comment back
-          this.alertCommentsWrapper.unshift(deletedCommentWrapper);
-          this.alertCommentsWrapper.sort((a, b) => b.alertComment.timestamp - a.alertComment.timestamp);
-        });
+        let commentRequest = new CommentAddRemoveRequest();
+        commentRequest.guid = this.alertSource.guid;
+        commentRequest.comment = this.alertCommentsWrapper[index].alertComment.comment;
+        commentRequest.username = this.alertCommentsWrapper[index].alertComment.username;
+        commentRequest.timestamp = this.alertCommentsWrapper[index].alertComment.timestamp;
+        commentRequest.sensorType = this.alertSourceType;
+        this.updateService.removeComment(commentRequest).subscribe(
+            () => {
+              // nothing to do on success; the comment was already removed from the local list
+            },
+            () => {
+              // add the deleted comment back
+              this.alertCommentsWrapper.unshift(deletedCommentWrapper);
+              this.alertCommentsWrapper.sort((a, b) => b.alertComment.timestamp - a.alertComment.timestamp);
+            });
       }
     });
   }
 }
-
-
diff --git a/metron-interface/metron-alerts/src/app/alerts/alerts-list/alerts-list.component.ts b/metron-interface/metron-alerts/src/app/alerts/alerts-list/alerts-list.component.ts
index c13b7d4..ca1bd50 100644
--- a/metron-interface/metron-alerts/src/app/alerts/alerts-list/alerts-list.component.ts
+++ b/metron-interface/metron-alerts/src/app/alerts/alerts-list/alerts-list.component.ts
@@ -39,7 +39,7 @@
 import { TIMESTAMP_FIELD_NAME, ALL_TIME, POLLING_DEFAULT_STATE } from '../../utils/constants';
 import {TableViewComponent} from './table-view/table-view.component';
 import {Pagination} from '../../model/pagination';
-import {META_ALERTS_SENSOR_TYPE, META_ALERTS_INDEX} from '../../utils/constants';
+import {META_ALERTS_SENSOR_TYPE} from '../../utils/constants';
 import {MetaAlertService} from '../../service/meta-alert.service';
 import {Facets} from '../../model/facets';
 import { GlobalConfigService } from '../../service/global-config.service';
@@ -399,8 +399,7 @@
     this.selectedAlerts = [];
     this.selectedAlerts = [alert];
     this.saveRefreshState();
-    let sourceType = (alert.index === META_ALERTS_INDEX && !alert.source[this.globalConfig['source.type.field']])
-        ? META_ALERTS_SENSOR_TYPE : alert.source[this.globalConfig['source.type.field']];
+    let sourceType = alert.source[this.globalConfig['source.type.field']];
     let url = '/alerts-list(dialog:details/' + sourceType + '/' + alert.source.guid + '/' + alert.index + ')';
     this.router.navigateByUrl(url);
   }
diff --git a/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.html b/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.html
index ab7072c..4793efa 100644
--- a/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.html
+++ b/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.html
@@ -69,7 +69,7 @@
               {{ alert.source['alert_status'] ?alert.source['alert_status'] : 'New' | centerEllipses:20:cell }}
             </a>
           </td>
-          <td width="20" class="icon-cell" (click)="deleteMetaAlert($event, alert, alertIndex)">
+          <td width="20" class="icon-cell" (click)="deleteMetaAlert($event, alert)">
             <i class="fa fa-chain-broken" aria-hidden="true"></i>
           </td>
           <td width="20" class="icon-cell">
diff --git a/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.ts b/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.ts
index ee1970f..4d915cd 100644
--- a/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.ts
+++ b/metron-interface/metron-alerts/src/app/alerts/alerts-list/table-view/table-view.component.ts
@@ -33,7 +33,6 @@
 import {PatchRequest} from '../../../model/patch-request';
 import {Patch} from '../../../model/patch';
 import {UpdateService} from '../../../service/update.service';
-import {META_ALERTS_INDEX} from '../../../utils/constants';
 import {MetaAlertService} from '../../../service/meta-alert.service';
 import {MetaAlertAddRemoveRequest} from '../../../model/meta-alert-add-remove-request';
 import {GetRequest} from '../../../model/get-request';
@@ -241,7 +240,6 @@
   showMetaAlertDetails($event, alertSource: AlertSource) {
     let alert = new Alert();
     alert.source = alertSource;
-    alert.index = META_ALERTS_INDEX;
     this.showDetails($event, alert);
   }
 
@@ -275,10 +273,10 @@
     $event.stopPropagation();
   }
 
-  deleteMetaAlert($event, alert: Alert, index: number) {
+  deleteMetaAlert($event, alert: Alert) {
     this.metronDialogBox.showConfirmationMessage('Do you wish to remove all the alerts from meta alert?').subscribe(response => {
       if (response) {
-        this.doDeleteMetaAlert(alert, index);
+        this.doDeleteMetaAlert(alert);
       }
     });
     $event.stopPropagation();
@@ -294,7 +292,7 @@
     });
   }
 
-  doDeleteMetaAlert(alert: Alert, index: number) {
+  doDeleteMetaAlert(alert: Alert) {
     this.metaAlertService.updateMetaAlertStatus(alert.source.guid, 'inactive').subscribe(() => {
     });
   }
diff --git a/metron-interface/metron-alerts/src/app/model/comment-add-remove-request.ts b/metron-interface/metron-alerts/src/app/model/comment-add-remove-request.ts
new file mode 100644
index 0000000..35f5d86
--- /dev/null
+++ b/metron-interface/metron-alerts/src/app/model/comment-add-remove-request.ts
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export class CommentAddRemoveRequest {
+  guid: string;
+  comment: string;
+  username: string;
+  sensorType: string;
+  timestamp: number;
+}
\ No newline at end of file
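This Angular model mirrors the Java CommentAddRemoveRequest in metron-indexing (the same guid, comment, username, sensorType, and timestamp fields), so instances serialize directly into the request body expected by the new comment endpoints added further down.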
diff --git a/metron-interface/metron-alerts/src/app/service/update.service.ts b/metron-interface/metron-alerts/src/app/service/update.service.ts
index 24b55f0..3a18fc2 100644
--- a/metron-interface/metron-alerts/src/app/service/update.service.ts
+++ b/metron-interface/metron-alerts/src/app/service/update.service.ts
@@ -29,8 +29,8 @@
 import {PatchRequest} from '../model/patch-request';
 import {Utils} from '../utils/utils';
 import {Patch} from '../model/patch';
-import {META_ALERTS_INDEX, META_ALERTS_SENSOR_TYPE} from '../utils/constants';
 import { GlobalConfigService } from './global-config.service';
+import {CommentAddRemoveRequest} from "../model/comment-add-remove-request";
 
 @Injectable()
 export class UpdateService {
@@ -40,6 +40,8 @@
   alertChangedSource = new Subject<PatchRequest>();
   alertChanged$ = this.alertChangedSource.asObservable();
   sourceType = 'source:type';
+  alertCommentChangedSource = new Subject<CommentAddRemoveRequest>();
+  alertCommentChanged$ = this.alertCommentChangedSource.asObservable();
 
   constructor(private http: Http, private globalConfigService: GlobalConfigService) {
     this.globalConfigService.get().subscribe((config: {}) => {
@@ -47,6 +49,30 @@
     });
   }
 
+  public addComment(commentRequest: CommentAddRemoveRequest, fireChangeListener = true): Observable<{}> {
+    let url = '/api/v1/update/add/comment';
+    return this.http.post(url, commentRequest, new RequestOptions({headers: new Headers(this.defaultHeaders)}))
+    .catch(HttpUtil.handleError)
+    .map(result => {
+      if (fireChangeListener) {
+        this.alertCommentChangedSource.next(commentRequest);
+      }
+      return result;
+    });
+  }
+
+  public removeComment(commentRequest: CommentAddRemoveRequest, fireChangeListener = true): Observable<{}> {
+    let url = '/api/v1/update/remove/comment';
+    return this.http.post(url, commentRequest, new RequestOptions({headers: new Headers(this.defaultHeaders)}))
+    .catch(HttpUtil.handleError)
+    .map(result => {
+      if (fireChangeListener) {
+        this.alertCommentChangedSource.next(commentRequest);
+      }
+      return result;
+    });
+  }
+
   public patch(patchRequest: PatchRequest, fireChangeListener = true): Observable<{}> {
     let url = '/api/v1/update/patch';
     return this.http.patch(url, patchRequest, new RequestOptions({headers: new Headers(this.defaultHeaders)}))
@@ -65,9 +91,6 @@
       patchRequest.guid = alert.source.guid;
       patchRequest.sensorType = Utils.getAlertSensorType(alert, this.sourceType);
       patchRequest.patch = [new Patch('add', '/alert_status', state)];
-      if (patchRequest.sensorType === META_ALERTS_SENSOR_TYPE) {
-        patchRequest.index = META_ALERTS_INDEX;
-      }
       return patchRequest;
     });
     let patchObservables = [];
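As with the existing alertChanged$ stream, consumers can subscribe to alertCommentChanged$ to refresh comment views whenever a comment is added or removed; passing fireChangeListener = false suppresses the notification for callers that handle their own refresh.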
diff --git a/metron-interface/metron-alerts/src/app/utils/constants.ts b/metron-interface/metron-alerts/src/app/utils/constants.ts
index bfa04bc..74e887a 100644
--- a/metron-interface/metron-alerts/src/app/utils/constants.ts
+++ b/metron-interface/metron-alerts/src/app/utils/constants.ts
@@ -19,7 +19,6 @@
 import { environment } from '../../environments/environment';
 
 export const META_ALERTS_SENSOR_TYPE = 'metaalert';
-export const META_ALERTS_INDEX = 'metaalert_index';
 
 export const NUM_SAVED_SEARCH = 10;
 export const ALERTS_RECENT_SEARCH = 'metron-alerts-recent-saved-search';
diff --git a/metron-interface/metron-rest/README.md b/metron-interface/metron-rest/README.md
index 2a6a0e0..44594f7 100644
--- a/metron-interface/metron-rest/README.md
+++ b/metron-interface/metron-rest/README.md
@@ -22,6 +22,7 @@
 ## Prerequisites
 
 * A running Metron cluster
+* A running real-time store, either Elasticsearch or Solr, depending on which one is enabled
 * Java 8 installed
 * Storm CLI and Metron topology scripts (start_parser_topology.sh, start_enrichment_topology.sh, start_elasticsearch_topology.sh) installed
 * A relational database
diff --git a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/config/IndexConfig.java b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/config/IndexConfig.java
index 25bb809..53b10f9 100644
--- a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/config/IndexConfig.java
+++ b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/config/IndexConfig.java
@@ -18,14 +18,17 @@
 package org.apache.metron.rest.config;
 
 import static org.apache.metron.rest.MetronRestConstants.INDEX_DAO_IMPL;
+import static org.apache.metron.rest.MetronRestConstants.INDEX_WRITER_NAME;
 
 import java.util.Optional;
+import org.apache.metron.common.zookeeper.ConfigurationsCache;
 import org.apache.metron.hbase.HTableProvider;
 import org.apache.metron.hbase.TableProvider;
 import org.apache.metron.indexing.dao.AccessConfig;
 import org.apache.metron.indexing.dao.IndexDao;
 import org.apache.metron.indexing.dao.IndexDaoFactory;
-import org.apache.metron.indexing.dao.MetaAlertDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;
+import org.apache.metron.indexing.util.IndexingCacheUtil;
 import org.apache.metron.rest.MetronRestConstants;
 import org.apache.metron.rest.RestException;
 import org.apache.metron.rest.service.GlobalConfigService;
@@ -34,10 +37,6 @@
 import org.springframework.context.annotation.Configuration;
 import org.springframework.core.env.Environment;
 
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Set;
-
 @Configuration
 public class IndexConfig {
 
@@ -45,6 +44,9 @@
   private GlobalConfigService globalConfigService;
 
   @Autowired
+  private ConfigurationsCache cache;
+
+  @Autowired
   private Environment environment;
 
   @Autowired
@@ -72,7 +74,9 @@
           throw new IllegalStateException("Unable to retrieve the global config.", e);
         }
       });
+      config.setIndexSupplier(IndexingCacheUtil.getIndexLookupFunction(cache, environment.getProperty(INDEX_WRITER_NAME)));
       config.setTableProvider(TableProvider.create(hbaseProviderImpl, () -> new HTableProvider()));
+      config.setKerberosEnabled(environment.getProperty(MetronRestConstants.KERBEROS_ENABLED_SPRING_PROPERTY, Boolean.class, false));
       if (indexDaoImpl == null) {
         throw new IllegalStateException("You must provide an index DAO implementation via the " + INDEX_DAO_IMPL + " config");
       }
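The index supplier wired in here resolves a sensor type to its concrete index name from the ZooKeeper-backed indexing configuration, keyed by the configured index writer. That lookup is what allows the hardcoded metaalert index constants to be dropped from the alerts UI and REST layers elsewhere in this change.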
diff --git a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/controller/UpdateController.java b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/controller/UpdateController.java
index 56b0b7b..609442b 100644
--- a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/controller/UpdateController.java
+++ b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/controller/UpdateController.java
@@ -20,6 +20,7 @@
 import io.swagger.annotations.ApiOperation;
 import io.swagger.annotations.ApiParam;
 import io.swagger.annotations.ApiResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
 import org.apache.metron.indexing.dao.update.PatchRequest;
 import org.apache.metron.indexing.dao.update.ReplaceRequest;
@@ -67,4 +68,26 @@
     service.replace(request);
     return new ResponseEntity<>(HttpStatus.OK);
   }
+
+  @ApiOperation(value = "Add a comment to an alert")
+  @ApiResponse(message = "Nothing", code = 200)
+  @RequestMapping(value = "/add/comment", method = RequestMethod.POST)
+  ResponseEntity<Void> addCommentToAlert(
+      @RequestBody @ApiParam(name = "request", value = "Comment add request", required = true) final
+      CommentAddRemoveRequest request
+  ) throws RestException {
+    service.addComment(request);
+    return new ResponseEntity<>(HttpStatus.OK);
+  }
+
+  @ApiOperation(value = "Remove a comment to an alert")
+  @ApiResponse(message = "Nothing", code = 200)
+  @RequestMapping(value = "/remove/comment", method = RequestMethod.POST)
+  ResponseEntity<Void> removeCommentFromAlert(
+      @RequestBody @ApiParam(name = "request", value = "Comment remove request", required = true) final
+      CommentAddRemoveRequest request
+  ) throws RestException {
+    service.removeComment(request);
+    return new ResponseEntity<>(HttpStatus.OK);
+  }
 }
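Both endpoints accept the same CommentAddRemoveRequest body. A representative payload, taken from the integration test added further down, looks like:

    {
      "guid" : "bro_2",
      "sensorType" : "bro",
      "comment" : "test_comment",
      "username" : "test_username",
      "timestamp" : 0
    }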
diff --git a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/UpdateService.java b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/UpdateService.java
index 4cdf4b3..bd59f39 100644
--- a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/UpdateService.java
+++ b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/UpdateService.java
@@ -17,6 +17,7 @@
  */
 package org.apache.metron.rest.service;
 
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
 import org.apache.metron.indexing.dao.update.PatchRequest;
 import org.apache.metron.indexing.dao.update.ReplaceRequest;
@@ -26,4 +27,6 @@
 
   void patch(PatchRequest request) throws RestException, OriginalNotFoundException;
   void replace(ReplaceRequest request) throws RestException;
+  void addComment(CommentAddRemoveRequest request) throws RestException;
+  void removeComment(CommentAddRemoveRequest request) throws RestException;
 }
diff --git a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/MetaAlertServiceImpl.java b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/MetaAlertServiceImpl.java
index aafab24..3f9b3e4 100644
--- a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/MetaAlertServiceImpl.java
+++ b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/MetaAlertServiceImpl.java
@@ -19,16 +19,14 @@
 package org.apache.metron.rest.service.impl;
 
 import java.io.IOException;
-import java.util.Collection;
 import org.apache.metron.indexing.dao.IndexDao;
-import org.apache.metron.indexing.dao.MetaAlertDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertAddRemoveRequest;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
 import org.apache.metron.indexing.dao.search.InvalidCreateException;
 import org.apache.metron.indexing.dao.search.InvalidSearchException;
-import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
 import org.apache.metron.rest.RestException;
 import org.apache.metron.rest.service.MetaAlertService;
@@ -48,7 +46,6 @@
     this.environment = environment;
   }
 
-
   @Override
   public MetaAlertCreateResponse create(MetaAlertCreateRequest createRequest) throws RestException {
     try {
diff --git a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/SearchServiceImpl.java b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/SearchServiceImpl.java
index 1c92fcb..54759e4 100644
--- a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/SearchServiceImpl.java
+++ b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/SearchServiceImpl.java
@@ -18,8 +18,8 @@
 package org.apache.metron.rest.service.impl;
 
 import static org.apache.metron.common.Constants.ERROR_TYPE;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_TYPE;
 import static org.apache.metron.common.Constants.SENSOR_TYPE_FIELD_PROPERTY;
-import static org.apache.metron.indexing.dao.MetaAlertDao.METAALERT_TYPE;
 import static org.apache.metron.rest.MetronRestConstants.INDEX_WRITER_NAME;
 import static org.apache.metron.rest.MetronRestConstants.SEARCH_FACET_FIELDS_SPRING_PROPERTY;
 
diff --git a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/UpdateServiceImpl.java b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/UpdateServiceImpl.java
index 76ac75d..49490fd 100644
--- a/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/UpdateServiceImpl.java
+++ b/metron-interface/metron-rest/src/main/java/org/apache/metron/rest/service/impl/UpdateServiceImpl.java
@@ -18,6 +18,7 @@
 package org.apache.metron.rest.service.impl;
 
 import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
 import org.apache.metron.indexing.dao.update.PatchRequest;
 import org.apache.metron.indexing.dao.update.ReplaceRequest;
@@ -44,7 +45,7 @@
   @Override
   public void patch(PatchRequest request) throws RestException, OriginalNotFoundException {
     try {
-      dao.patch(request, Optional.of(System.currentTimeMillis()));
+      dao.patch(dao, request, Optional.of(System.currentTimeMillis()));
     } catch (Exception e) {
 
       throw new RestException(e.getMessage(), e);
@@ -59,4 +60,22 @@
       throw new RestException(e.getMessage(), e);
     }
   }
+
+  @Override
+  public void addComment(CommentAddRemoveRequest request) throws RestException {
+    try {
+      dao.addCommentToAlert(request);
+    } catch (Exception e) {
+      throw new RestException(e.getMessage(), e);
+    }
+  }
+
+  @Override
+  public void removeComment(CommentAddRemoveRequest request) throws RestException {
+    try {
+      dao.removeCommentFromAlert(request);
+    } catch (Exception e) {
+      throw new RestException(e.getMessage(), e);
+    }
+  }
 }
diff --git a/metron-interface/metron-rest/src/main/scripts/metron-rest.sh b/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
index 2c7c75b..c0c9fac 100644
--- a/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
+++ b/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
@@ -96,19 +96,35 @@
     METRON_REST_CLASSPATH+=":${METRON_JDBC_CLIENT_PATH}"
 fi
 
-# Use a custom indexing jar if provided, else pull the metron-elasticsearch uber jar
-if [ ${METRON_INDEX_CP} ]; then
-    echo "Default metron indexing jar is: ${METRON_INDEX_CP}"
-    METRON_REST_CLASSPATH+=":${METRON_INDEX_CP}"
-else
-    indexing_jar_pattern="${METRON_HOME}/lib/metron-elasticsearch*uber.jar"
-    indexing_files=( ${indexing_jar_pattern} )
-    echo "Default metron indexing jar is: ${indexing_files[0]}"
-    METRON_REST_CLASSPATH+=":${indexing_files[0]}"
+# Use the metron-elasticsearch uber jar by default
+indexing_jar_pattern="${METRON_HOME}/lib/metron-elasticsearch*uber.jar"
+# Use the metron-solr uber jar if the RA indexing writer is set to Solr
+if [[ ${METRON_RA_INDEXING_WRITER} == "Solr" ]]; then
+    indexing_jar_pattern="${METRON_HOME}/lib/metron-solr*uber.jar"
 fi
+# Use a custom indexing jar if provided
+if [ -n "${METRON_INDEX_CP}" ]; then
+    indexing_jar_pattern="${METRON_INDEX_CP}"
+fi
+indexing_files=( ${indexing_jar_pattern} )
+echo "Metron indexing jar is: ${indexing_files[0]}"
+METRON_REST_CLASSPATH+=":${indexing_files[0]}"
 
 echo "METRON_REST_CLASSPATH=${METRON_REST_CLASSPATH}"
 
+# Use the Solr DAOs if the RA indexing writer is set to Solr
+if [[ ${METRON_RA_INDEXING_WRITER} == "Solr" ]]; then
+    METRON_INDEX_DAO=" --index.dao.impl=org.apache.metron.solr.dao.SolrDao,org.apache.metron.indexing.dao.HBaseDao"
+    METRON_METAALERT_DAO=" --meta.dao.impl=org.apache.metron.solr.dao.SolrMetaAlertDao"
+    METRON_WRITER_NAME=" --index.writer.name=solr"
+    echo "METRON_INDEX_DAO=${METRON_INDEX_DAO}"
+    echo "METRON_METAALERT_DAO=${METRON_METAALERT_DAO}"
+    echo "METRON_WRITER_NAME=${METRON_WRITER_NAME}"
+    METRON_SPRING_OPTIONS+=${METRON_INDEX_DAO}
+    METRON_SPRING_OPTIONS+=${METRON_METAALERT_DAO}
+    METRON_SPRING_OPTIONS+=${METRON_WRITER_NAME}
+fi
+
 echo "Starting application"
 ${JAVA_HOME}/bin/java ${METRON_JVMFLAGS} \
 -cp ${METRON_REST_CLASSPATH} \
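For example, exporting METRON_RA_INDEXING_WRITER=Solr in the environment that launches this script switches both the uber jar on the classpath and the DAO implementations over to Solr; leaving it unset keeps the Elasticsearch defaults, and METRON_INDEX_CP still overrides the jar selection in either case.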
diff --git a/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/MetaAlertControllerIntegrationTest.java b/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/MetaAlertControllerIntegrationTest.java
index f86f227..b216990 100644
--- a/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/MetaAlertControllerIntegrationTest.java
+++ b/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/MetaAlertControllerIntegrationTest.java
@@ -30,13 +30,10 @@
 import com.google.common.collect.ImmutableMap;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
 import org.adrianwalker.multilinestring.Multiline;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.indexing.dao.InMemoryMetaAlertDao;
-import org.apache.metron.indexing.dao.MetaAlertDao;
 import org.apache.metron.indexing.dao.SearchIntegrationTest;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertAddRemoveRequest;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
@@ -75,6 +72,7 @@
   private String metaalertUrl = "/api/v1/metaalert";
   private String user = "user";
   private String password = "password";
+  private String metaAlertIndex = "metaalert_index";
 
   /**
    {
@@ -111,7 +109,7 @@
     ImmutableMap<String, String> testData = ImmutableMap.of(
         "bro_index_2017.01.01.01", SearchIntegrationTest.broData,
         "snort_index_2017.01.01.01", SearchIntegrationTest.snortData,
-        MetaAlertDao.METAALERTS_INDEX, metaAlertData
+        metaAlertIndex, metaAlertData
     );
     loadTestData(testData);
   }
diff --git a/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/UpdateControllerIntegrationTest.java b/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/UpdateControllerIntegrationTest.java
index e8d00d3..6b8d5d3 100644
--- a/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/UpdateControllerIntegrationTest.java
+++ b/metron-interface/metron-rest/src/test/java/org/apache/metron/rest/controller/UpdateControllerIntegrationTest.java
@@ -17,16 +17,29 @@
  */
 package org.apache.metron.rest.controller;
 
+import static org.apache.metron.rest.MetronRestConstants.TEST_PROFILE;
+import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors.csrf;
+import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors.httpBasic;
+import static org.springframework.security.test.web.servlet.setup.SecurityMockMvcConfigurers.springSecurity;
+import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch;
+import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
+
 import com.google.common.collect.ImmutableMap;
+import java.util.NavigableMap;
 import org.adrianwalker.multilinestring.Multiline;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.metron.hbase.mock.MockHTable;
+import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.hbase.mock.MockHBaseTableProvider;
+import org.apache.metron.hbase.mock.MockHTable;
 import org.apache.metron.indexing.dao.HBaseDao;
-import org.apache.metron.indexing.dao.MetaAlertDao;
 import org.apache.metron.indexing.dao.SearchIntegrationTest;
+import org.apache.metron.indexing.dao.search.AlertComment;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.rest.service.UpdateService;
 import org.junit.Assert;
 import org.junit.Before;
@@ -38,28 +51,17 @@
 import org.springframework.test.context.ActiveProfiles;
 import org.springframework.test.context.junit4.SpringRunner;
 import org.springframework.test.web.servlet.MockMvc;
+import org.springframework.test.web.servlet.MvcResult;
 import org.springframework.test.web.servlet.ResultActions;
 import org.springframework.test.web.servlet.setup.MockMvcBuilders;
 import org.springframework.web.context.WebApplicationContext;
 
-import java.util.NavigableMap;
-
-import static org.apache.metron.rest.MetronRestConstants.TEST_PROFILE;
-import static org.springframework.security.test.web.servlet.setup.SecurityMockMvcConfigurers.springSecurity;
-import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors.csrf;
-import static org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors.httpBasic;
-import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
-import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch;
-import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
-import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
-import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
-
 @RunWith(SpringRunner.class)
 @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
 @ActiveProfiles(TEST_PROFILE)
 public class UpdateControllerIntegrationTest extends DaoControllerTest {
   @Autowired
-  private UpdateService searchService;
+  private UpdateService updateService;
   @Autowired
   public CuratorFramework client;
 
@@ -72,6 +74,7 @@
   private String searchUrl = "/api/v1/search";
   private String user = "user";
   private String password = "password";
+  private String metaAlertIndex = "metaalert_index";
 
   /**
    {
@@ -115,13 +118,37 @@
   @Multiline
   public static String replace;
 
+  /**
+   {
+     "guid" : "bro_2",
+     "sensorType" : "bro",
+     "comment": "test_comment",
+     "username" : "test_username",
+     "timestamp":0
+   }
+   */
+  @Multiline
+  public static String addComment;
+
+  /**
+   {
+   "guid" : "bro_2",
+   "sensorType" : "bro",
+   "comment": "test_comment",
+   "username" : "test_username",
+   "timestamp":0
+   }
+   */
+  @Multiline
+  public static String removeComment;
+
   @Before
   public void setup() throws Exception {
     this.mockMvc = MockMvcBuilders.webAppContextSetup(this.wac).apply(springSecurity()).build();
     ImmutableMap<String, String> testData = ImmutableMap.of(
         "bro_index_2017.01.01.01", SearchIntegrationTest.broData,
         "snort_index_2017.01.01.01", SearchIntegrationTest.snortData,
-        MetaAlertDao.METAALERTS_INDEX, MetaAlertControllerIntegrationTest.metaAlertData
+        metaAlertIndex, MetaAlertControllerIntegrationTest.metaAlertData
     );
     loadTestData(testData);
   }
@@ -191,4 +218,41 @@
     }
   }
 
+  @Test
+  public void shouldAddComment() throws Exception {
+    CommentAddRemoveRequest commentAddRemoveRequest = new CommentAddRemoveRequest();
+    commentAddRemoveRequest.setGuid("bro_1");
+    commentAddRemoveRequest.setSensorType("bro");
+    commentAddRemoveRequest.setComment("test_comment");
+    commentAddRemoveRequest.setUsername("test_username");
+    commentAddRemoveRequest.setTimestamp(0L);
+
+    updateService.addComment(commentAddRemoveRequest);
+
+    ResultActions result = this.mockMvc.perform(
+        post(updateUrl + "/add/comment")
+            .with(httpBasic(user, password)).with(csrf())
+            .contentType(MediaType.parseMediaType("application/json;charset=UTF-8"))
+            .content(addComment));
+    result.andExpect(status().isOk());
+  }
+
+  @Test
+  public void shouldRemoveComment() throws Exception {
+    CommentAddRemoveRequest commentAddRemoveRequest = new CommentAddRemoveRequest();
+    commentAddRemoveRequest.setGuid("bro_1");
+    commentAddRemoveRequest.setSensorType("bro");
+    commentAddRemoveRequest.setComment("test_comment");
+    commentAddRemoveRequest.setUsername("test_username");
+    commentAddRemoveRequest.setTimestamp(0L);
+
+    updateService.removeComment(commentAddRemoveRequest);
+
+    ResultActions result = this.mockMvc.perform(
+        post(updateUrl + "/remove/comment")
+            .with(httpBasic(user, password)).with(csrf())
+            .contentType(MediaType.parseMediaType("application/json;charset=UTF-8"))
+            .content(removeComment));
+    result.andExpect(status().isOk());
+  }
 }
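Each comment test exercises both paths: a direct call through the injected UpdateService bean and an authenticated POST against the new endpoint, covering the controller wiring as well as the underlying DAO call.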
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/Constants.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/Constants.java
index f74660c..4a8bea2 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/Constants.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/Constants.java
@@ -28,6 +28,7 @@
   public static final long DEFAULT_CONFIGURED_BOLT_TIMEOUT = 5000;
   public static final String SENSOR_TYPE = "source.type";
   public static final String SENSOR_TYPE_FIELD_PROPERTY = "source.type.field";
+  public static final String THREAT_SCORE_FIELD_PROPERTY = "threat.triage.score.field";
   public static final String ENRICHMENT_TOPIC = "enrichments";
   public static final String INDEXING_TOPIC = "indexing";
   public static final String ERROR_STREAM = "error";
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/error/MetronError.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/error/MetronError.java
index 9a553ca..bc02c5c 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/error/MetronError.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/error/MetronError.java
@@ -26,10 +26,11 @@
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.metron.common.Constants.ERROR_TYPE;
@@ -92,6 +89,7 @@
   @SuppressWarnings({"unchecked"})
   public JSONObject getJSONObject() {
     JSONObject errorMessage = new JSONObject();
+    errorMessage.put(Constants.GUID, UUID.randomUUID().toString());
     errorMessage.put(Constants.SENSOR_TYPE, "error");
     errorMessage.put(ErrorFields.FAILED_SENSOR_TYPE.getName(), sensorType);
     errorMessage.put(ErrorFields.ERROR_TYPE.getName(), errorType.getType());
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
index 02e6015..12bb879 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
@@ -19,6 +19,7 @@
 package org.apache.metron.common.utils;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.core.JsonParser;
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.JsonNode;
@@ -71,7 +72,8 @@
       new JSONParser());
 
   private static ThreadLocal<ObjectMapper> _mapper = ThreadLocal.withInitial(() ->
-      new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL));
+      new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL)
+                        .configure(JsonParser.Feature.ALLOW_COMMENTS, true));
 
   public <T> T convert(Object original, Class<T> targetClass) {
     return _mapper.get().convertValue(original, targetClass);
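For illustration only (this demo class and its input string are hypothetical, not part of the patch), a minimal sketch of what enabling ALLOW_COMMENTS on the shared mapper permits:

    import com.fasterxml.jackson.annotation.JsonInclude;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.util.Map;

    public class AllowCommentsDemo {
      public static void main(String[] args) throws Exception {
        // Same configuration as the ThreadLocal mapper above.
        ObjectMapper mapper = new ObjectMapper()
            .setSerializationInclusion(JsonInclude.Include.NON_NULL)
            .configure(JsonParser.Feature.ALLOW_COMMENTS, true);
        // Without ALLOW_COMMENTS, the /* */ comment is a parse error in strict JSON.
        Map<?, ?> value = mapper.readValue("{ /* optional note */ \"k\": 1 }", Map.class);
        System.out.println(value); // prints {k=1}
      }
    }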
diff --git a/metron-platform/metron-elasticsearch/src/main/config/elasticsearch.properties.j2 b/metron-platform/metron-elasticsearch/src/main/config/elasticsearch.properties.j2
index 00ad9dc..023ba2f 100644
--- a/metron-platform/metron-elasticsearch/src/main/config/elasticsearch.properties.j2
+++ b/metron-platform/metron-elasticsearch/src/main/config/elasticsearch.properties.j2
@@ -35,7 +35,7 @@
 indexing.error.topic={{indexing_error_topic}}
 
 ##### Indexing #####
-indexing.writer.class.name={{ra_indexing_writer_class_name}}
+indexing.writer.class.name=org.apache.metron.elasticsearch.writer.ElasticsearchWriter
 
 ##### Parallelism #####
 kafka.spout.parallelism={{ra_indexing_kafka_spout_parallelism}}
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchColumnMetadataDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchColumnMetadataDao.java
index c12802e..6a8cad8 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchColumnMetadataDao.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchColumnMetadataDao.java
@@ -18,6 +18,7 @@
 
 package org.apache.metron.elasticsearch.dao;
 
+import org.apache.metron.indexing.dao.ColumnMetadataDao;
 import org.apache.metron.indexing.dao.search.FieldType;
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
 import org.elasticsearch.client.AdminClient;
@@ -140,12 +141,32 @@
   }
 
   /**
-   * Retrieves the latest indices.
-   * @param includeIndices
-   * @return
+   * Finds the latest version of a set of base indices.  This can be used to find
+   * the latest 'bro' index, for example.
+   *
+   * Assuming the following indices exist...
+   *
+   *    [
+   *      'bro_index_2017.10.03.19',
+   *      'bro_index_2017.10.03.20',
+   *      'bro_index_2017.10.03.21',
+   *      'snort_index_2017.10.03.19',
+   *      'snort_index_2017.10.03.20',
+   *      'snort_index_2017.10.03.21'
+   *    ]
+   *
+   * And the include indices are given as...
+   *
+   *    ['bro', 'snort']
+   *
+   * Then the latest indices are...
+   *
+   *    ['bro_index_2017.10.03.21', 'snort_index_2017.10.03.21']
+   *
+   * @param includeIndices The base names of the indices to include
+   * @return The latest version of a set of indices.
    */
-  @Override
-  public String[] getLatestIndices(List<String> includeIndices) {
+  String[] getLatestIndices(List<String> includeIndices) {
     LOG.debug("Getting latest indices; indices={}", includeIndices);
     Map<String, String> latestIndices = new HashMap<>();
     String[] indices = adminClient
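A minimal sketch of the selection rule described in the javadoc above (a hypothetical helper, not the DAO's actual body, which reads the index list from the Elasticsearch admin client); it relies on the hour-resolution date suffix sorting lexicographically:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class LatestIndexSketch {
      static String[] latestIndices(List<String> includeIndices, String[] allIndices) {
        Map<String, String> latest = new HashMap<>();
        for (String index : allIndices) {
          for (String base : includeIndices) {
            // 'bro_index_2017.10.03.21' > 'bro_index_2017.10.03.20' as strings, so
            // keeping the greatest matching name per base yields the latest index.
            if (index.startsWith(base)) {
              latest.merge(base, index, (a, b) -> a.compareTo(b) >= 0 ? a : b);
            }
          }
        }
        return latest.values().toArray(new String[0]);
      }
    }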
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
index 181cb87..59f25f0 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
@@ -17,66 +17,29 @@
  */
 package org.apache.metron.elasticsearch.dao;
 
-import static org.apache.metron.elasticsearch.utils.ElasticsearchUtils.INDEX_NAME_DELIMITER;
-
-import com.google.common.base.Splitter;
-import com.google.common.collect.Iterables;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.function.Function;
 import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
 import org.apache.metron.indexing.dao.AccessConfig;
 import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
 import org.apache.metron.indexing.dao.search.FieldType;
 import org.apache.metron.indexing.dao.search.GetRequest;
-import org.apache.metron.indexing.dao.search.Group;
-import org.apache.metron.indexing.dao.search.GroupOrder;
-import org.apache.metron.indexing.dao.search.GroupOrderType;
 import org.apache.metron.indexing.dao.search.GroupRequest;
 import org.apache.metron.indexing.dao.search.GroupResponse;
-import org.apache.metron.indexing.dao.search.GroupResult;
 import org.apache.metron.indexing.dao.search.InvalidSearchException;
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
-import org.apache.metron.indexing.dao.search.SearchResult;
-import org.apache.metron.indexing.dao.search.SortField;
-import org.apache.metron.indexing.dao.search.SortOrder;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.Document;
-import org.elasticsearch.action.bulk.BulkRequestBuilder;
-import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.index.IndexResponse;
-import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
+import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.apache.metron.indexing.dao.update.ReplaceRequest;
 import org.elasticsearch.client.transport.TransportClient;
-import org.elasticsearch.index.mapper.LegacyIpFieldMapper;
-import org.elasticsearch.index.query.IdsQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.index.query.QueryStringQueryBuilder;
-import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.SearchHits;
-import org.elasticsearch.search.aggregations.Aggregation;
-import org.elasticsearch.search.aggregations.AggregationBuilders;
-import org.elasticsearch.search.aggregations.Aggregations;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
-import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
-import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.sum.Sum;
-import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
-import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.search.sort.FieldSortBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -84,25 +47,15 @@
 
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  /**
-   * The value required to ensure that Elasticsearch sorts missing values last.
-   */
-  private static final String SORT_MISSING_LAST = "_last";
-
-  /**
-   * The value required to ensure that Elasticsearch sorts missing values last.
-   */
-  private static final String SORT_MISSING_FIRST = "_first";
-
-  /**
-   * The Elasticsearch client.
-   */
   private transient TransportClient client;
+  private ElasticsearchSearchDao searchDao;
+  private ElasticsearchUpdateDao updateDao;
+  private ElasticsearchRetrieveLatestDao retrieveLatestDao;
 
   /**
    * Retrieves column metadata about search indices.
    */
-  private ColumnMetadataDao columnMetadataDao;
+  private ElasticsearchColumnMetadataDao columnMetadataDao;
 
   /**
    * Handles the submission of search requests to Elasticsearch.
@@ -112,10 +65,17 @@
   private AccessConfig accessConfig;
 
   protected ElasticsearchDao(TransportClient client,
-                             ColumnMetadataDao columnMetadataDao,
-                             ElasticsearchRequestSubmitter requestSubmitter,
-                             AccessConfig config) {
+      AccessConfig config,
+      ElasticsearchSearchDao searchDao,
+      ElasticsearchUpdateDao updateDao,
+      ElasticsearchRetrieveLatestDao retrieveLatestDao,
+      ElasticsearchColumnMetadataDao columnMetadataDao,
+      ElasticsearchRequestSubmitter requestSubmitter
+  ) {
     this.client = client;
+    this.searchDao = searchDao;
+    this.updateDao = updateDao;
+    this.retrieveLatestDao = retrieveLatestDao;
     this.columnMetadataDao = columnMetadataDao;
     this.requestSubmitter = requestSubmitter;
     this.accessConfig = config;
@@ -133,600 +93,116 @@
     this.accessConfig = accessConfig;
   }
 
-  private static Map<String, FieldType> elasticsearchSearchTypeMap;
-
-  static {
-    Map<String, FieldType> fieldTypeMap = new HashMap<>();
-    fieldTypeMap.put("text", FieldType.TEXT);
-    fieldTypeMap.put("keyword", FieldType.KEYWORD);
-    fieldTypeMap.put("ip", FieldType.IP);
-    fieldTypeMap.put("integer", FieldType.INTEGER);
-    fieldTypeMap.put("long", FieldType.LONG);
-    fieldTypeMap.put("date", FieldType.DATE);
-    fieldTypeMap.put("float", FieldType.FLOAT);
-    fieldTypeMap.put("double", FieldType.DOUBLE);
-    fieldTypeMap.put("boolean", FieldType.BOOLEAN);
-    elasticsearchSearchTypeMap = Collections.unmodifiableMap(fieldTypeMap);
-  }
-
-  @Override
-  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
-    if(searchRequest.getQuery() == null) {
-      throw new InvalidSearchException("Search query is invalid: null");
-    }
-    return search(searchRequest, new QueryStringQueryBuilder(searchRequest.getQuery()));
-  }
-
-  /**
-   * Defers to a provided {@link org.elasticsearch.index.query.QueryBuilder} for the query.
-   * @param request The request defining the parameters of the search
-   * @param queryBuilder The actual query to be run. Intended for if the SearchRequest requires wrapping
-   * @return The results of the query
-   * @throws InvalidSearchException When the query is malformed or the current state doesn't allow search
-   */
-  protected SearchResponse search(SearchRequest request, QueryBuilder queryBuilder) throws InvalidSearchException {
-    org.elasticsearch.action.search.SearchRequest esRequest;
-    org.elasticsearch.action.search.SearchResponse esResponse;
-
-    if(client == null) {
-      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
-    }
-
-    if (request.getSize() > accessConfig.getMaxSearchResults()) {
-      throw new InvalidSearchException("Search result size must be less than " + accessConfig.getMaxSearchResults());
-    }
-
-    esRequest = buildSearchRequest(request, queryBuilder);
-    esResponse = requestSubmitter.submitSearch(esRequest);
-    return buildSearchResponse(request, esResponse);
-  }
-
-  /**
-   * Builds an Elasticsearch search request.
-   * @param searchRequest The Metron search request.
-   * @param queryBuilder
-   * @return An Elasticsearch search request.
-   */
-  private org.elasticsearch.action.search.SearchRequest buildSearchRequest(
-          SearchRequest searchRequest,
-          QueryBuilder queryBuilder) throws InvalidSearchException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got search request; request={}", ElasticsearchUtils.toJSON(searchRequest).orElse("???"));
-    }
-    SearchSourceBuilder searchBuilder = new SearchSourceBuilder()
-            .size(searchRequest.getSize())
-            .from(searchRequest.getFrom())
-            .query(queryBuilder)
-            .trackScores(true);
-    List<String> fields = searchRequest.getFields();
-    // column metadata needed to understand the type of each sort field
-    Map<String, FieldType> meta;
-    try {
-      meta = getColumnMetadata(searchRequest.getIndices());
-    } catch(IOException e) {
-      throw new InvalidSearchException("Unable to get column metadata", e);
-    }
-
-    // handle sort fields
-    for(SortField sortField : searchRequest.getSort()) {
-
-      // what type is the sort field?
-      FieldType sortFieldType = meta.getOrDefault(sortField.getField(), FieldType.OTHER);
-
-      // sort order - if ascending missing values sorted last. otherwise, missing values sorted first
-      org.elasticsearch.search.sort.SortOrder sortOrder = getElasticsearchSortOrder(sortField.getSortOrder());
-      String missingSortOrder;
-      if(sortOrder == org.elasticsearch.search.sort.SortOrder.DESC) {
-        missingSortOrder = SORT_MISSING_LAST;
-      } else {
-        missingSortOrder = SORT_MISSING_FIRST;
-      }
-
-      // sort by the field - missing fields always last
-      FieldSortBuilder sortBy = new FieldSortBuilder(sortField.getField())
-              .order(sortOrder)
-              .missing(missingSortOrder)
-              .unmappedType(sortFieldType.getFieldType());
-      searchBuilder.sort(sortBy);
-    }
-
-    // handle search fields
-    if (fields != null) {
-      searchBuilder.fetchSource("*", null);
-    } else {
-      searchBuilder.fetchSource(true);
-    }
-
-    List<String> facetFields = searchRequest.getFacetFields();
-
-    // handle facet fields
-    if (facetFields != null) {
-      // https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/_bucket_aggregations.html
-      for(String field : facetFields) {
-        String name = getFacetAggregationName(field);
-        TermsAggregationBuilder terms = AggregationBuilders.terms( name).field(field);
-               // new TermsBuilder(name).field(field);
-        searchBuilder.aggregation(terms);
-      }
-    }
-
-    // return the search request
-    String[] indices = wildcardIndices(searchRequest.getIndices());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Built Elasticsearch request; indices={}, request={}", indices, searchBuilder.toString());
-    }
-    return new org.elasticsearch.action.search.SearchRequest()
-            .indices(indices)
-            .source(searchBuilder);
-  }
-
-  /**
-   * Builds a search response.
-   *
-   * This effectively transforms an Elasticsearch search response into a Metron search response.
-   *
-   * @param searchRequest The Metron search request.
-   * @param esResponse The Elasticsearch search response.
-   * @return A Metron search response.
-   * @throws InvalidSearchException
-   */
-  private SearchResponse buildSearchResponse(
-          SearchRequest searchRequest,
-          org.elasticsearch.action.search.SearchResponse esResponse) throws InvalidSearchException {
-
-    SearchResponse searchResponse = new SearchResponse();
-
-    searchResponse.setTotal(esResponse.getHits().getTotalHits());
-
-    // search hits --> search results
-    List<SearchResult> results = new ArrayList<>();
-    for(SearchHit hit: esResponse.getHits().getHits()) {
-      results.add(getSearchResult(hit, searchRequest.getFields()));
-    }
-    searchResponse.setResults(results);
-
-    // handle facet fields
-    if (searchRequest.getFacetFields() != null) {
-      List<String> facetFields = searchRequest.getFacetFields();
-      Map<String, FieldType> commonColumnMetadata;
-      try {
-        commonColumnMetadata = getColumnMetadata(searchRequest.getIndices());
-      } catch (IOException e) {
-        throw new InvalidSearchException(String.format(
-                "Could not get common column metadata for indices %s",
-                Arrays.toString(searchRequest.getIndices().toArray())));
-      }
-      searchResponse.setFacetCounts(getFacetCounts(facetFields, esResponse.getAggregations(), commonColumnMetadata ));
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Built search response; response={}", ElasticsearchUtils.toJSON(searchResponse).orElse("???"));
-    }
-    return searchResponse;
-  }
-
-  @Override
-  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
-    return group(groupRequest, new QueryStringQueryBuilder(groupRequest.getQuery()));
-  }
-
-  /**
-   * Defers to a provided {@link org.elasticsearch.index.query.QueryBuilder} for the query.
-   * @param groupRequest The request defining the parameters of the grouping
-   * @param queryBuilder The actual query to be run. Intended for if the SearchRequest requires wrapping
-   * @return The results of the query
-   * @throws InvalidSearchException When the query is malformed or the current state doesn't allow search
-   */
-  protected GroupResponse group(GroupRequest groupRequest, QueryBuilder queryBuilder)
-      throws InvalidSearchException {
-    org.elasticsearch.action.search.SearchRequest esRequest;
-    org.elasticsearch.action.search.SearchResponse esResponse;
-
-    if (client == null) {
-      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
-    }
-    if (groupRequest.getGroups() == null || groupRequest.getGroups().size() == 0) {
-      throw new InvalidSearchException("At least 1 group must be provided.");
-    }
-
-    esRequest = buildGroupRequest(groupRequest, queryBuilder);
-    esResponse = requestSubmitter.submitSearch(esRequest);
-    GroupResponse response = buildGroupResponse(groupRequest, esResponse);
-
-    return response;
-  }
-
-  /**
-   * Builds a group search request.
-   * @param groupRequest The Metron group request.
-   * @param queryBuilder The search query.
-   * @return An Elasticsearch search request.
-   */
-  private org.elasticsearch.action.search.SearchRequest buildGroupRequest(
-          GroupRequest groupRequest,
-          QueryBuilder queryBuilder) {
-
-    // handle groups
-    TermsAggregationBuilder groups = getGroupsTermBuilder(groupRequest, 0);
-    final SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
-            .query(queryBuilder)
-            .aggregation(groups);
-
-    // return the search request
-    String[] indices = wildcardIndices(groupRequest.getIndices());
-    return new org.elasticsearch.action.search.SearchRequest()
-            .indices(indices)
-            .source(searchSourceBuilder);
-  }
-
-  /**
-   * Build a group response.
-   * @param groupRequest The original group request.
-   * @param response The search response.
-   * @return A group response.
-   * @throws InvalidSearchException
-   */
-  private GroupResponse buildGroupResponse(
-          GroupRequest groupRequest,
-          org.elasticsearch.action.search.SearchResponse response) throws InvalidSearchException {
-
-    // build the search response
-    Map<String, FieldType> commonColumnMetadata;
-    try {
-      commonColumnMetadata = getColumnMetadata(groupRequest.getIndices());
-    } catch (IOException e) {
-      throw new InvalidSearchException(String.format("Could not get common column metadata for indices %s",
-              Arrays.toString(groupRequest.getIndices().toArray())));
-    }
-
-    GroupResponse groupResponse = new GroupResponse();
-    groupResponse.setGroupedBy(groupRequest.getGroups().get(0).getField());
-    groupResponse.setGroupResults(getGroupResults(groupRequest, 0, response.getAggregations(), commonColumnMetadata));
-    return groupResponse;
-  }
-
-  private String[] wildcardIndices(List<String> indices) {
-    if(indices == null)
-      return new String[] {};
-
-    return indices
-            .stream()
-            .map(index -> String.format("%s%s*", index, INDEX_NAME_DELIMITER))
-            .toArray(value -> new String[indices.size()]);
-  }
-
   @Override
   public synchronized void init(AccessConfig config) {
-    if(this.client == null) {
-      this.client = ElasticsearchUtils.getClient(config.getGlobalConfigSupplier().get());
+    if (this.client == null) {
+      this.client = ElasticsearchUtils
+          .getClient(config.getGlobalConfigSupplier().get());
       this.accessConfig = config;
       this.columnMetadataDao = new ElasticsearchColumnMetadataDao(this.client.admin());
       this.requestSubmitter = new ElasticsearchRequestSubmitter(this.client);
+      this.searchDao = new ElasticsearchSearchDao(client, accessConfig, columnMetadataDao,
+          requestSubmitter);
+      this.retrieveLatestDao = new ElasticsearchRetrieveLatestDao(client);
+      this.updateDao = new ElasticsearchUpdateDao(client, accessConfig, retrieveLatestDao);
     }
 
-    if(columnMetadataDao == null) {
+    if (columnMetadataDao == null) {
       throw new IllegalArgumentException("No ColumnMetadataDao available");
     }
 
-    if(requestSubmitter == null) {
+    if (requestSubmitter == null) {
       throw new IllegalArgumentException("No ElasticsearchRequestSubmitter available");
     }
   }
 
   @Override
-  public Document getLatest(final String guid, final String sensorType) throws IOException {
-    Optional<Document> doc = searchByGuid(guid, sensorType, hit -> toDocument(guid, hit));
-    return doc.orElse(null);
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    return this.searchDao.search(searchRequest);
   }
 
-  private Optional<Document> toDocument(final String guid, SearchHit hit) {
-    Long ts = 0L;
-    String doc = hit.getSourceAsString();
-    String sourceType = toSourceType(hit.getType());
-    try {
-      return Optional.of(new Document(doc, guid, sourceType, ts));
-    } catch (IOException e) {
-      throw new IllegalStateException("Unable to retrieve latest: " + e.getMessage(), e);
-    }
+  @Override
+  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+    return this.searchDao.group(groupRequest);
   }
 
-  /**
-   * Returns the source type based on a given doc type.
-   * @param docType The document type.
-   * @return The source type.
-   */
-  private String toSourceType(String docType) {
-    return Iterables.getFirst(Splitter.on("_doc").split(docType), null);
+  @Override
+  public Document getLatest(final String guid, final String sensorType) {
+    return retrieveLatestDao.getLatest(guid, sensorType);
   }
 
   @Override
   public Iterable<Document> getAllLatest(
-      final List<GetRequest> getRequests) throws IOException {
-    Collection<String> guids = new HashSet<>();
-    Collection<String> sensorTypes = new HashSet<>();
-    for (GetRequest getRequest: getRequests) {
-      guids.add(getRequest.getGuid());
-      sensorTypes.add(getRequest.getSensorType());
-    }
-    List<Document> documents = searchByGuids(
-        guids
-        , sensorTypes
-        , hit -> {
-          Long ts = 0L;
-          String doc = hit.getSourceAsString();
-          String sourceType = Iterables.getFirst(Splitter.on("_doc").split(hit.getType()), null);
-          try {
-            return Optional.of(new Document(doc, hit.getId(), sourceType, ts));
-          } catch (IOException e) {
-            throw new IllegalStateException("Unable to retrieve latest: " + e.getMessage(), e);
-          }
-        }
-
-    );
-    return documents;
-  }
-
-  <T> Optional<T> searchByGuid(String guid, String sensorType,
-      Function<SearchHit, Optional<T>> callback) {
-    Collection<String> sensorTypes = sensorType != null ? Collections.singleton(sensorType) : null;
-    List<T> results = searchByGuids(Collections.singleton(guid), sensorTypes, callback);
-    if (results.size() > 0) {
-      return Optional.of(results.get(0));
-    } else {
-      return Optional.empty();
-    }
-  }
-
-  /**
-   * Return the search hit based on the UUID and sensor type.
-   * A callback can be specified to transform the hit into a type T.
-   * If more than one hit happens, the first one will be returned.
-   */
-  <T> List<T> searchByGuids(Collection<String> guids, Collection<String> sensorTypes,
-      Function<SearchHit, Optional<T>> callback) {
-    if(guids == null || guids.isEmpty()) {
-      return Collections.EMPTY_LIST;
-    }
-    QueryBuilder query = null;
-    IdsQueryBuilder idsQuery = null;
-    if (sensorTypes != null) {
-      String[] types = sensorTypes.stream().map(sensorType -> sensorType + "_doc").toArray(String[]::new);
-      idsQuery = QueryBuilders.idsQuery(types);
-    } else {
-      idsQuery = QueryBuilders.idsQuery();
-    }
-
-    for(String guid : guids) {
-        query = idsQuery.addIds(guid);
-    }
-
-    SearchRequestBuilder request = client.prepareSearch()
-                                         .setQuery(query)
-                                         .setSize(guids.size())
-                                         ;
-    org.elasticsearch.action.search.SearchResponse response = request.get();
-    SearchHits hits = response.getHits();
-    List<T> results = new ArrayList<>();
-    for (SearchHit hit : hits) {
-      Optional<T> result = callback.apply(hit);
-      if (result.isPresent()) {
-        results.add(result.get());
-      }
-    }
-    return results;
+      final List<GetRequest> getRequests) {
+    return retrieveLatestDao.getAllLatest(getRequests);
   }
 
   @Override
   public void update(Document update, Optional<String> index) throws IOException {
-    String indexPostfix = ElasticsearchUtils
-        .getIndexFormat(accessConfig.getGlobalConfigSupplier().get()).format(new Date());
-    String sensorType = update.getSensorType();
-    String indexName = getIndexName(update, index, indexPostfix);
-
-    IndexRequest indexRequest = buildIndexRequest(update, sensorType, indexName);
-    try {
-      IndexResponse response = client.index(indexRequest).get();
-
-      ShardInfo shardInfo = response.getShardInfo();
-      int failed = shardInfo.getFailed();
-      if (failed > 0) {
-        throw new IOException(
-            "ElasticsearchDao index failed: " + Arrays.toString(shardInfo.getFailures()));
-      }
-    } catch (Exception e) {
-      throw new IOException(e.getMessage(), e);
-    }
+    updateDao.update(update, index);
   }
 
   @Override
   public void batchUpdate(Map<Document, Optional<String>> updates) throws IOException {
-    String indexPostfix = ElasticsearchUtils
-        .getIndexFormat(accessConfig.getGlobalConfigSupplier().get()).format(new Date());
-
-    BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
-
-    // Get the indices we'll actually be using for each Document.
-    for (Map.Entry<Document, Optional<String>> updateEntry : updates.entrySet()) {
-      Document update = updateEntry.getKey();
-      String sensorType = update.getSensorType();
-      String indexName = getIndexName(update, updateEntry.getValue(), indexPostfix);
-      IndexRequest indexRequest = buildIndexRequest(
-          update,
-          sensorType,
-          indexName
-      );
-
-      bulkRequestBuilder.add(indexRequest);
-    }
-
-    BulkResponse bulkResponse = bulkRequestBuilder.get();
-    if (bulkResponse.hasFailures()) {
-      LOG.error("Bulk Request has failures: {}", bulkResponse.buildFailureMessage());
-      throw new IOException(
-          "ElasticsearchDao upsert failed: " + bulkResponse.buildFailureMessage());
-    }
+    updateDao.batchUpdate(updates);
   }
 
-  protected String getIndexName(Document update, Optional<String> index, String indexPostFix) {
-      return index.orElse(getIndexName(update.getGuid(), update.getSensorType())
-                  .orElse(ElasticsearchUtils.getIndexName(update.getSensorType(), indexPostFix, null))
-      );
+  @Override
+  public void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request, Optional<Long> timestamp)
+      throws OriginalNotFoundException, IOException {
+    updateDao.patch(retrieveLatestDao, request, timestamp);
   }
 
-  protected Optional<String> getIndexName(String guid, String sensorType) {
-    return searchByGuid(guid,
-        sensorType,
-        hit -> Optional.ofNullable(hit.getIndex())
-    );
+  @Override
+  public void replace(ReplaceRequest request, Optional<Long> timestamp) throws IOException {
+    updateDao.replace(request, timestamp);
   }
 
-  protected IndexRequest buildIndexRequest(Document update, String sensorType, String indexName) {
-    String type = sensorType + "_doc";
-    Object ts = update.getTimestamp();
-    IndexRequest indexRequest = new IndexRequest(indexName, type, update.getGuid())
-        .source(update.getDocument());
-    if(ts != null) {
-      indexRequest = indexRequest.timestamp(ts.toString());
-    }
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    updateDao.addCommentToAlert(request);
+  }
 
-    return indexRequest;
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    updateDao.removeCommentFromAlert(request);
   }
 
   @Override
   public Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException {
-    return columnMetadataDao.getColumnMetadata(indices);
+    return this.columnMetadataDao.getColumnMetadata(indices);
   }
 
-  private org.elasticsearch.search.sort.SortOrder getElasticsearchSortOrder(
-      org.apache.metron.indexing.dao.search.SortOrder sortOrder) {
-    return sortOrder == org.apache.metron.indexing.dao.search.SortOrder.DESC ?
-        org.elasticsearch.search.sort.SortOrder.DESC : org.elasticsearch.search.sort.SortOrder.ASC;
+  @Override
+  public Optional<Map<String, Object>> getLatestResult(GetRequest request) throws IOException {
+    return retrieveLatestDao.getLatestResult(request);
   }
 
-  private Order getElasticsearchGroupOrder(GroupOrder groupOrder) {
-    if (groupOrder.getGroupOrderType() == GroupOrderType.TERM) {
-      return groupOrder.getSortOrder() == SortOrder.ASC ? Order.term(true) : Order.term(false);
-    } else {
-      return groupOrder.getSortOrder() == SortOrder.ASC ? Order.count(true) : Order.count(false);
-    }
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    this.updateDao.addCommentToAlert(request, latest);
   }
 
-  public Map<String, Map<String, Long>> getFacetCounts(List<String> fields, Aggregations aggregations, Map<String, FieldType> commonColumnMetadata) {
-    Map<String, Map<String, Long>> fieldCounts = new HashMap<>();
-    for (String field: fields) {
-      Map<String, Long> valueCounts = new HashMap<>();
-      if(aggregations != null ){
-        Aggregation aggregation = aggregations.get(getFacetAggregationName(field));
-        if (aggregation instanceof Terms) {
-          Terms terms = (Terms) aggregation;
-          terms.getBuckets().stream().forEach(bucket -> valueCounts.put(formatKey(bucket.getKey(), commonColumnMetadata.get(field)), bucket.getDocCount()));
-        }
-      }
-      fieldCounts.put(field, valueCounts);
-    }
-    return fieldCounts;
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    this.updateDao.removeCommentFromAlert(request, latest);
   }
 
-  private String formatKey(Object key, FieldType type) {
-    if (FieldType.IP.equals(type) && key instanceof Long) {
-      return LegacyIpFieldMapper.longToIp((Long) key);
-    } else if (FieldType.BOOLEAN.equals(type)) {
-      return (Long) key == 1 ? "true" : "false";
-    } else {
-      return key.toString();
-    }
+  protected Optional<String> getIndexName(String guid, String sensorType) {
+    return updateDao.getIndexName(guid, sensorType);
   }
 
-  private TermsAggregationBuilder getGroupsTermBuilder(GroupRequest groupRequest, int index) {
-    List<Group> groups = groupRequest.getGroups();
-    Group group = groups.get(index);
-    String aggregationName = getGroupByAggregationName(group.getField());
-    TermsAggregationBuilder termsBuilder = AggregationBuilders.terms(aggregationName);
-    termsBuilder
-        .field(group.getField())
-        .size(accessConfig.getMaxSearchGroups())
-        .order(getElasticsearchGroupOrder(group.getOrder()));
-    if (index < groups.size() - 1) {
-      termsBuilder.subAggregation(getGroupsTermBuilder(groupRequest, index + 1));
-    }
-    Optional<String> scoreField = groupRequest.getScoreField();
-    if (scoreField.isPresent()) {
-      SumAggregationBuilder scoreSumAggregationBuilder = AggregationBuilders.sum(getSumAggregationName(scoreField.get())).field(scoreField.get()).missing(0);
-      termsBuilder.subAggregation(scoreSumAggregationBuilder);
-    }
-    return termsBuilder;
+  protected SearchResponse search(SearchRequest request, QueryBuilder queryBuilder)
+      throws InvalidSearchException {
+    return searchDao.search(request, queryBuilder);
   }
 
-  private List<GroupResult> getGroupResults(GroupRequest groupRequest, int index, Aggregations aggregations, Map<String, FieldType> commonColumnMetadata) {
-    List<Group> groups = groupRequest.getGroups();
-    String field = groups.get(index).getField();
-    Terms terms = aggregations.get(getGroupByAggregationName(field));
-    List<GroupResult> searchResultGroups = new ArrayList<>();
-    for(Bucket bucket: terms.getBuckets()) {
-      GroupResult groupResult = new GroupResult();
-      groupResult.setKey(formatKey(bucket.getKey(), commonColumnMetadata.get(field)));
-      groupResult.setTotal(bucket.getDocCount());
-      Optional<String> scoreField = groupRequest.getScoreField();
-      if (scoreField.isPresent()) {
-        Sum score = bucket.getAggregations().get(getSumAggregationName(scoreField.get()));
-        groupResult.setScore(score.getValue());
-      }
-      if (index < groups.size() - 1) {
-        groupResult.setGroupedBy(groups.get(index + 1).getField());
-        groupResult.setGroupResults(getGroupResults(groupRequest, index + 1, bucket.getAggregations(), commonColumnMetadata));
-      }
-      searchResultGroups.add(groupResult);
-    }
-    return searchResultGroups;
-  }
-
-  private SearchResult getSearchResult(SearchHit searchHit, List<String> fields) {
-    SearchResult searchResult = new SearchResult();
-    searchResult.setId(searchHit.getId());
-    Map<String, Object> source;
-    if (fields != null) {
-      Map<String, Object> resultSourceAsMap = searchHit.getSourceAsMap();
-      source = new HashMap<>();
-      fields.forEach(field -> {
-        source.put(field, resultSourceAsMap.get(field));
-      });
-    } else {
-      source = searchHit.getSource();
-    }
-    searchResult.setSource(source);
-    searchResult.setScore(searchHit.getScore());
-    searchResult.setIndex(searchHit.getIndex());
-    return searchResult;
-  }
-
-  private String getFacetAggregationName(String field) {
-    return String.format("%s_count", field);
+  protected GroupResponse group(GroupRequest groupRequest, QueryBuilder queryBuilder)
+      throws InvalidSearchException {
+    return searchDao.group(groupRequest, queryBuilder);
   }
 
   public TransportClient getClient() {
-    return client;
-  }
-
-  private String getGroupByAggregationName(String field) {
-    return String.format("%s_group", field);
-  }
-
-  private String getSumAggregationName(String field) {
-    return String.format("%s_score", field);
-  }
-
-  public ElasticsearchDao client(TransportClient client) {
-    this.client = client;
-    return this;
-  }
-
-  public ElasticsearchDao columnMetadataDao(ColumnMetadataDao columnMetadataDao) {
-    this.columnMetadataDao = columnMetadataDao;
-    return this;
-  }
-
-  public ElasticsearchDao accessConfig(AccessConfig accessConfig) {
-    this.accessConfig = accessConfig;
-    return this;
+    return this.client;
   }
 }
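
After this hunk, ElasticsearchDao is a thin facade: reads are deferred to searchDao and retrieveLatestDao, writes to updateDao, and schema lookups to columnMetadataDao. A minimal sketch of that shape, using illustrative names rather than the actual Metron interfaces:

```java
import java.io.IOException;
import java.util.Optional;

// Sketch of the facade-over-sub-DAOs shape this hunk introduces; all names
// here are illustrative stand-ins, not the real metron-indexing interfaces.
interface SketchUpdateDao {
  void update(String guid, String json, Optional<String> index) throws IOException;
}

interface SketchSearchDao {
  String search(String query) throws IOException;
}

// The facade keeps its public surface stable while each concern lives in a
// focused collaborator, mirroring how ElasticsearchDao now defers to
// searchDao, updateDao, retrieveLatestDao, and columnMetadataDao.
class SketchFacadeDao implements SketchUpdateDao, SketchSearchDao {
  private final SketchUpdateDao updateDao;
  private final SketchSearchDao searchDao;

  SketchFacadeDao(SketchUpdateDao updateDao, SketchSearchDao searchDao) {
    this.updateDao = updateDao;
    this.searchDao = searchDao;
  }

  @Override
  public void update(String guid, String json, Optional<String> index) throws IOException {
    updateDao.update(guid, json, index); // pure delegation, no Elasticsearch logic here
  }

  @Override
  public String search(String query) throws IOException {
    return searchDao.search(query); // likewise for reads
  }
}
```
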
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDao.java
index 71fe181..55123a5 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDao.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDao.java
@@ -18,17 +18,17 @@
 
 package org.apache.metron.elasticsearch.dao;
 
-import org.apache.lucene.search.join.ScoreMode;
 import org.apache.metron.common.Constants;
-import org.apache.metron.common.configuration.ConfigurationsUtils;
 import org.apache.metron.indexing.dao.AccessConfig;
 import org.apache.metron.indexing.dao.IndexDao;
-import org.apache.metron.indexing.dao.MetaAlertDao;
 import org.apache.metron.indexing.dao.MultiIndexDao;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
-import org.apache.metron.indexing.dao.metaalert.MetaScores;
 import org.apache.metron.indexing.dao.search.FieldType;
 import org.apache.metron.indexing.dao.search.GetRequest;
 import org.apache.metron.indexing.dao.search.GroupRequest;
@@ -37,69 +37,41 @@
 import org.apache.metron.indexing.dao.search.InvalidSearchException;
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
-import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.Document;
 import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
 import org.apache.metron.indexing.dao.update.PatchRequest;
-import org.apache.metron.stellar.common.utils.ConversionUtils;
-import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.index.IndexNotFoundException;
-import org.elasticsearch.index.query.InnerHitBuilder;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.index.query.QueryStringQueryBuilder;
-import org.elasticsearch.search.SearchHit;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Optional;
-import java.util.Set;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-import static org.apache.metron.common.Constants.GUID;
-import static org.apache.metron.common.Constants.SENSOR_TYPE_FIELD_PROPERTY;
-import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
-import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
-import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
-import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
-import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import java.util.function.Supplier;
 
 public class ElasticsearchMetaAlertDao implements MetaAlertDao {
 
-  public static final String SOURCE_TYPE = Constants.SENSOR_TYPE.replace('.', ':');
-  public static final String THREAT_TRIAGE_FIELD = THREAT_FIELD_DEFAULT.replace('.', ':');
-  private static final String STATUS_PATH = "/status";
-  private static final String ALERT_PATH = "/alert";
-  private static final String INDEX_NOT_FOUND_INDICES_KEY = "es.index";
+  public static final String THREAT_TRIAGE_FIELD = MetaAlertConstants.THREAT_FIELD_DEFAULT
+      .replace('.', ':');
+  public static final String METAALERTS_INDEX = "metaalert_index";
+  public static final String SOURCE_TYPE_FIELD = Constants.SENSOR_TYPE.replace('.', ':');
+  protected String metaAlertsIndex = METAALERTS_INDEX;
+  protected String threatSort = MetaAlertConstants.THREAT_SORT_DEFAULT;
 
-  private IndexDao indexDao;
   private ElasticsearchDao elasticsearchDao;
-  private String index = METAALERTS_INDEX;
+  private IndexDao indexDao;
+  private ElasticsearchMetaAlertSearchDao metaAlertSearchDao;
+  private ElasticsearchMetaAlertUpdateDao metaAlertUpdateDao;
+  private ElasticsearchMetaAlertRetrieveLatestDao metaAlertRetrieveLatestDao;
 
-  /**
-   * Defines which summary aggregation is used to represent the overall threat triage score for
-   * the metaalert. The summary aggregation is applied to the threat triage score of all child alerts.
-   *
-   * This overall score is primarily used for sorting; hence it is called the 'threatSort'.  This
-   * can be either max, min, average, count, median, or sum.
-   */
-  private String threatSort = THREAT_SORT_DEFAULT;
-  private int pageSize = 500;
+  protected int pageSize = 500;
 
   /**
    * Wraps an {@link org.apache.metron.indexing.dao.IndexDao} to handle meta alerts.
    * @param indexDao The Dao to wrap
    */
   public ElasticsearchMetaAlertDao(IndexDao indexDao) {
-    this(indexDao, METAALERTS_INDEX, THREAT_SORT_DEFAULT);
+    this(indexDao, METAALERTS_INDEX, MetaAlertConstants.THREAT_SORT_DEFAULT);
   }
 
   /**
@@ -109,9 +81,11 @@
    *                   as the overall threat triage score for the metaalert. This
    *                   can be either max, min, average, count, median, or sum.
    */
-  public ElasticsearchMetaAlertDao(IndexDao indexDao, String index, String threatSort) {
+  public ElasticsearchMetaAlertDao(IndexDao indexDao, String metaAlertsIndex,
+      String threatSort) {
-    init(indexDao, Optional.of(threatSort));
-    this.index = index;
+    this.threatSort = threatSort;
+    this.metaAlertsIndex = metaAlertsIndex;
+    init(indexDao, Optional.of(threatSort));
   }
 
   public ElasticsearchMetaAlertDao() {
@@ -119,8 +93,10 @@
   }
 
   /**
-   * Initializes this implementation by setting the supplied IndexDao and also setting a separate ElasticsearchDao.
-   * This is needed for some specific Elasticsearch functions (looking up an index from a GUID for example).
+   * Initializes this implementation by setting the supplied IndexDao and also setting a separate
+   * ElasticsearchDao. This is needed for some specific Elasticsearch functions (looking up an
+   * index from a GUID, for example).
    * @param indexDao The DAO to wrap for our queries
    * @param threatSort The summary aggregation of the child threat triage scores used
    *                   as the overall threat triage score for the metaalert. This
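
For orientation, a hedged sketch of using this path: init takes the wrapped IndexDao and, optionally, one of the summary aggregations named above (max, min, average, count, median, or sum). The indexDao variable and the choice of "max" are assumptions for illustration:

```java
// Illustrative wiring only; assumes an IndexDao has been built elsewhere.
ElasticsearchMetaAlertDao metaAlertDao = new ElasticsearchMetaAlertDao();
// "max" takes the highest child threat triage score as the meta alert's overall score.
metaAlertDao.init(indexDao, Optional.of("max"));
```
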
@@ -148,6 +124,36 @@
     if (threatSort.isPresent()) {
       this.threatSort = threatSort.get();
     }
+    Supplier<Map<String, Object>> globalConfigSupplier = () -> new HashMap<>();
+    if (elasticsearchDao != null && elasticsearchDao.getAccessConfig() != null) {
+      globalConfigSupplier = elasticsearchDao.getAccessConfig().getGlobalConfigSupplier();
+    }
+    MetaAlertConfig config = new MetaAlertConfig(
+        metaAlertsIndex,
+        this.threatSort,
+        globalConfigSupplier
+    ) {
+      @Override
+      protected String getDefaultThreatTriageField() {
+        return THREAT_TRIAGE_FIELD;
+      }
+
+      @Override
+      protected String getDefaultSourceTypeField() {
+        return SOURCE_TYPE_FIELD;
+      }
+    };
+
+    this.metaAlertSearchDao = new ElasticsearchMetaAlertSearchDao(
+        elasticsearchDao,
+        config,
+        pageSize);
+    this.metaAlertRetrieveLatestDao = new ElasticsearchMetaAlertRetrieveLatestDao(indexDao);
+    this.metaAlertUpdateDao = new ElasticsearchMetaAlertUpdateDao(
+        elasticsearchDao,
+        metaAlertRetrieveLatestDao,
+        config,
+        pageSize);
   }
 
   @Override
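
The anonymous MetaAlertConfig constructed in init() above can be read as the following named equivalent, a sketch built only from the constructor and abstract methods visible in this hunk:

```java
import java.util.Map;
import java.util.function.Supplier;

// Named equivalent of the anonymous subclass created in init(); sketch only.
class EsMetaAlertConfig extends MetaAlertConfig {
  EsMetaAlertConfig(String metaAlertsIndex, String threatSort,
      Supplier<Map<String, Object>> globalConfigSupplier) {
    super(metaAlertsIndex, threatSort, globalConfigSupplier);
  }

  @Override
  protected String getDefaultThreatTriageField() {
    // Falls back to the colon-separated triage field defined on the DAO.
    return ElasticsearchMetaAlertDao.THREAT_TRIAGE_FIELD;
  }

  @Override
  protected String getDefaultSourceTypeField() {
    return ElasticsearchMetaAlertDao.SOURCE_TYPE_FIELD;
  }
}
```
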
@@ -156,252 +162,8 @@
   }
 
   @Override
-  public SearchResponse getAllMetaAlertsForAlert(String guid) throws InvalidSearchException {
-    if (guid == null || guid.trim().isEmpty()) {
-      throw new InvalidSearchException("Guid cannot be empty");
-    }
-    // Searches for all alerts containing the meta alert guid in it's "metalerts" array
-    QueryBuilder qb = boolQuery()
-        .must(
-            nestedQuery(
-                ALERT_FIELD,
-                boolQuery()
-                    .must(termQuery(ALERT_FIELD + "." + GUID, guid)),
-                    ScoreMode.None
-            ).innerHit(new InnerHitBuilder())
-        )
-        .must(termQuery(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString()));
-    return queryAllResults(qb);
-  }
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
-      throws InvalidCreateException, IOException {
-    List<GetRequest> alertRequests = request.getAlerts();
-    if (request.getAlerts().isEmpty()) {
-      throw new InvalidCreateException("MetaAlertCreateRequest must contain alerts");
-    }
-    if (request.getGroups().isEmpty()) {
-      throw new InvalidCreateException("MetaAlertCreateRequest must contain UI groups");
-    }
-
-    // Retrieve the documents going into the meta alert and build it
-    Iterable<Document> alerts = indexDao.getAllLatest(alertRequests);
-
-    Document metaAlert = buildCreateDocument(alerts, request.getGroups());
-    calculateMetaScores(metaAlert);
-    // Add source type to be consistent with other sources and allow filtering
-    metaAlert.getDocument().put(getFieldName(SENSOR_TYPE_FIELD_PROPERTY, SOURCE_TYPE), MetaAlertDao.METAALERT_TYPE);
-
-    // Start a list of updates / inserts we need to run
-    Map<Document, Optional<String>> updates = new HashMap<>();
-    updates.put(metaAlert, Optional.of(MetaAlertDao.METAALERTS_INDEX));
-
-    try {
-      // We need to update the associated alerts with the new meta alerts, making sure existing
-      // links are maintained.
-      Map<String, Optional<String>> guidToIndices = alertRequests.stream().collect(Collectors.toMap(
-          GetRequest::getGuid, GetRequest::getIndex));
-      Map<String, String> guidToSensorTypes = alertRequests.stream().collect(Collectors.toMap(
-          GetRequest::getGuid, GetRequest::getSensorType));
-      for (Document alert: alerts) {
-        if (addMetaAlertToAlert(metaAlert.getGuid(), alert)) {
-          // Use the index in the request if it exists
-          Optional<String> index = guidToIndices.get(alert.getGuid());
-          if (!index.isPresent()) {
-            // Look up the index from Elasticsearch if one is not supplied in the request
-            index = elasticsearchDao.getIndexName(alert.getGuid(), guidToSensorTypes.get(alert.getGuid()));
-            if (!index.isPresent()) {
-              throw new IllegalArgumentException("Could not find index for " + alert.getGuid());
-            }
-          }
-          updates.put(alert, index);
-        }
-      }
-
-      // Kick off any updates.
-      indexDaoUpdate(updates);
-
-      MetaAlertCreateResponse createResponse = new MetaAlertCreateResponse();
-      createResponse.setCreated(true);
-      createResponse.setGuid(metaAlert.getGuid());
-      return createResponse;
-    } catch (IOException ioe) {
-      throw new InvalidCreateException("Unable to create meta alert", ioe);
-    }
-  }
-
-  @Override
-  public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
-      throws IOException {
-    Map<Document, Optional<String>> updates = new HashMap<>();
-    Document metaAlert = indexDao.getLatest(metaAlertGuid, METAALERT_TYPE);
-    if (MetaAlertStatus.ACTIVE.getStatusString().equals(metaAlert.getDocument().get(STATUS_FIELD))) {
-      Iterable<Document> alerts = indexDao.getAllLatest(alertRequests);
-      boolean metaAlertUpdated = addAlertsToMetaAlert(metaAlert, alerts);
-      if (metaAlertUpdated) {
-        calculateMetaScores(metaAlert);
-        updates.put(metaAlert, Optional.of(index));
-        for(Document alert: alerts) {
-          if (addMetaAlertToAlert(metaAlert.getGuid(), alert)) {
-            updates.put(alert, Optional.empty());
-          }
-        }
-        indexDaoUpdate(updates);
-      }
-      return metaAlertUpdated;
-    } else {
-      throw new IllegalStateException("Adding alerts to an INACTIVE meta alert is not allowed");
-    }
-  }
-
-  protected boolean addAlertsToMetaAlert(Document metaAlert, Iterable<Document> alerts) {
-    boolean alertAdded = false;
-    List<Map<String,Object>> currentAlerts = (List<Map<String, Object>>) metaAlert.getDocument().get(ALERT_FIELD);
-    Set<String> currentAlertGuids = currentAlerts.stream().map(currentAlert ->
-        (String) currentAlert.get(GUID)).collect(Collectors.toSet());
-    for (Document alert: alerts) {
-      String alertGuid = alert.getGuid();
-      // Only add an alert if it isn't already in the meta alert
-      if (!currentAlertGuids.contains(alertGuid)) {
-        currentAlerts.add(alert.getDocument());
-        alertAdded = true;
-      }
-    }
-    return alertAdded;
-  }
-
-  protected boolean addMetaAlertToAlert(String metaAlertGuid, Document alert) {
-    List<String> metaAlertField = new ArrayList<>();
-    List<String> alertField = (List<String>) alert.getDocument()
-        .get(MetaAlertDao.METAALERT_FIELD);
-    if (alertField != null) {
-      metaAlertField.addAll(alertField);
-    }
-    boolean metaAlertAdded = !metaAlertField.contains(metaAlertGuid);
-    if (metaAlertAdded) {
-      metaAlertField.add(metaAlertGuid);
-      alert.getDocument().put(MetaAlertDao.METAALERT_FIELD, metaAlertField);
-    }
-    return metaAlertAdded;
-  }
-
-  @Override
-  public boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
-      throws IOException {
-    Map<Document, Optional<String>> updates = new HashMap<>();
-    Document metaAlert = indexDao.getLatest(metaAlertGuid, METAALERT_TYPE);
-    if (MetaAlertStatus.ACTIVE.getStatusString().equals(metaAlert.getDocument().get(STATUS_FIELD))) {
-      Iterable<Document> alerts = indexDao.getAllLatest(alertRequests);
-      Collection<String> alertGuids = alertRequests.stream().map(GetRequest::getGuid).collect(
-          Collectors.toList());
-      boolean metaAlertUpdated = removeAlertsFromMetaAlert(metaAlert, alertGuids);
-      if (metaAlertUpdated) {
-        calculateMetaScores(metaAlert);
-        updates.put(metaAlert, Optional.of(index));
-        for(Document alert: alerts) {
-          if (removeMetaAlertFromAlert(metaAlert.getGuid(), alert)) {
-            updates.put(alert, Optional.empty());
-          }
-        }
-        indexDaoUpdate(updates);
-      }
-      return metaAlertUpdated;
-    } else {
-      throw new IllegalStateException("Removing alerts from an INACTIVE meta alert is not allowed");
-    }
-
-  }
-
-  protected boolean removeAlertsFromMetaAlert(Document metaAlert, Collection<String> alertGuids) {
-    List<Map<String,Object>> currentAlerts = (List<Map<String, Object>>) metaAlert.getDocument().get(ALERT_FIELD);
-    int previousSize = currentAlerts.size();
-    // Only remove an alert if it is in the meta alert
-    currentAlerts.removeIf(currentAlert -> alertGuids.contains((String) currentAlert.get(GUID)));
-    return currentAlerts.size() != previousSize;
-  }
-
-  protected boolean removeMetaAlertFromAlert(String metaAlertGuid, Document alert) {
-    List<String> metaAlertField = new ArrayList<>();
-    List<String> alertField = (List<String>) alert.getDocument()
-        .get(MetaAlertDao.METAALERT_FIELD);
-    if (alertField != null) {
-      metaAlertField.addAll(alertField);
-    }
-    boolean metaAlertRemoved = metaAlertField.remove(metaAlertGuid);
-    if (metaAlertRemoved) {
-      alert.getDocument().put(MetaAlertDao.METAALERT_FIELD, metaAlertField);
-    }
-    return metaAlertRemoved;
-  }
-
-  @Override
-  public boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status)
-      throws IOException {
-    Map<Document, Optional<String>> updates = new HashMap<>();
-    Document metaAlert = indexDao.getLatest(metaAlertGuid, METAALERT_TYPE);
-    String currentStatus = (String) metaAlert.getDocument().get(MetaAlertDao.STATUS_FIELD);
-    boolean metaAlertUpdated = !status.getStatusString().equals(currentStatus);
-    if (metaAlertUpdated) {
-      metaAlert.getDocument().put(MetaAlertDao.STATUS_FIELD, status.getStatusString());
-      List<GetRequest> getRequests = new ArrayList<>();
-      List<Map<String, Object>> currentAlerts = (List<Map<String, Object>>) metaAlert.getDocument()
-          .get(MetaAlertDao.ALERT_FIELD);
-      currentAlerts.stream().forEach(currentAlert -> {
-        getRequests.add(new GetRequest((String) currentAlert.get(GUID), (String) currentAlert.get(getFieldName(SENSOR_TYPE_FIELD_PROPERTY, SOURCE_TYPE))));
-      });
-      Iterable<Document> alerts = indexDao.getAllLatest(getRequests);
-      List<Map<String, Object>> updatedAlerts = new ArrayList<>();
-      for (Document alert : alerts) {
-        boolean metaAlertAdded = false;
-        boolean metaAlertRemoved = false;
-        // If we're making it active add add the meta alert guid for every alert.
-        if (MetaAlertStatus.ACTIVE.equals(status)) {
-          metaAlertAdded = addMetaAlertToAlert(metaAlert.getGuid(), alert);
-        }
-        // If we're making it inactive, remove the meta alert guid from every alert.
-        if (MetaAlertStatus.INACTIVE.equals(status)) {
-          metaAlertRemoved = removeMetaAlertFromAlert(metaAlert.getGuid(), alert);
-        }
-        if (metaAlertAdded || metaAlertRemoved) {
-          updates.put(alert, Optional.empty());
-        }
-        updatedAlerts.add(alert.getDocument());
-      }
-      if (MetaAlertStatus.ACTIVE.equals(status)) {
-        metaAlert.getDocument().put(MetaAlertDao.ALERT_FIELD, updatedAlerts);
-      }
-      updates.put(metaAlert, Optional.of(index));
-    }
-    if (metaAlertUpdated) {
-      indexDaoUpdate(updates);
-    }
-    return metaAlertUpdated;
-  }
-
-  @Override
-  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
-    // Wrap the query to also get any meta-alerts.
-    QueryBuilder qb = constantScoreQuery(boolQuery()
-        .must(boolQuery()
-            .should(new QueryStringQueryBuilder(searchRequest.getQuery()))
-            .should(nestedQuery(
-                ALERT_FIELD,
-                new QueryStringQueryBuilder(searchRequest.getQuery()),
-                ScoreMode.None
-                )
-            )
-        )
-        // Ensures that it's a meta alert with active status or that it's an alert (signified by
-        // having no status field)
-        .must(boolQuery()
-            .should(termQuery(MetaAlertDao.STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString()))
-            .should(boolQuery().mustNot(existsQuery(MetaAlertDao.STATUS_FIELD)))
-        )
-        .mustNot(existsQuery(MetaAlertDao.METAALERT_FIELD))
-    );
-    return elasticsearchDao.search(searchRequest, qb);
+  public Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException {
+    return indexDao.getColumnMetadata(indices);
   }
 
   @Override
@@ -410,327 +172,88 @@
   }
 
   @Override
-  public Iterable<Document> getAllLatest(
-      List<GetRequest> getRequests) throws IOException {
+  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
     return indexDao.getAllLatest(getRequests);
   }
 
   @Override
-  public void update(Document update, Optional<String> index) throws IOException {
-    if (METAALERT_TYPE.equals(update.getSensorType())) {
-      // We've been passed an update to the meta alert.
-      throw new UnsupportedOperationException("Meta alerts cannot be directly updated");
-    } else {
-      Map<Document, Optional<String>> updates = new HashMap<>();
-      updates.put(update, index);
-      try {
-        // We need to update an alert itself.  Only that portion of the update can be delegated.
-        // We still need to get meta alerts potentially associated with it and update.
-        Collection<Document> metaAlerts = getMetaAlertsForAlert(update.getGuid()).getResults().stream()
-                .map(searchResult -> new Document(searchResult.getSource(), searchResult.getId(), METAALERT_TYPE, update.getTimestamp()))
-                .collect(Collectors.toList());
-        // Each meta alert needs to be updated with the new alert
-        for (Document metaAlert : metaAlerts) {
-          replaceAlertInMetaAlert(metaAlert, update);
-          updates.put(metaAlert, Optional.of(METAALERTS_INDEX));
-        }
-      } catch (IndexNotFoundException e) {
-        List<String> indicesNotFound = e.getMetadata(INDEX_NOT_FOUND_INDICES_KEY);
-        // If no metaalerts have been created yet and the metaalerts index does not exist, assume no metaalerts exist for alert.
-        // Otherwise throw the exception.
-        if (indicesNotFound.size() != 1 || !METAALERTS_INDEX.equals(indicesNotFound.get(0))) {
-          throw e;
-        }
-      }
-
-      // Run the alert's update
-      indexDao.batchUpdate(updates);
-    }
-  }
-
-  protected boolean replaceAlertInMetaAlert(Document metaAlert, Document alert) {
-    boolean metaAlertUpdated = removeAlertsFromMetaAlert(metaAlert, Collections.singleton(alert.getGuid()));
-    if (metaAlertUpdated) {
-      addAlertsToMetaAlert(metaAlert, Collections.singleton(alert));
-    }
-    return metaAlertUpdated;
+  public SearchResponse getAllMetaAlertsForAlert(String guid) throws InvalidSearchException {
+    return metaAlertSearchDao.getAllMetaAlertsForAlert(guid);
   }
 
   @Override
-  public void batchUpdate(Map<Document, Optional<String>> updates) throws IOException {
-    throw new UnsupportedOperationException("Meta alerts do not allow for bulk updates");
+  public MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
+      throws InvalidCreateException, IOException {
+    return metaAlertUpdateDao.createMetaAlert(request);
   }
 
-  /**
-   * Does not allow patches on the "alerts" or "status" fields.  These fields must be updated with their
-   * dedicated methods.
-   *
-   * @param request The patch request
-   * @param timestamp Optionally a timestamp to set. If not specified then current time is used.
-   * @throws OriginalNotFoundException
-   * @throws IOException
-   */
-  @Override
-  public void patch(PatchRequest request, Optional<Long> timestamp)
-      throws OriginalNotFoundException, IOException {
-    if (METAALERT_TYPE.equals(request.getSensorType())) {
-      if (isPatchAllowed(request)) {
-        Document d = getPatchedDocument(request, timestamp);
-        indexDao.update(d, Optional.ofNullable(request.getIndex()));
-      } else {
-        throw new IllegalArgumentException("Meta alert patches are not allowed for /alert or /status paths.  "
-                + "Please use the add/remove alert or update status functions instead.");
-      }
-    } else {
-      Document d = getPatchedDocument(request, timestamp);
-      update(d, Optional.ofNullable(request.getIndex()));
-    }
-  }
-
-  protected boolean isPatchAllowed(PatchRequest request) {
-    if(request.getPatch() != null && !request.getPatch().isEmpty()) {
-      for(Map<String, Object> patch : request.getPatch()) {
-        Object pathObj = patch.get("path");
-        if(pathObj != null && pathObj instanceof String) {
-          String path = (String)pathObj;
-          if (STATUS_PATH.equals(path) || ALERT_PATH.equals(path)) {
-            return false;
-          }
-        }
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Given an alert GUID, retrieve all associated meta alerts.
-   * @param alertGuid The GUID of the child alert
-   * @return The Elasticsearch response containing the meta alerts
-   */
-  protected SearchResponse getMetaAlertsForAlert(String alertGuid) {
-    QueryBuilder qb = boolQuery()
-        .must(
-            nestedQuery(
-                ALERT_FIELD,
-                boolQuery()
-                    .must(termQuery(ALERT_FIELD + "." + Constants.GUID, alertGuid)),
-                ScoreMode.None
-            ).innerHit(new InnerHitBuilder())
-        )
-        .must(termQuery(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString()));
-    return queryAllResults(qb);
-  }
-
-  /**
-   * Elasticsearch queries default to 10 records returned.  Some internal queries require that all
-   * results are returned.  Rather than setting an arbitrarily high size, this method pages through results
-   * and returns them all in a single SearchResponse.
-   * @param qb
-   * @return
-   */
-  protected SearchResponse queryAllResults(QueryBuilder qb) {
-    SearchRequestBuilder searchRequestBuilder = elasticsearchDao
-        .getClient()
-        .prepareSearch(index)
-        .addStoredField("*")
-        .setFetchSource(true)
-        .setQuery(qb)
-        .setSize(pageSize);
-    org.elasticsearch.action.search.SearchResponse esResponse = searchRequestBuilder
-        .execute()
-        .actionGet();
-    List<SearchResult> allResults = getSearchResults(esResponse);
-    long total = esResponse.getHits().getTotalHits();
-    if (total > pageSize) {
-      int pages = (int) (total / pageSize) + 1;
-      for (int i = 1; i < pages; i++) {
-        int from = i * pageSize;
-        searchRequestBuilder.setFrom(from);
-        esResponse = searchRequestBuilder
-            .execute()
-            .actionGet();
-        allResults.addAll(getSearchResults(esResponse));
-      }
-    }
-    SearchResponse searchResponse = new SearchResponse();
-    searchResponse.setTotal(total);
-    searchResponse.setResults(allResults);
-    return searchResponse;
-  }
-
-  /**
-   * Transforms a list of Elasticsearch SearchHits to a list of SearchResults
-   * @param searchResponse
-   * @return
-   */
-  protected List<SearchResult> getSearchResults(org.elasticsearch.action.search.SearchResponse searchResponse) {
-    return Arrays.stream(searchResponse.getHits().getHits()).map(searchHit -> {
-          SearchResult searchResult = new SearchResult();
-          searchResult.setId(searchHit.getId());
-          searchResult.setSource(searchHit.getSource());
-          searchResult.setScore(searchHit.getScore());
-          searchResult.setIndex(searchHit.getIndex());
-          return searchResult;
-        }
-    ).collect(Collectors.toList());
-  }
-
-  /**
-   * Build the Document representing a meta alert to be created.
-   * @param alerts The Elasticsearch results for the meta alerts child documents
-   * @param groups The groups used to create this meta alert
-   * @return A Document representing the new meta alert
-   */
-  protected Document buildCreateDocument(Iterable<Document> alerts, List<String> groups) {
-    // Need to create a Document from the multiget. Scores will be calculated later
-    Map<String, Object> metaSource = new HashMap<>();
-    List<Map<String, Object>> alertList = new ArrayList<>();
-    for (Document alert: alerts) {
-      alertList.add(alert.getDocument());
-    }
-    metaSource.put(ALERT_FIELD, alertList);
-
-    // Add any meta fields
-    String guid = UUID.randomUUID().toString();
-    metaSource.put(GUID, guid);
-    metaSource.put(Constants.Fields.TIMESTAMP.getName(), System.currentTimeMillis());
-    metaSource.put(GROUPS_FIELD, groups);
-    metaSource.put(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
-
-    return new Document(metaSource, guid, METAALERT_TYPE, System.currentTimeMillis());
-  }
-
-  /**
-   * Calls the single update variant if there's only one update, otherwise calls batch.
-   * @param updates The list of updates to run
-   * @throws IOException If there's an update error
-   */
-  protected void indexDaoUpdate(Map<Document, Optional<String>> updates) throws IOException {
-    if (updates.size() == 1) {
-      Entry<Document, Optional<String>> singleUpdate = updates.entrySet().iterator().next();
-      indexDao.update(singleUpdate.getKey(), singleUpdate.getValue());
-    } else if (updates.size() > 1) {
-      indexDao.batchUpdate(updates);
-    } // else we have no updates, so don't do anything
-  }
-
-
-
-  @SuppressWarnings("unchecked")
-  protected List<Map<String, Object>> getAllAlertsForMetaAlert(Document update) throws IOException {
-    Document latest = indexDao.getLatest(update.getGuid(), MetaAlertDao.METAALERT_TYPE);
-    if (latest == null) {
-      return new ArrayList<>();
-    }
-    List<String> guids = new ArrayList<>();
-    List<Map<String, Object>> latestAlerts = (List<Map<String, Object>>) latest.getDocument()
-        .get(MetaAlertDao.ALERT_FIELD);
-    for (Map<String, Object> alert : latestAlerts) {
-      guids.add((String) alert.get(Constants.GUID));
-    }
-
-    List<Map<String, Object>> alerts = new ArrayList<>();
-    QueryBuilder query = QueryBuilders.idsQuery().addIds(guids.toArray(new String[0]));
-    SearchRequestBuilder request = elasticsearchDao.getClient().prepareSearch()
-        .setQuery(query);
-    org.elasticsearch.action.search.SearchResponse response = request.get();
-    for (SearchHit hit : response.getHits().getHits()) {
-      alerts.add(hit.sourceAsMap());
-    }
-    return alerts;
-  }
-
-  /**
-   * Builds an update Document for updating the meta alerts list.
-   * @param alertGuid The GUID of the alert to update
-   * @param sensorType The sensor type to update
-   * @param metaAlertField The new metaAlertList to use
-   * @return The update Document
-   */
-  protected Document buildAlertUpdate(String alertGuid, String sensorType,
-      List<String> metaAlertField, Long timestamp) {
-    Document alertUpdate;
-    Map<String, Object> document = new HashMap<>();
-    document.put(MetaAlertDao.METAALERT_FIELD, metaAlertField);
-    alertUpdate = new Document(
-        document,
-        alertGuid,
-        sensorType,
-        timestamp
-    );
-    return alertUpdate;
-  }
-
-
   @Override
-  public Map<String, FieldType> getColumnMetadata(List<String> indices)
+  public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
       throws IOException {
-    return indexDao.getColumnMetadata(indices);
+    return metaAlertUpdateDao.addAlertsToMetaAlert(metaAlertGuid, alertRequests);
+  }
+
+  @Override
+  public boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException {
+    return metaAlertUpdateDao.removeAlertsFromMetaAlert(metaAlertGuid, alertRequests);
+  }
+
+  @Override
+  public boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status)
+      throws IOException {
+    return metaAlertUpdateDao.updateMetaAlertStatus(metaAlertGuid, status);
+  }
+
+  @Override
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    return metaAlertSearchDao.search(searchRequest);
   }
 
   @Override
   public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
-    // Wrap the query to hide any alerts already contained in meta alerts
-    QueryBuilder qb = QueryBuilders.boolQuery()
-        .must(new QueryStringQueryBuilder(groupRequest.getQuery()))
-        .mustNot(existsQuery(MetaAlertDao.METAALERT_FIELD));
-    return elasticsearchDao.group(groupRequest, qb);
+    return metaAlertSearchDao.group(groupRequest);
   }
 
-  /**
-   * Calculate the meta alert scores for a Document.
-   * @param metaAlert The Document containing scores
-   * @return Set of score statistics
-   */
-  @SuppressWarnings("unchecked")
-  protected void calculateMetaScores(Document metaAlert) {
-    MetaScores metaScores = new MetaScores(new ArrayList<>());
-    List<Object> alertsRaw = ((List<Object>) metaAlert.getDocument().get(ALERT_FIELD));
-    if (alertsRaw != null && !alertsRaw.isEmpty()) {
-      ArrayList<Double> scores = new ArrayList<>();
-      for (Object alertRaw : alertsRaw) {
-        Map<String, Object> alert = (Map<String, Object>) alertRaw;
-        Double scoreNum = parseThreatField(alert.get(getFieldName(THREAT_FIELD_PROPERTY, THREAT_TRIAGE_FIELD)));
-        if (scoreNum != null) {
-          scores.add(scoreNum);
-        }
-      }
-      metaScores = new MetaScores(scores);
-    }
-
-    // add a summary (max, min, avg, ...) of all the threat scores from the child alerts
-    metaAlert.getDocument().putAll(metaScores.getMetaScores());
-
-    // add the overall threat score for the metaalert; one of the summary aggregations as defined by `threatSort`
-    Object threatScore = metaScores.getMetaScores().get(threatSort);
-
-    // add the threat score as a float; type needs to match the threat score field from each of the sensor indices
-    metaAlert.getDocument().put(getFieldName(THREAT_FIELD_PROPERTY, THREAT_TRIAGE_FIELD), ConversionUtils.convert(threatScore, Float.class));
+  @Override
+  public void update(Document update, Optional<String> index) throws IOException {
+    metaAlertUpdateDao.update(update, index);
   }
 
-  private Double parseThreatField(Object threatRaw) {
-    Double threat = null;
-    if (threatRaw instanceof Number) {
-      threat = ((Number) threatRaw).doubleValue();
-    } else if (threatRaw instanceof String) {
-      threat = Double.parseDouble((String) threatRaw);
-    }
-    return threat;
+  @Override
+  public void batchUpdate(Map<Document, Optional<String>> updates) {
+    metaAlertUpdateDao.batchUpdate(updates);
   }
 
-  public int getPageSize() {
-    return pageSize;
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    indexDao.addCommentToAlert(request);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    indexDao.removeCommentFromAlert(request);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    indexDao.addCommentToAlert(request, latest);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    indexDao.removeCommentFromAlert(request, latest);
+  }
+
+  @Override
+  public void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+      Optional<Long> timestamp)
+      throws OriginalNotFoundException, IOException {
+    metaAlertUpdateDao.patch(retrieveLatestDao, request, timestamp);
   }
 
   public void setPageSize(int pageSize) {
     this.pageSize = pageSize;
   }
 
-  private String getFieldName(String globalConfigKey, String defaultFieldName) {
-    if (this.elasticsearchDao == null || this.elasticsearchDao.getAccessConfig() == null) {
-      return defaultFieldName;
-    }
-    Map<String, Object> globalConfig = this.elasticsearchDao.getAccessConfig().getGlobalConfigSupplier().get();
-    return ConfigurationsUtils.getFieldName(globalConfig, globalConfigKey, defaultFieldName);
-  }
 }
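
With the refactor, callers still interact with a single MetaAlertDao and the routing to the sub-DAOs stays invisible. A hedged sketch of the create path; the bean-style setters and the GUID/sensor values are assumptions for illustration, while the two-argument GetRequest constructor and the validated alerts/groups fields both appear elsewhere in this commit:

```java
// Usage sketch; assumes an already-initialized ElasticsearchMetaAlertDao.
MetaAlertCreateRequest request = new MetaAlertCreateRequest();
request.setAlerts(Collections.singletonList(
    new GetRequest("alert-guid-1", "bro")));       // hypothetical GUID and sensor type
request.setGroups(Collections.singletonList("ip_src_addr"));

// Routed internally to ElasticsearchMetaAlertUpdateDao.createMetaAlert(...).
MetaAlertCreateResponse response = metaAlertDao.createMetaAlert(request);
```
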
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertRetrieveLatestDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertRetrieveLatestDao.java
new file mode 100644
index 0000000..8aa55d6
--- /dev/null
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertRetrieveLatestDao.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.elasticsearch.dao;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertRetrieveLatestDao;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.update.Document;
+
+public class ElasticsearchMetaAlertRetrieveLatestDao implements MetaAlertRetrieveLatestDao {
+  private RetrieveLatestDao retrieveLatestDao;
+
+  public ElasticsearchMetaAlertRetrieveLatestDao(RetrieveLatestDao retrieveLatestDao) {
+    this.retrieveLatestDao = retrieveLatestDao;
+  }
+
+  @Override
+  public Document getLatest(String guid, String sensorType) throws IOException {
+    return retrieveLatestDao.getLatest(guid, sensorType);
+  }
+
+  @Override
+  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
+    return retrieveLatestDao.getAllLatest(getRequests);
+  }
+}
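
This class only narrows an existing RetrieveLatestDao to the MetaAlertRetrieveLatestDao interface. A sketch of the single piece of wiring it needs, mirroring the init() call earlier in this commit; the GUID and sensor type are hypothetical:

```java
// Any RetrieveLatestDao (here, the wrapped IndexDao) can back meta alert retrieval.
ElasticsearchMetaAlertRetrieveLatestDao latestDao =
    new ElasticsearchMetaAlertRetrieveLatestDao(indexDao);
Document latest = latestDao.getLatest("some-guid", "metaalert"); // hypothetical arguments
```
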
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertSearchDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertSearchDao.java
new file mode 100644
index 0000000..00fc9d0
--- /dev/null
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertSearchDao.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.elasticsearch.dao;
+
+import static org.apache.metron.common.Constants.GUID;
+import static org.apache.metron.elasticsearch.utils.ElasticsearchUtils.queryAllResults;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
+import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertSearchDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.elasticsearch.index.query.InnerHitBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
+
+public class ElasticsearchMetaAlertSearchDao implements MetaAlertSearchDao {
+
+  protected ElasticsearchDao elasticsearchDao;
+  private MetaAlertConfig config;
+  private int pageSize;
+
+  public ElasticsearchMetaAlertSearchDao(ElasticsearchDao elasticsearchDao,
+      MetaAlertConfig config, int pageSize) {
+    this.elasticsearchDao = elasticsearchDao;
+    this.config = config;
+    this.pageSize = pageSize;
+  }
+
+  @Override
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    // Wrap the query to also get any meta-alerts.
+    QueryBuilder qb = constantScoreQuery(boolQuery()
+        .must(boolQuery()
+            .should(new QueryStringQueryBuilder(searchRequest.getQuery()))
+            .should(nestedQuery(
+                MetaAlertConstants.ALERT_FIELD,
+                new QueryStringQueryBuilder(searchRequest.getQuery()),
+                ScoreMode.None
+                )
+            )
+        )
+        // Ensures that it's a meta alert with active status or that it's an alert (signified by
+        // having no status field)
+        .must(boolQuery()
+            .should(termQuery(MetaAlertConstants.STATUS_FIELD,
+                MetaAlertStatus.ACTIVE.getStatusString()))
+            .should(boolQuery().mustNot(existsQuery(MetaAlertConstants.STATUS_FIELD)))
+        )
+        .mustNot(existsQuery(MetaAlertConstants.METAALERT_FIELD))
+    );
+    return elasticsearchDao.search(searchRequest, qb);
+  }
+
+  @Override
+  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+    // Wrap the query to hide any alerts already contained in meta alerts
+    QueryBuilder qb = QueryBuilders.boolQuery()
+        .must(new QueryStringQueryBuilder(groupRequest.getQuery()))
+        .mustNot(existsQuery(MetaAlertConstants.METAALERT_FIELD));
+    return elasticsearchDao.group(groupRequest, qb);
+  }
+
+  @Override
+  public SearchResponse getAllMetaAlertsForAlert(String guid) throws InvalidSearchException {
+    if (guid == null || guid.trim().isEmpty()) {
+      throw new InvalidSearchException("Guid cannot be empty");
+    }
+    // Searches for all active meta alerts whose nested alerts array contains the given alert GUID
+    QueryBuilder qb = boolQuery()
+        .must(
+            nestedQuery(
+                MetaAlertConstants.ALERT_FIELD,
+                boolQuery()
+                    .must(termQuery(MetaAlertConstants.ALERT_FIELD + "." + GUID, guid)),
+                ScoreMode.None
+            ).innerHit(new InnerHitBuilder())
+        )
+        .must(termQuery(MetaAlertConstants.STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString()));
+    return queryAllResults(elasticsearchDao.getClient(), qb, config.getMetaAlertIndex(),
+        pageSize);
+  }
+}
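
getAllMetaAlertsForAlert now defers result paging to ElasticsearchUtils.queryAllResults, which is not shown in this diff; the strategy, though, is the loop removed from ElasticsearchMetaAlertDao above. A condensed sketch of it, where toSearchResults stands in for the removed getSearchResults helper:

```java
// Condensed sketch of the page-through-all-results strategy that moved into
// ElasticsearchUtils.queryAllResults in this commit.
SearchRequestBuilder builder = client.prepareSearch(index)
    .setQuery(qb)
    .setFetchSource(true)
    .setSize(pageSize);
org.elasticsearch.action.search.SearchResponse esResponse = builder.execute().actionGet();
long total = esResponse.getHits().getTotalHits();
List<SearchResult> allResults = toSearchResults(esResponse); // hypothetical converter
for (int from = pageSize; from < total; from += pageSize) {
  builder.setFrom(from); // advance the window one page at a time
  allResults.addAll(toSearchResults(builder.execute().actionGet()));
}
```
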
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertUpdateDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertUpdateDao.java
new file mode 100644
index 0000000..bb79b7a
--- /dev/null
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertUpdateDao.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.elasticsearch.dao;
+
+import static org.apache.metron.elasticsearch.dao.ElasticsearchMetaAlertDao.METAALERTS_INDEX;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.metron.common.Constants;
+import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertRetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.metaalert.MetaScores;
+import org.apache.metron.indexing.dao.metaalert.lucene.AbstractLuceneMetaAlertUpdateDao;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.InvalidCreateException;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.query.InnerHitBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+
+public class ElasticsearchMetaAlertUpdateDao extends AbstractLuceneMetaAlertUpdateDao {
+
+  private static final String INDEX_NOT_FOUND_INDICES_KEY = "es.index";
+
+  private ElasticsearchDao elasticsearchDao;
+  private MetaAlertRetrieveLatestDao retrieveLatestDao;
+  private int pageSize;
+
+  /**
+   * Constructs an ElasticsearchMetaAlertUpdateDao.
+   * @param elasticsearchDao An UpdateDao to defer updates to.
+   * @param retrieveLatestDao A RetrieveLatestDao for getting the current state of items being
+   *     mutated.
+   * @param config The meta alert config to use.
+   * @param pageSize The page size to use when paging through search results.
+   */
+  public ElasticsearchMetaAlertUpdateDao(
+      ElasticsearchDao elasticsearchDao,
+      MetaAlertRetrieveLatestDao retrieveLatestDao,
+      MetaAlertConfig config,
+      int pageSize
+  ) {
+    super(elasticsearchDao, retrieveLatestDao, config);
+    this.elasticsearchDao = elasticsearchDao;
+    this.retrieveLatestDao = retrieveLatestDao;
+    this.pageSize = pageSize;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
+      throws InvalidCreateException, IOException {
+    List<GetRequest> alertRequests = request.getAlerts();
+    if (request.getAlerts().isEmpty()) {
+      throw new InvalidCreateException("MetaAlertCreateRequest must contain alerts");
+    }
+    if (request.getGroups().isEmpty()) {
+      throw new InvalidCreateException("MetaAlertCreateRequest must contain UI groups");
+    }
+
+    // Retrieve the documents going into the meta alert and build it
+    Iterable<Document> alerts = retrieveLatestDao.getAllLatest(alertRequests);
+
+    Document metaAlert = buildCreateDocument(alerts, request.getGroups(),
+        MetaAlertConstants.ALERT_FIELD);
+    MetaScores.calculateMetaScores(metaAlert, getConfig().getThreatTriageField(),
+        getConfig().getThreatSort());
+    // Add source type to be consistent with other sources and allow filtering
+    metaAlert.getDocument()
+        .put(getConfig().getSourceTypeField(), MetaAlertConstants.METAALERT_TYPE);
+
+    // Start a list of updates / inserts we need to run
+    Map<Document, Optional<String>> updates = new HashMap<>();
+    updates.put(metaAlert, Optional.of(getConfig().getMetaAlertIndex()));
+
+    try {
+      // We need to update the associated alerts with the new meta alerts, making sure existing
+      // links are maintained.
+      Map<String, Optional<String>> guidToIndices = alertRequests.stream().collect(Collectors.toMap(
+          GetRequest::getGuid, GetRequest::getIndex));
+      Map<String, String> guidToSensorTypes = alertRequests.stream().collect(Collectors.toMap(
+          GetRequest::getGuid, GetRequest::getSensorType));
+      for (Document alert : alerts) {
+        if (addMetaAlertToAlert(metaAlert.getGuid(), alert)) {
+          // Use the index in the request if it exists
+          Optional<String> index = guidToIndices.get(alert.getGuid());
+          if (!index.isPresent()) {
+            // Look up the index from Elasticsearch if one is not supplied in the request
+            index = elasticsearchDao
+                .getIndexName(alert.getGuid(), guidToSensorTypes.get(alert.getGuid()));
+            if (!index.isPresent()) {
+              throw new IllegalArgumentException("Could not find index for " + alert.getGuid());
+            }
+          }
+          updates.put(alert, index);
+        }
+      }
+
+      // Kick off any updates.
+      update(updates);
+
+      MetaAlertCreateResponse createResponse = new MetaAlertCreateResponse();
+      createResponse.setCreated(true);
+      createResponse.setGuid(metaAlert.getGuid());
+      return createResponse;
+    } catch (IOException ioe) {
+      throw new InvalidCreateException("Unable to create meta alert", ioe);
+    }
+  }
+
+  /**
+   * Adds alerts to a metaalert, based on a list of GetRequests provided for retrieval.
+   * @param metaAlertGuid The GUID of the metaalert to be given new children.
+   * @param alertRequests GetRequests for the appropriate alerts to add.
+   * @return True if metaalert is modified, false otherwise.
+   */
+  public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException {
+
+    Document metaAlert = retrieveLatestDao
+        .getLatest(metaAlertGuid, MetaAlertConstants.METAALERT_TYPE);
+    if (MetaAlertStatus.ACTIVE.getStatusString()
+        .equals(metaAlert.getDocument().get(MetaAlertConstants.STATUS_FIELD))) {
+      Iterable<Document> alerts = retrieveLatestDao.getAllLatest(alertRequests);
+      Map<Document, Optional<String>> updates = buildAddAlertToMetaAlertUpdates(metaAlert, alerts);
+      update(updates);
+      return !updates.isEmpty();
+    } else {
+      throw new IllegalStateException("Adding alerts to an INACTIVE meta alert is not allowed");
+    }
+  }
+
+  @Override
+  public void update(Document update, Optional<String> index) throws IOException {
+    if (MetaAlertConstants.METAALERT_TYPE.equals(update.getSensorType())) {
+      // We've been passed an update to the meta alert.
+      throw new UnsupportedOperationException("Meta alerts cannot be directly updated");
+    } else {
+      Map<Document, Optional<String>> updates = new HashMap<>();
+      updates.put(update, index);
+      try {
+        // We need to update an alert itself.  Only that portion of the update can be delegated.
+        // We still need to get meta alerts potentially associated with it and update.
+        Collection<Document> metaAlerts = getMetaAlertsForAlert(update.getGuid()).getResults()
+            .stream()
+            .map(searchResult -> new Document(searchResult.getSource(), searchResult.getId(),
+                MetaAlertConstants.METAALERT_TYPE, update.getTimestamp()))
+            .collect(Collectors.toList());
+        // Each meta alert needs to be updated with the new alert
+        for (Document metaAlert : metaAlerts) {
+          replaceAlertInMetaAlert(metaAlert, update);
+          updates.put(metaAlert, Optional.of(getConfig().getMetaAlertIndex()));
+        }
+      } catch (IndexNotFoundException e) {
+        List<String> indicesNotFound = e.getMetadata(INDEX_NOT_FOUND_INDICES_KEY);
+        // If no metaalerts have been created yet and the metaalerts index does not exist, assume no metaalerts exist for alert.
+        // Otherwise throw the exception.
+        if (indicesNotFound.size() != 1
+            || !getConfig().getMetaAlertIndex().equals(indicesNotFound.get(0))) {
+          throw e;
+        }
+      }
+
+      // Run the alert's update
+      elasticsearchDao.batchUpdate(updates);
+    }
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    getUpdateDao().addCommentToAlert(request);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    getUpdateDao().removeCommentFromAlert(request);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    getUpdateDao().addCommentToAlert(request, latest);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    getUpdateDao().removeCommentFromAlert(request, latest);
+  }
+
+  /**
+   * Given an alert GUID, retrieve all associated meta alerts.
+   * @param alertGuid The GUID of the child alert
+   * @return The Elasticsearch response containing the meta alerts
+   */
+  protected SearchResponse getMetaAlertsForAlert(String alertGuid) {
+    QueryBuilder qb = boolQuery()
+        .must(
+            nestedQuery(
+                MetaAlertConstants.ALERT_FIELD,
+                boolQuery()
+                    .must(termQuery(MetaAlertConstants.ALERT_FIELD + "." + Constants.GUID,
+                        alertGuid)),
+                ScoreMode.None
+            ).innerHit(new InnerHitBuilder())
+        )
+        .must(termQuery(MetaAlertConstants.STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString()));
+    return ElasticsearchUtils
+        .queryAllResults(elasticsearchDao.getClient(), qb, getConfig().getMetaAlertIndex(),
+            pageSize);
+  }
+
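+  /**
+   * Replaces an alert embedded in a meta alert with a new version of that alert.
+   * @param metaAlert The meta alert document to be modified.
+   * @param alert The new version of the alert to swap in.
+   * @return True if the meta alert was updated, false otherwise.
+   */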
+  protected boolean replaceAlertInMetaAlert(Document metaAlert, Document alert) {
+    boolean metaAlertUpdated = removeAlertsFromMetaAlert(metaAlert,
+        Collections.singleton(alert.getGuid()));
+    if (metaAlertUpdated) {
+      addAlertsToMetaAlert(metaAlert, Collections.singleton(alert));
+    }
+    return metaAlertUpdated;
+  }
+}
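A minimal usage sketch of the create path above, assuming an initialized ElasticsearchMetaAlertUpdateDao named metaAlertUpdateDao; the GUIDs and sensor types are illustrative, and GetRequest is assumed to offer a (guid, sensorType) constructor:

    MetaAlertCreateRequest request = new MetaAlertCreateRequest();
    request.setAlerts(Arrays.asList(
        new GetRequest("alert_guid_1", "bro"),
        new GetRequest("alert_guid_2", "snort")));
    request.setGroups(Collections.singletonList("group_one"));

    // createMetaAlert() throws InvalidCreateException when alerts or groups are empty.
    MetaAlertCreateResponse response = metaAlertUpdateDao.createMetaAlert(request);
    String metaAlertGuid = response.getGuid();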
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchRetrieveLatestDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchRetrieveLatestDao.java
new file mode 100644
index 0000000..f6bfeda
--- /dev/null
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchRetrieveLatestDao.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.elasticsearch.dao;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.function.Function;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.index.query.IdsQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+
+public class ElasticsearchRetrieveLatestDao implements RetrieveLatestDao {
+
+  private TransportClient transportClient;
+
+  public ElasticsearchRetrieveLatestDao(TransportClient transportClient) {
+    this.transportClient = transportClient;
+  }
+
+  @Override
+  public Document getLatest(String guid, String sensorType) {
+    Optional<Document> doc = searchByGuid(guid, sensorType, hit -> toDocument(guid, hit));
+    return doc.orElse(null);
+  }
+
+  @Override
+  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) {
+    Collection<String> guids = new HashSet<>();
+    Collection<String> sensorTypes = new HashSet<>();
+    for (GetRequest getRequest : getRequests) {
+      guids.add(getRequest.getGuid());
+      sensorTypes.add(getRequest.getSensorType());
+    }
+    return searchByGuids(
+        guids,
+        sensorTypes,
+        hit -> toDocument(hit.getId(), hit)
+    );
+  }
+
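+  /**
+   * Returns the first hit matching the given GUID, transformed by the callback, or an empty
+   * Optional when there is no match.
+   */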
+  <T> Optional<T> searchByGuid(String guid, String sensorType,
+      Function<SearchHit, Optional<T>> callback) {
+    Collection<String> sensorTypes = sensorType != null ? Collections.singleton(sensorType) : null;
+    List<T> results = searchByGuids(Collections.singleton(guid), sensorTypes, callback);
+    if (!results.isEmpty()) {
+      return Optional.of(results.get(0));
+    } else {
+      return Optional.empty();
+    }
+  }
+
+  /**
+   * Returns the search hits for a collection of GUIDs, optionally restricted to a set of
+   * sensor types. A callback can be specified to transform each hit into a type T; hits for
+   * which the callback returns an empty Optional are skipped.
+   */
+  <T> List<T> searchByGuids(Collection<String> guids, Collection<String> sensorTypes,
+      Function<SearchHit, Optional<T>> callback) {
+    if (guids == null || guids.isEmpty()) {
+      return Collections.emptyList();
+    }
+    IdsQueryBuilder idsQuery;
+    if (sensorTypes != null) {
+      String[] types = sensorTypes.stream().map(sensorType -> sensorType + "_doc")
+          .toArray(String[]::new);
+      idsQuery = QueryBuilders.idsQuery(types);
+    } else {
+      idsQuery = QueryBuilders.idsQuery();
+    }
+
+    for (String guid : guids) {
+      idsQuery.addIds(guid);
+    }
+    QueryBuilder query = idsQuery;
+
+    SearchRequestBuilder request = transportClient.prepareSearch()
+        .setQuery(query)
+        .setSize(guids.size());
+    org.elasticsearch.action.search.SearchResponse response = request.get();
+    SearchHits hits = response.getHits();
+    List<T> results = new ArrayList<>();
+    for (SearchHit hit : hits) {
+      Optional<T> result = callback.apply(hit);
+      if (result.isPresent()) {
+        results.add(result.get());
+      }
+    }
+    return results;
+  }
+
+  private Optional<Document> toDocument(final String guid, SearchHit hit) {
+    Long ts = 0L;
+    String doc = hit.getSourceAsString();
+    String sourceType = toSourceType(hit.getType());
+    try {
+      return Optional.of(new Document(doc, guid, sourceType, ts));
+    } catch (IOException e) {
+      throw new IllegalStateException("Unable to retrieve latest: " + e.getMessage(), e);
+    }
+  }
+
+  /**
+   * Returns the source type based on a given doc type.
+   * @param docType The document type.
+   * @return The source type.
+   */
+  private String toSourceType(String docType) {
+    return Iterables.getFirst(Splitter.on("_doc").split(docType), null);
+  }
+}
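A short sketch of the retrieve-latest DAO in isolation, assuming a configured TransportClient; the GUIDs and sensor types are illustrative:

    ElasticsearchRetrieveLatestDao retrieveDao = new ElasticsearchRetrieveLatestDao(transportClient);

    // Single document; getLatest() returns null when there is no matching hit.
    Document latest = retrieveDao.getLatest("alert_guid_1", "bro");

    // Batch retrieval; sensor types are mapped to "<sensorType>_doc" document types internally.
    Iterable<Document> docs = retrieveDao.getAllLatest(Arrays.asList(
        new GetRequest("alert_guid_1", "bro"),
        new GetRequest("alert_guid_2", "snort")));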
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchSearchDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchSearchDao.java
new file mode 100644
index 0000000..5cd0a4d
--- /dev/null
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchSearchDao.java
@@ -0,0 +1,467 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.elasticsearch.dao;
+
+import static org.apache.metron.elasticsearch.utils.ElasticsearchUtils.INDEX_NAME_DELIMITER;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.Group;
+import org.apache.metron.indexing.dao.search.GroupOrder;
+import org.apache.metron.indexing.dao.search.GroupOrderType;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.GroupResult;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchDao;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.search.SortField;
+import org.apache.metron.indexing.dao.search.SortOrder;
+import org.apache.metron.indexing.dao.update.Document;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.index.mapper.LegacyIpFieldMapper;
+import org.elasticsearch.index.query.IdsQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ElasticsearchSearchDao implements SearchDao {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  /**
+   * The value required to ensure that Elasticsearch sorts missing values last.
+   */
+  private static final String SORT_MISSING_LAST = "_last";
+
+  /**
+   * The value required to ensure that Elasticsearch sorts missing values first.
+   */
+  private static final String SORT_MISSING_FIRST = "_first";
+
+  private transient TransportClient client;
+  private AccessConfig accessConfig;
+  private ElasticsearchColumnMetadataDao columnMetadataDao;
+  private ElasticsearchRequestSubmitter requestSubmitter;
+
+  public ElasticsearchSearchDao(TransportClient client,
+      AccessConfig accessConfig,
+      ElasticsearchColumnMetadataDao columnMetadataDao,
+      ElasticsearchRequestSubmitter requestSubmitter) {
+    this.client = client;
+    this.accessConfig = accessConfig;
+    this.columnMetadataDao = columnMetadataDao;
+    this.requestSubmitter = requestSubmitter;
+  }
+
+  @Override
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    if(searchRequest.getQuery() == null) {
+      throw new InvalidSearchException("Search query is invalid: null");
+    }
+    return search(searchRequest, new QueryStringQueryBuilder(searchRequest.getQuery()));
+  }
+
+  @Override
+  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+    return group(groupRequest, new QueryStringQueryBuilder(groupRequest.getQuery()));
+  }
+
+  /**
+   * Defers to a provided {@link org.elasticsearch.index.query.QueryBuilder} for the query.
+   * @param request The request defining the parameters of the search
+   * @param queryBuilder The actual query to be run. Intended for cases where the SearchRequest requires wrapping.
+   * @return The results of the query
+   * @throws InvalidSearchException When the query is malformed or the current state doesn't allow search
+   */
+  protected SearchResponse search(SearchRequest request, QueryBuilder queryBuilder) throws InvalidSearchException {
+    org.elasticsearch.action.search.SearchRequest esRequest;
+    org.elasticsearch.action.search.SearchResponse esResponse;
+
+    if(client == null) {
+      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
+    }
+
+    if (request.getSize() > accessConfig.getMaxSearchResults()) {
+      throw new InvalidSearchException("Search result size must be less than " + accessConfig.getMaxSearchResults());
+    }
+
+    esRequest = buildSearchRequest(request, queryBuilder);
+    esResponse = requestSubmitter.submitSearch(esRequest);
+    return buildSearchResponse(request, esResponse);
+  }
+
+  /**
+   * Builds an Elasticsearch search request.
+   * @param searchRequest The Metron search request.
+   * @param queryBuilder The query to be executed.
+   * @return An Elasticsearch search request.
+   */
+  private org.elasticsearch.action.search.SearchRequest buildSearchRequest(
+      SearchRequest searchRequest,
+      QueryBuilder queryBuilder) throws InvalidSearchException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Got search request; request={}", ElasticsearchUtils.toJSON(searchRequest).orElse("???"));
+    }
+    SearchSourceBuilder searchBuilder = new SearchSourceBuilder()
+        .size(searchRequest.getSize())
+        .from(searchRequest.getFrom())
+        .query(queryBuilder)
+        .trackScores(true);
+    List<String> fields = searchRequest.getFields();
+    // column metadata needed to understand the type of each sort field
+    Map<String, FieldType> meta;
+    try {
+      meta = columnMetadataDao.getColumnMetadata(searchRequest.getIndices());
+    } catch(IOException e) {
+      throw new InvalidSearchException("Unable to get column metadata", e);
+    }
+
+    // handle sort fields
+    for(SortField sortField : searchRequest.getSort()) {
+
+      // what type is the sort field?
+      FieldType sortFieldType = meta.getOrDefault(sortField.getField(), FieldType.OTHER);
+
+      // sort order - descending sorts missing values last; ascending sorts missing values first
+      org.elasticsearch.search.sort.SortOrder sortOrder = getElasticsearchSortOrder(sortField.getSortOrder());
+      String missingSortOrder;
+      if(sortOrder == org.elasticsearch.search.sort.SortOrder.DESC) {
+        missingSortOrder = SORT_MISSING_LAST;
+      } else {
+        missingSortOrder = SORT_MISSING_FIRST;
+      }
+
+      // sort by the field, using the missing-value placement chosen above
+      FieldSortBuilder sortBy = new FieldSortBuilder(sortField.getField())
+          .order(sortOrder)
+          .missing(missingSortOrder)
+          .unmappedType(sortFieldType.getFieldType());
+      searchBuilder.sort(sortBy);
+    }
+
+    // handle search fields; the full source is fetched and, when specific fields are
+    // requested, projected down later in getSearchResult()
+    if (fields != null) {
+      searchBuilder.fetchSource("*", null);
+    } else {
+      searchBuilder.fetchSource(true);
+    }
+
+    List<String> facetFields = searchRequest.getFacetFields();
+
+    // handle facet fields
+    if (facetFields != null) {
+      // https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/_bucket_aggregations.html
+      for(String field : facetFields) {
+        String name = getFacetAggregationName(field);
+        TermsAggregationBuilder terms = AggregationBuilders.terms(name).field(field);
+        searchBuilder.aggregation(terms);
+      }
+    }
+
+    // return the search request
+    String[] indices = wildcardIndices(searchRequest.getIndices());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Built Elasticsearch request; indices={}, request={}", indices, searchBuilder.toString());
+    }
+    return new org.elasticsearch.action.search.SearchRequest()
+        .indices(indices)
+        .source(searchBuilder);
+  }
+
+  /**
+   * Builds a search response.
+   *
+   * This effectively transforms an Elasticsearch search response into a Metron search response.
+   *
+   * @param searchRequest The Metron search request.
+   * @param esResponse The Elasticsearch search response.
+   * @return A Metron search response.
+   * @throws InvalidSearchException If the column metadata for the search indices cannot be retrieved.
+   */
+  private SearchResponse buildSearchResponse(
+      SearchRequest searchRequest,
+      org.elasticsearch.action.search.SearchResponse esResponse) throws InvalidSearchException {
+
+    SearchResponse searchResponse = new SearchResponse();
+
+    searchResponse.setTotal(esResponse.getHits().getTotalHits());
+
+    // search hits --> search results
+    List<SearchResult> results = new ArrayList<>();
+    for(SearchHit hit: esResponse.getHits().getHits()) {
+      results.add(getSearchResult(hit, searchRequest.getFields()));
+    }
+    searchResponse.setResults(results);
+
+    // handle facet fields
+    if (searchRequest.getFacetFields() != null) {
+      List<String> facetFields = searchRequest.getFacetFields();
+      Map<String, FieldType> commonColumnMetadata;
+      try {
+        commonColumnMetadata = columnMetadataDao.getColumnMetadata(searchRequest.getIndices());
+      } catch (IOException e) {
+        throw new InvalidSearchException(String.format(
+            "Could not get common column metadata for indices %s",
+            Arrays.toString(searchRequest.getIndices().toArray())), e);
+      }
+      searchResponse.setFacetCounts(getFacetCounts(facetFields, esResponse.getAggregations(), commonColumnMetadata));
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Built search response; response={}", ElasticsearchUtils.toJSON(searchResponse).orElse("???"));
+    }
+    return searchResponse;
+  }
+
+  private org.elasticsearch.search.sort.SortOrder getElasticsearchSortOrder(
+      org.apache.metron.indexing.dao.search.SortOrder sortOrder) {
+    return sortOrder == org.apache.metron.indexing.dao.search.SortOrder.DESC ?
+        org.elasticsearch.search.sort.SortOrder.DESC : org.elasticsearch.search.sort.SortOrder.ASC;
+  }
+
+  private String getFacetAggregationName(String field) {
+    return String.format("%s_count", field);
+  }
+
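+  /**
+   * Appends the index name delimiter and a wildcard to each index name so that searches span
+   * all date-partitioned instances of an index.
+   */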
+  private String[] wildcardIndices(List<String> indices) {
+    if (indices == null) {
+      return new String[] {};
+    }
+
+    return indices
+        .stream()
+        .map(index -> String.format("%s%s*", index, INDEX_NAME_DELIMITER))
+        .toArray(String[]::new);
+  }
+
+  private SearchResult getSearchResult(SearchHit searchHit, List<String> fields) {
+    SearchResult searchResult = new SearchResult();
+    searchResult.setId(searchHit.getId());
+    Map<String, Object> source;
+    if (fields != null) {
+      Map<String, Object> resultSourceAsMap = searchHit.getSourceAsMap();
+      source = new HashMap<>();
+      fields.forEach(field -> source.put(field, resultSourceAsMap.get(field)));
+    } else {
+      source = searchHit.getSource();
+    }
+    searchResult.setSource(source);
+    searchResult.setScore(searchHit.getScore());
+    searchResult.setIndex(searchHit.getIndex());
+    return searchResult;
+  }
+
+  private Map<String, Map<String, Long>> getFacetCounts(List<String> fields, Aggregations aggregations, Map<String, FieldType> commonColumnMetadata) {
+    Map<String, Map<String, Long>> fieldCounts = new HashMap<>();
+    for (String field: fields) {
+      Map<String, Long> valueCounts = new HashMap<>();
+      if (aggregations != null) {
+        Aggregation aggregation = aggregations.get(getFacetAggregationName(field));
+        if (aggregation instanceof Terms) {
+          Terms terms = (Terms) aggregation;
+          terms.getBuckets().forEach(bucket -> valueCounts
+              .put(formatKey(bucket.getKey(), commonColumnMetadata.get(field)),
+                  bucket.getDocCount()));
+        }
+      }
+      fieldCounts.put(field, valueCounts);
+    }
+    return fieldCounts;
+  }
+
+  private String formatKey(Object key, FieldType type) {
+    if (FieldType.IP.equals(type) && key instanceof Long) {
+      return LegacyIpFieldMapper.longToIp((Long) key);
+    } else if (FieldType.BOOLEAN.equals(type)) {
+      return (Long) key == 1 ? "true" : "false";
+    } else {
+      return key.toString();
+    }
+  }
+
+  /**
+   * Defers to a provided {@link org.elasticsearch.index.query.QueryBuilder} for the query.
+   * @param groupRequest The request defining the parameters of the grouping
+   * @param queryBuilder The actual query to be run. Intended for cases where the GroupRequest requires wrapping.
+   * @return The results of the query
+   * @throws InvalidSearchException When the query is malformed or the current state doesn't allow search
+   */
+  protected GroupResponse group(GroupRequest groupRequest, QueryBuilder queryBuilder)
+      throws InvalidSearchException {
+    org.elasticsearch.action.search.SearchRequest esRequest;
+    org.elasticsearch.action.search.SearchResponse esResponse;
+
+    if (client == null) {
+      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
+    }
+    if (groupRequest.getGroups() == null || groupRequest.getGroups().isEmpty()) {
+      throw new InvalidSearchException("At least 1 group must be provided.");
+    }
+
+    esRequest = buildGroupRequest(groupRequest, queryBuilder);
+    esResponse = requestSubmitter.submitSearch(esRequest);
+    return buildGroupResponse(groupRequest, esResponse);
+  }
+
+  /**
+   * Builds a group search request.
+   * @param groupRequest The Metron group request.
+   * @param queryBuilder The search query.
+   * @return An Elasticsearch search request.
+   */
+  private org.elasticsearch.action.search.SearchRequest buildGroupRequest(
+      GroupRequest groupRequest,
+      QueryBuilder queryBuilder) {
+
+    // handle groups
+    TermsAggregationBuilder groups = getGroupsTermBuilder(groupRequest, 0);
+    final SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
+        .query(queryBuilder)
+        .aggregation(groups);
+
+    // return the search request
+    String[] indices = wildcardIndices(groupRequest.getIndices());
+    return new org.elasticsearch.action.search.SearchRequest()
+        .indices(indices)
+        .source(searchSourceBuilder);
+  }
+
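+  /**
+   * Recursively builds a terms aggregation for each requested group, nesting each group as a
+   * sub-aggregation of the previous one and optionally summing the score field per bucket.
+   */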
+  private TermsAggregationBuilder getGroupsTermBuilder(GroupRequest groupRequest, int index) {
+    List<Group> groups = groupRequest.getGroups();
+    Group group = groups.get(index);
+    String aggregationName = getGroupByAggregationName(group.getField());
+    TermsAggregationBuilder termsBuilder = AggregationBuilders.terms(aggregationName);
+    termsBuilder
+        .field(group.getField())
+        .size(accessConfig.getMaxSearchGroups())
+        .order(getElasticsearchGroupOrder(group.getOrder()));
+    if (index < groups.size() - 1) {
+      termsBuilder.subAggregation(getGroupsTermBuilder(groupRequest, index + 1));
+    }
+    Optional<String> scoreField = groupRequest.getScoreField();
+    if (scoreField.isPresent()) {
+      SumAggregationBuilder scoreSumAggregationBuilder = AggregationBuilders
+          .sum(getSumAggregationName(scoreField.get()))
+          .field(scoreField.get())
+          .missing(0);
+      termsBuilder.subAggregation(scoreSumAggregationBuilder);
+    }
+    return termsBuilder;
+  }
+
+  private String getGroupByAggregationName(String field) {
+    return String.format("%s_group", field);
+  }
+
+  private String getSumAggregationName(String field) {
+    return String.format("%s_score", field);
+  }
+
+  private Order getElasticsearchGroupOrder(GroupOrder groupOrder) {
+    if (groupOrder.getGroupOrderType() == GroupOrderType.TERM) {
+      return groupOrder.getSortOrder() == SortOrder.ASC ? Order.term(true) : Order.term(false);
+    } else {
+      return groupOrder.getSortOrder() == SortOrder.ASC ? Order.count(true) : Order.count(false);
+    }
+  }
+
+  /**
+   * Build a group response.
+   * @param groupRequest The original group request.
+   * @param response The search response.
+   * @return A group response.
+   * @throws InvalidSearchException If the column metadata for the group indices cannot be retrieved.
+   */
+  private GroupResponse buildGroupResponse(
+      GroupRequest groupRequest,
+      org.elasticsearch.action.search.SearchResponse response) throws InvalidSearchException {
+
+    // build the search response
+    Map<String, FieldType> commonColumnMetadata;
+    try {
+      commonColumnMetadata = columnMetadataDao.getColumnMetadata(groupRequest.getIndices());
+    } catch (IOException e) {
+      throw new InvalidSearchException(String.format("Could not get common column metadata for indices %s",
+          Arrays.toString(groupRequest.getIndices().toArray())));
+    }
+
+    GroupResponse groupResponse = new GroupResponse();
+    groupResponse.setGroupedBy(groupRequest.getGroups().get(0).getField());
+    groupResponse.setGroupResults(getGroupResults(groupRequest, 0, response.getAggregations(), commonColumnMetadata));
+    return groupResponse;
+  }
+
+  private List<GroupResult> getGroupResults(GroupRequest groupRequest, int index, Aggregations aggregations, Map<String, FieldType> commonColumnMetadata) {
+    List<Group> groups = groupRequest.getGroups();
+    String field = groups.get(index).getField();
+    List<GroupResult> searchResultGroups = new ArrayList<>();
+    if(aggregations != null) {
+      Terms terms = aggregations.get(getGroupByAggregationName(field));
+      for (Bucket bucket : terms.getBuckets()) {
+        GroupResult groupResult = new GroupResult();
+        groupResult.setKey(formatKey(bucket.getKey(), commonColumnMetadata.get(field)));
+        groupResult.setTotal(bucket.getDocCount());
+        Optional<String> scoreField = groupRequest.getScoreField();
+        if (scoreField.isPresent()) {
+          Sum score = bucket.getAggregations().get(getSumAggregationName(scoreField.get()));
+          groupResult.setScore(score.getValue());
+        }
+        if (index < groups.size() - 1) {
+          groupResult.setGroupedBy(groups.get(index + 1).getField());
+          groupResult.setGroupResults(getGroupResults(groupRequest, index + 1, bucket.getAggregations(), commonColumnMetadata));
+        }
+        searchResultGroups.add(groupResult);
+      }
+    }
+    return searchResultGroups;
+  }
+}
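A sketch of how a caller might drive the search DAO above; the query, field names, and indices are illustrative, and the standard SearchRequest setters are assumed:

    SearchRequest request = new SearchRequest();
    request.setQuery("ip_src_addr:192.168.1.1");
    request.setIndices(Arrays.asList("bro", "snort")); // expanded by wildcardIndices() above
    request.setFrom(0);
    request.setSize(10);
    request.setFacetFields(Collections.singletonList("ip_src_addr"));

    SearchResponse response = elasticsearchSearchDao.search(request);
    long total = response.getTotal();
    Map<String, Map<String, Long>> facetCounts = response.getFacetCounts();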
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchUpdateDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchUpdateDao.java
new file mode 100644
index 0000000..f2b08d2
--- /dev/null
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchUpdateDao.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.elasticsearch.dao;
+
+import static org.apache.metron.indexing.dao.IndexDao.COMMENTS_FIELD;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.AlertComment;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.UpdateDao;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
+import org.elasticsearch.client.transport.TransportClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ElasticsearchUpdateDao implements UpdateDao {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private transient TransportClient client;
+  private AccessConfig accessConfig;
+  private ElasticsearchRetrieveLatestDao retrieveLatestDao;
+
+  public ElasticsearchUpdateDao(TransportClient client,
+      AccessConfig accessConfig,
+      ElasticsearchRetrieveLatestDao retrieveLatestDao) {
+    this.client = client;
+    this.accessConfig = accessConfig;
+    this.retrieveLatestDao = retrieveLatestDao;
+  }
+
+  @Override
+  public void update(Document update, Optional<String> index) throws IOException {
+    String indexPostfix = ElasticsearchUtils
+        .getIndexFormat(accessConfig.getGlobalConfigSupplier().get()).format(new Date());
+    String sensorType = update.getSensorType();
+    String indexName = getIndexName(update, index, indexPostfix);
+
+    IndexRequest indexRequest = buildIndexRequest(update, sensorType, indexName);
+    try {
+      IndexResponse response = client.index(indexRequest).get();
+
+      ShardInfo shardInfo = response.getShardInfo();
+      int failed = shardInfo.getFailed();
+      if (failed > 0) {
+        throw new IOException(
+            "ElasticsearchDao index failed: " + Arrays.toString(shardInfo.getFailures()));
+      }
+    } catch (Exception e) {
+      throw new IOException(e.getMessage(), e);
+    }
+  }
+
+  @Override
+  public void batchUpdate(Map<Document, Optional<String>> updates) throws IOException {
+    String indexPostfix = ElasticsearchUtils
+        .getIndexFormat(accessConfig.getGlobalConfigSupplier().get()).format(new Date());
+
+    BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
+
+    // Get the indices we'll actually be using for each Document.
+    for (Map.Entry<Document, Optional<String>> updateEntry : updates.entrySet()) {
+      Document update = updateEntry.getKey();
+      String sensorType = update.getSensorType();
+      String indexName = getIndexName(update, updateEntry.getValue(), indexPostfix);
+      IndexRequest indexRequest = buildIndexRequest(
+          update,
+          sensorType,
+          indexName
+      );
+
+      bulkRequestBuilder.add(indexRequest);
+    }
+
+    BulkResponse bulkResponse = bulkRequestBuilder.get();
+    if (bulkResponse.hasFailures()) {
+      LOG.error("Bulk Request has failures: {}", bulkResponse.buildFailureMessage());
+      throw new IOException(
+          "ElasticsearchDao upsert failed: " + bulkResponse.buildFailureMessage());
+    }
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    Document latest = retrieveLatestDao.getLatest(request.getGuid(), request.getSensorType());
+    addCommentToAlert(request, latest);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    if (latest == null) {
+      return;
+    }
+    List<Map<String, Object>> commentsField = (List<Map<String, Object>>) latest.getDocument()
+        .getOrDefault(COMMENTS_FIELD, new ArrayList<>());
+    List<Map<String, Object>> originalComments = new ArrayList<>(commentsField);
+
+    originalComments.add(
+        new AlertComment(request.getComment(), request.getUsername(), request.getTimestamp())
+            .asMap());
+
+    Document newVersion = new Document(latest);
+    newVersion.getDocument().put(COMMENTS_FIELD, originalComments);
+    update(newVersion, Optional.empty());
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    Document latest = retrieveLatestDao.getLatest(request.getGuid(), request.getSensorType());
+    removeCommentFromAlert(request, latest);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    if (latest == null) {
+      return;
+    }
+    List<Map<String, Object>> commentsField = (List<Map<String, Object>>) latest.getDocument()
+        .getOrDefault(COMMENTS_FIELD, new ArrayList<>());
+    List<Map<String, Object>> originalComments = new ArrayList<>(commentsField);
+
+    List<AlertComment> alertComments = new ArrayList<>();
+    for (Map<String, Object> commentRaw : originalComments) {
+      alertComments.add(new AlertComment(commentRaw));
+    }
+
+    alertComments.remove(
+        new AlertComment(request.getComment(), request.getUsername(), request.getTimestamp()));
+    List<Map<String, Object>> commentsFinal = alertComments.stream().map(AlertComment::asMap)
+        .collect(Collectors.toList());
+    Document newVersion = new Document(latest);
+    if (commentsFinal.size() > 0) {
+      newVersion.getDocument().put(COMMENTS_FIELD, commentsFinal);
+    } else {
+      newVersion.getDocument().remove(COMMENTS_FIELD);
+    }
+
+    update(newVersion, Optional.empty());
+  }
+
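+  /**
+   * Resolves the index to write to: the explicitly supplied index when present, otherwise the
+   * index the document currently lives in, otherwise the index derived from the sensor type
+   * and date postfix.
+   */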
+  protected String getIndexName(Document update, Optional<String> index, String indexPostFix) {
+    return index.orElse(getIndexName(update.getGuid(), update.getSensorType())
+        .orElse(ElasticsearchUtils.getIndexName(update.getSensorType(), indexPostFix, null))
+    );
+  }
+
+  protected Optional<String> getIndexName(String guid, String sensorType) {
+    return retrieveLatestDao.searchByGuid(guid,
+        sensorType,
+        hit -> Optional.ofNullable(hit.getIndex())
+    );
+  }
+
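+  /**
+   * Builds the Elasticsearch IndexRequest used to write a Document, carrying over the
+   * document's timestamp when one is present.
+   */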
+  protected IndexRequest buildIndexRequest(Document update, String sensorType, String indexName) {
+    String type = sensorType + "_doc";
+    Object ts = update.getTimestamp();
+    IndexRequest indexRequest = new IndexRequest(indexName, type, update.getGuid())
+        .source(update.getDocument());
+    if (ts != null) {
+      indexRequest = indexRequest.timestamp(ts.toString());
+    }
+
+    return indexRequest;
+  }
+}
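A sketch of the comment round-trip handled by the update DAO above; the values are illustrative and the standard CommentAddRemoveRequest setters are assumed:

    CommentAddRemoveRequest commentRequest = new CommentAddRemoveRequest();
    commentRequest.setGuid("alert_guid_1");
    commentRequest.setSensorType("bro");
    commentRequest.setComment("Checked against the proxy logs; this one is benign.");
    commentRequest.setUsername("analyst1");
    commentRequest.setTimestamp(System.currentTimeMillis());

    // Fetches the latest document, appends the comment, and re-indexes it.
    elasticsearchUpdateDao.addCommentToAlert(commentRequest);

    // Removing the last comment strips the comments field entirely before re-indexing.
    elasticsearchUpdateDao.removeCommentFromAlert(commentRequest);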
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java
index 24f7a27..98dc66d 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java
@@ -36,18 +36,24 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.stream.Collectors;
 import org.apache.commons.lang.StringUtils;
 import org.apache.metron.common.configuration.writer.WriterConfiguration;
 import org.apache.metron.common.utils.HDFSUtils;
 import org.apache.metron.common.utils.ReflectionUtils;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
 import org.apache.metron.netty.utils.NettyRuntimeWrapper;
 import org.apache.metron.stellar.common.utils.ConversionUtils;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.transport.client.PreBuiltTransportClient;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -321,4 +327,62 @@
 
     return json;
   }
+
+  /**
+   * Elasticsearch queries default to 10 records returned.  Some internal queries require that all
+   * results are returned.  Rather than setting an arbitrarily high size, this method pages through results
+   * and returns them all in a single SearchResponse.
+   * @param transportClient The Elasticsearch client used to execute the query.
+   * @param qb A QueryBuilder that provides the query to be run.
+   * @param index The index to search.
+   * @param pageSize The number of results to fetch per page.
+   * @return A SearchResponse containing the appropriate results.
+   */
+  public static SearchResponse queryAllResults(TransportClient transportClient,
+      QueryBuilder qb,
+      String index,
+      int pageSize
+  ) {
+    SearchRequestBuilder searchRequestBuilder = transportClient
+        .prepareSearch(index)
+        .addStoredField("*")
+        .setFetchSource(true)
+        .setQuery(qb)
+        .setSize(pageSize);
+    org.elasticsearch.action.search.SearchResponse esResponse = searchRequestBuilder
+        .execute()
+        .actionGet();
+    List<SearchResult> allResults = getSearchResults(esResponse);
+    long total = esResponse.getHits().getTotalHits();
+    if (total > pageSize) {
+      int pages = (int) ((total + pageSize - 1) / pageSize);
+      for (int i = 1; i < pages; i++) {
+        int from = i * pageSize;
+        searchRequestBuilder.setFrom(from);
+        esResponse = searchRequestBuilder
+            .execute()
+            .actionGet();
+        allResults.addAll(getSearchResults(esResponse));
+      }
+    }
+    SearchResponse searchResponse = new SearchResponse();
+    searchResponse.setTotal(total);
+    searchResponse.setResults(allResults);
+    return searchResponse;
+  }
+
+  /**
+   * Transforms the hits of an Elasticsearch SearchResponse into a list of SearchResults.
+   * @param searchResponse The Elasticsearch SearchResponse whose hits are to be converted.
+   * @return The list of SearchResults for the hits.
+   */
+  protected static List<SearchResult> getSearchResults(
+      org.elasticsearch.action.search.SearchResponse searchResponse) {
+    return Arrays.stream(searchResponse.getHits().getHits()).map(searchHit -> {
+          SearchResult searchResult = new SearchResult();
+          searchResult.setId(searchHit.getId());
+          searchResult.setSource(searchHit.getSource());
+          searchResult.setScore(searchHit.getScore());
+          searchResult.setIndex(searchHit.getIndex());
+          return searchResult;
+        }
+    ).collect(Collectors.toList());
+  }
 }
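To make the paging arithmetic in queryAllResults concrete: with pageSize = 10 and 25 total hits, the ceiling division yields 3 pages, fetched at offsets 0, 10, and 20; with an exact multiple such as 20 total hits it yields 2 pages, avoiding a trailing empty request.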
diff --git a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchDaoTest.java b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchDaoTest.java
index 2a6fb4f..6c3c327 100644
--- a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchDaoTest.java
+++ b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchDaoTest.java
@@ -17,7 +17,9 @@
  */
 package org.apache.metron.elasticsearch.dao;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
@@ -26,14 +28,15 @@
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
 import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.FieldType;
 import org.apache.metron.indexing.dao.search.InvalidSearchException;
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
 import org.apache.metron.indexing.dao.search.SortField;
 import org.apache.metron.indexing.dao.search.SortOrder;
-import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
-import org.apache.metron.indexing.dao.search.FieldType;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.SearchHit;
@@ -44,42 +47,43 @@
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 
-import java.util.Map;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertNotNull;
-
 public class ElasticsearchDaoTest {
 
   private ElasticsearchDao dao;
   private ElasticsearchRequestSubmitter requestSubmitter;
 
-  private void setup(RestStatus status, int maxSearchResults, Map<String, FieldType> metadata) throws Exception {
+  private void setup(RestStatus status, int maxSearchResults, Map<String, FieldType> metadata)
+      throws Exception {
 
     // setup the mock search hits
     SearchHit hit1 = mock(SearchHit.class);
     when(hit1.getId()).thenReturn("id1");
-    when(hit1.getSource()).thenReturn(new HashMap<String, Object>(){{ put("field", "value1"); }});
+    when(hit1.getSource()).thenReturn(new HashMap<String, Object>() {{
+      put("field", "value1");
+    }});
     when(hit1.getScore()).thenReturn(0.1f);
 
     SearchHit hit2 = mock(SearchHit.class);
     when(hit2.getId()).thenReturn("id2");
-    when(hit2.getSource()).thenReturn(new HashMap<String, Object>(){{ put("field", "value2"); }});
+    when(hit2.getSource()).thenReturn(new HashMap<String, Object>() {{
+      put("field", "value2");
+    }});
     when(hit2.getScore()).thenReturn(0.2f);
 
     // search hits
-    SearchHit[] hits = { hit1, hit2 };
+    SearchHit[] hits = {hit1, hit2};
     SearchHits searchHits = mock(SearchHits.class);
     when(searchHits.getHits()).thenReturn(hits);
     when(searchHits.getTotalHits()).thenReturn(Integer.toUnsignedLong(hits.length));
 
     // search response which returns the search hits
-    org.elasticsearch.action.search.SearchResponse response = mock(org.elasticsearch.action.search.SearchResponse.class);
+    org.elasticsearch.action.search.SearchResponse response = mock(
+        org.elasticsearch.action.search.SearchResponse.class);
     when(response.status()).thenReturn(status);
     when(response.getHits()).thenReturn(searchHits);
 
     // provides column metadata
-    ColumnMetadataDao columnMetadataDao = mock(ColumnMetadataDao.class);
+    ElasticsearchColumnMetadataDao columnMetadataDao = mock(ElasticsearchColumnMetadataDao.class);
     when(columnMetadataDao.getColumnMetadata(any())).thenReturn(metadata);
 
     // returns the search response
@@ -92,7 +96,21 @@
     AccessConfig config = mock(AccessConfig.class);
     when(config.getMaxSearchResults()).thenReturn(maxSearchResults);
 
-    dao = new ElasticsearchDao(client, columnMetadataDao, requestSubmitter, config);
+    ElasticsearchSearchDao elasticsearchSearchDao = new ElasticsearchSearchDao(client, config,
+        columnMetadataDao, requestSubmitter);
+    ElasticsearchRetrieveLatestDao elasticsearchRetrieveLatestDao = new ElasticsearchRetrieveLatestDao(
+        client);
+    ElasticsearchUpdateDao elasticsearchUpdateDao = new ElasticsearchUpdateDao(client, config,
+        elasticsearchRetrieveLatestDao);
+
+    dao = new ElasticsearchDao(
+        client,
+        config,
+        elasticsearchSearchDao,
+        elasticsearchUpdateDao,
+        elasticsearchRetrieveLatestDao,
+        columnMetadataDao,
+        requestSubmitter);
   }
 
   private void setup(RestStatus status, int maxSearchResults) throws Exception {
@@ -112,9 +130,9 @@
 
     // "sort by" fields for the search request
     SortField[] expectedSortFields = {
-            sortBy("sortByStringDesc", SortOrder.DESC),
-            sortBy("sortByIntAsc", SortOrder.ASC),
-            sortBy("sortByUndefinedDesc", SortOrder.DESC)
+        sortBy("sortByStringDesc", SortOrder.DESC),
+        sortBy("sortByIntAsc", SortOrder.ASC),
+        sortBy("sortByUndefinedDesc", SortOrder.DESC)
     };
 
     // create a metron search request
@@ -131,7 +149,8 @@
     assertNotNull(searchResponse);
 
     // capture the elasticsearch search request that was created
-    ArgumentCaptor<org.elasticsearch.action.search.SearchRequest> argument = ArgumentCaptor.forClass(org.elasticsearch.action.search.SearchRequest.class);
+    ArgumentCaptor<org.elasticsearch.action.search.SearchRequest> argument = ArgumentCaptor
+        .forClass(org.elasticsearch.action.search.SearchRequest.class);
     verify(requestSubmitter).submitSearch(argument.capture());
     org.elasticsearch.action.search.SearchRequest request = argument.getValue();
 
@@ -177,9 +196,9 @@
 
     // "sort by" fields for the search request
     SortField[] expectedSortFields = {
-            sortBy("sortByStringDesc", SortOrder.DESC),
-            sortBy("sortByIntAsc", SortOrder.ASC),
-            sortBy("sortByUndefinedDesc", SortOrder.DESC)
+        sortBy("sortByStringDesc", SortOrder.DESC),
+        sortBy("sortByIntAsc", SortOrder.ASC),
+        sortBy("sortByUndefinedDesc", SortOrder.DESC)
     };
 
     // create a metron search request
@@ -196,7 +215,8 @@
     assertNotNull(searchResponse);
 
     // capture the elasticsearch search request that was created
-    ArgumentCaptor<org.elasticsearch.action.search.SearchRequest> argument = ArgumentCaptor.forClass(org.elasticsearch.action.search.SearchRequest.class);
+    ArgumentCaptor<org.elasticsearch.action.search.SearchRequest> argument = ArgumentCaptor
+        .forClass(org.elasticsearch.action.search.SearchRequest.class);
     verify(requestSubmitter).submitSearch(argument.capture());
     org.elasticsearch.action.search.SearchRequest request = argument.getValue();
 
@@ -205,7 +225,7 @@
     JSONObject json = (JSONObject) parser.parse(ElasticsearchUtils.toJSON(request).orElse("???"));
 
     // ensure that the index names are 'wildcard-ed'
-    String[] expected = { "bro_index*", "snort_index*" };
+    String[] expected = {"bro_index*", "snort_index*"};
     assertArrayEquals(expected, request.indices());
   }
 
@@ -217,7 +237,7 @@
     setup(RestStatus.OK, maxSearchResults);
 
     SearchRequest searchRequest = new SearchRequest();
-    searchRequest.setSize(maxSearchResults+1);
+    searchRequest.setSize(maxSearchResults + 1);
     searchRequest.setQuery("");
     dao.search(searchRequest);
     // exception expected - size > max
diff --git a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDaoTest.java b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDaoTest.java
index df782bd..b1da2a4 100644
--- a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDaoTest.java
+++ b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/dao/ElasticsearchMetaAlertDaoTest.java
@@ -18,15 +18,6 @@
 
 package org.apache.metron.elasticsearch.dao;
 
-import static org.apache.metron.indexing.dao.MetaAlertDao.METAALERTS_INDEX;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -35,40 +26,49 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.UUID;
-import org.apache.metron.common.Constants;
-import org.apache.metron.common.Constants.Fields;
 import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.HBaseDao;
 import org.apache.metron.indexing.dao.IndexDao;
-import org.apache.metron.indexing.dao.MetaAlertDao;
+import org.apache.metron.indexing.dao.MultiIndexDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
-import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
 import org.apache.metron.indexing.dao.search.FieldType;
 import org.apache.metron.indexing.dao.search.GetRequest;
 import org.apache.metron.indexing.dao.search.GroupRequest;
 import org.apache.metron.indexing.dao.search.GroupResponse;
 import org.apache.metron.indexing.dao.search.InvalidCreateException;
-import org.apache.metron.indexing.dao.search.InvalidSearchException;
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.Document;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.junit.Test;
 
-public class ElasticsearchMetaAlertDaoTest {
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
+public class ElasticsearchMetaAlertDaoTest {
 
 
   @Test(expected = IllegalArgumentException.class)
   public void testInvalidInit() {
     IndexDao dao = new IndexDao() {
       @Override
-      public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+      public SearchResponse search(SearchRequest searchRequest) {
         return null;
       }
 
       @Override
-      public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+      public GroupResponse group(GroupRequest groupRequest) {
         return null;
       }
 
@@ -77,120 +77,54 @@
       }
 
       @Override
-      public Document getLatest(String guid, String sensorType) throws IOException {
+      public Document getLatest(String guid, String sensorType) {
         return null;
       }
 
       @Override
       public Iterable<Document> getAllLatest(
-          List<GetRequest> getRequests) throws IOException {
+          List<GetRequest> getRequests) {
         return null;
       }
 
       @Override
-      public void update(Document update, Optional<String> index) throws IOException {
+      public void update(Document update, Optional<String> index) {
       }
 
       @Override
-      public void batchUpdate(Map<Document, Optional<String>> updates) throws IOException {
+      public void batchUpdate(Map<Document, Optional<String>> updates) {
       }
 
       @Override
-      public Map<String, FieldType> getColumnMetadata(List<String> indices)
-          throws IOException {
+      public Map<String, FieldType> getColumnMetadata(List<String> indices) {
         return null;
       }
+
+      @Override
+      public void addCommentToAlert(CommentAddRemoveRequest request) {
+      }
+
+      @Override
+      public void removeCommentFromAlert(CommentAddRemoveRequest request) {
+      }
+
+      @Override
+      public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) {
+      }
+
+      @Override
+      public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) {
+      }
     };
     ElasticsearchMetaAlertDao metaAlertDao = new ElasticsearchMetaAlertDao();
     metaAlertDao.init(dao);
   }
 
-  @Test
-  public void testBuildCreateDocumentSingleAlert() throws InvalidCreateException, IOException {
-    ElasticsearchDao esDao = new ElasticsearchDao();
-    ElasticsearchMetaAlertDao emaDao = new ElasticsearchMetaAlertDao();
-    emaDao.init(esDao);
-
-    List<String> groups = new ArrayList<>();
-    groups.add("group_one");
-    groups.add("group_two");
-
-    // Build the first response from the multiget
-    Map<String, Object> alertOne = new HashMap<>();
-    alertOne.put(Constants.GUID, "alert_one");
-    alertOne.put(MetaAlertDao.THREAT_FIELD_DEFAULT, 10.0d);
-    List<Document> alerts = new ArrayList<Document>() {{
-      add(new Document(alertOne, "", "", 0L));
-    }};
-
-    // Actually build the doc
-    Document actual = emaDao.buildCreateDocument(alerts, groups);
-
-    ArrayList<Map<String, Object>> alertList = new ArrayList<>();
-    alertList.add(alertOne);
-
-    Map<String, Object> actualDocument = actual.getDocument();
-    assertEquals(
-        MetaAlertStatus.ACTIVE.getStatusString(),
-        actualDocument.get(MetaAlertDao.STATUS_FIELD)
-    );
-    assertEquals(
-        alertList,
-        actualDocument.get(MetaAlertDao.ALERT_FIELD)
-    );
-    assertEquals(
-        groups,
-        actualDocument.get(MetaAlertDao.GROUPS_FIELD)
-    );
-
-    // Don't care about the result, just that it's a UUID. Exception will be thrown if not.
-    UUID.fromString((String) actualDocument.get(Constants.GUID));
-  }
-
-  @Test
-  public void testBuildCreateDocumentMultipleAlerts() throws InvalidCreateException, IOException {
-    ElasticsearchDao esDao = new ElasticsearchDao();
-    ElasticsearchMetaAlertDao emaDao = new ElasticsearchMetaAlertDao();
-    emaDao.init(esDao);
-
-    List<String> groups = new ArrayList<>();
-    groups.add("group_one");
-    groups.add("group_two");
-
-    // Build the first response from the multiget
-    Map<String, Object> alertOne = new HashMap<>();
-    alertOne.put(Constants.GUID, "alert_one");
-    alertOne.put(MetaAlertDao.THREAT_FIELD_DEFAULT, 10.0d);
-
-    // Build the second response from the multiget
-    Map<String, Object> alertTwo = new HashMap<>();
-    alertTwo.put(Constants.GUID, "alert_one");
-    alertTwo.put(MetaAlertDao.THREAT_FIELD_DEFAULT, 5.0d);
-    List<Document> alerts = new ArrayList<Document>() {{
-      add(new Document(alertOne, "", "", 0L));
-      add(new Document(alertTwo, "", "", 0L));
-    }};
-
-    // Actually build the doc
-    Document actual = emaDao.buildCreateDocument(alerts, groups);
-
-    ArrayList<Map<String, Object>> alertList = new ArrayList<>();
-    alertList.add(alertOne);
-    alertList.add(alertTwo);
-
-    Map<String, Object> actualDocument = actual.getDocument();
-    assertNotNull(actualDocument.get(Fields.TIMESTAMP.getName()));
-    assertEquals(
-        alertList,
-        actualDocument.get(MetaAlertDao.ALERT_FIELD)
-    );
-    assertEquals(
-        groups,
-        actualDocument.get(MetaAlertDao.GROUPS_FIELD)
-    );
-
-    // Don't care about the result, just that it's a UUID. Exception will be thrown if not.
-    UUID.fromString((String) actualDocument.get(Constants.GUID));
+  @Test(expected = IllegalArgumentException.class)
+  public void testInitInvalidDao() {
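+    // HBaseDao is not Elasticsearch-backed, so init() is expected to reject it.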
+    HBaseDao dao = new HBaseDao();
+    ElasticsearchMetaAlertDao esDao = new ElasticsearchMetaAlertDao();
+    esDao.init(dao, Optional.empty());
   }
 
   @Test(expected = InvalidCreateException.class)
@@ -206,8 +140,9 @@
   @Test(expected = InvalidCreateException.class)
   public void testCreateMetaAlertEmptyGroups() throws InvalidCreateException, IOException {
     ElasticsearchDao esDao = new ElasticsearchDao();
+    MultiIndexDao miDao = new MultiIndexDao(esDao);
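+    // Wrapping the ES DAO in a MultiIndexDao checks that init() can locate the
+    // Elasticsearch DAO inside a composite DAO.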
     ElasticsearchMetaAlertDao emaDao = new ElasticsearchMetaAlertDao();
-    emaDao.init(esDao);
+    emaDao.init(miDao);
 
     MetaAlertCreateRequest createRequest = new MetaAlertCreateRequest();
     createRequest.setAlerts(Collections.singletonList(new GetRequest("don't", "care")));
@@ -215,80 +150,16 @@
   }
 
   @Test
-  public void testCalculateMetaScoresList() {
-    final double delta = 0.001;
-    List<Map<String, Object>> alertList = new ArrayList<>();
-
-    // add an alert with a threat score
-    alertList.add( Collections.singletonMap(ElasticsearchMetaAlertDao.THREAT_TRIAGE_FIELD, 10.0f));
-
-    // add a second alert with a threat score
-    alertList.add( Collections.singletonMap(ElasticsearchMetaAlertDao.THREAT_TRIAGE_FIELD, 20.0f));
-
-    // add a third alert with NO threat score
-    alertList.add( Collections.singletonMap("alert3", "has no threat score"));
-
-    // create the metaalert
-    Map<String, Object> docMap = new HashMap<>();
-    docMap.put(MetaAlertDao.ALERT_FIELD, alertList);
-    Document metaalert = new Document(docMap, "guid", MetaAlertDao.METAALERT_TYPE, 0L);
-
-    // calculate the threat score for the metaalert
-    ElasticsearchMetaAlertDao metaAlertDao = new ElasticsearchMetaAlertDao();
-    metaAlertDao.calculateMetaScores(metaalert);
-    Object threatScore = metaalert.getDocument().get(ElasticsearchMetaAlertDao.THREAT_TRIAGE_FIELD);
-
-    // the metaalert must contain a summary of all child threat scores
-    assertEquals(20D, (Double) metaalert.getDocument().get("max"), delta);
-    assertEquals(10D, (Double) metaalert.getDocument().get("min"), delta);
-    assertEquals(15D, (Double) metaalert.getDocument().get("average"), delta);
-    assertEquals(2L, metaalert.getDocument().get("count"));
-    assertEquals(30D, (Double) metaalert.getDocument().get("sum"), delta);
-    assertEquals(15D, (Double) metaalert.getDocument().get("median"), delta);
-
-    // it must contain an overall threat score; a float to match the type of the threat score of the other sensor indices
-    assertTrue(threatScore instanceof Float);
-
-    // by default, the overall threat score is the sum of all child threat scores
-    assertEquals(30.0F, threatScore);
-  }
-
-  @Test
-  public void testCalculateMetaScoresWithDifferentFieldName() {
-    List<Map<String, Object>> alertList = new ArrayList<>();
-
-    // add an alert with a threat score
-    alertList.add( Collections.singletonMap(MetaAlertDao.THREAT_FIELD_DEFAULT, 10.0f));
-
-    // create the metaalert
-    Map<String, Object> docMap = new HashMap<>();
-    docMap.put(MetaAlertDao.ALERT_FIELD, alertList);
-    Document metaalert = new Document(docMap, "guid", MetaAlertDao.METAALERT_TYPE, 0L);
-
-    // Configure a different threat triage score field name
-    AccessConfig accessConfig = new AccessConfig();
-    accessConfig.setGlobalConfigSupplier(() -> new HashMap<String, Object>() {{
-      put(MetaAlertDao.THREAT_FIELD_PROPERTY, MetaAlertDao.THREAT_FIELD_DEFAULT);
-    }});
-    ElasticsearchDao elasticsearchDao = new ElasticsearchDao();
-    elasticsearchDao.setAccessConfig(accessConfig);
-
-    // calculate the threat score for the metaalert
-    ElasticsearchMetaAlertDao metaAlertDao = new ElasticsearchMetaAlertDao();
-    metaAlertDao.init(elasticsearchDao);
-    metaAlertDao.calculateMetaScores(metaalert);
-    assertNotNull(metaalert.getDocument().get(MetaAlertDao.THREAT_FIELD_DEFAULT));
-  }
-
-  @Test
   public void testUpdateShouldUpdateOnMissingMetaAlertIndex() throws Exception {
     ElasticsearchDao elasticsearchDao = mock(ElasticsearchDao.class);
-    ElasticsearchMetaAlertDao emaDao = spy(new ElasticsearchMetaAlertDao(elasticsearchDao));
+    ElasticsearchMetaAlertRetrieveLatestDao elasticsearchMetaAlertRetrieveLatestDao =
+        mock(ElasticsearchMetaAlertRetrieveLatestDao.class);
+    MetaAlertConfig metaAlertConfig = mock(MetaAlertConfig.class);
+    ElasticsearchMetaAlertUpdateDao emauDao = spy(new ElasticsearchMetaAlertUpdateDao(
+        elasticsearchDao, elasticsearchMetaAlertRetrieveLatestDao, metaAlertConfig, 1));
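+    // Spy the update DAO so getMetaAlertsForAlert can be stubbed to simulate a
+    // missing meta alerts index.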
 
-    doThrow(new IndexNotFoundException(METAALERTS_INDEX)).when(emaDao).getMetaAlertsForAlert("alert_one");
+    doThrow(new IndexNotFoundException(ElasticsearchMetaAlertDao.METAALERTS_INDEX))
+        .when(emauDao).getMetaAlertsForAlert("alert_one");
 
     Document update = new Document(new HashMap<>(), "alert_one", "", 0L);
-    emaDao.update(update, Optional.empty());
+    emauDao.update(update, Optional.empty());
 
     Map<Document, Optional<String>> expectedUpdate = new HashMap<Document, Optional<String>>() {{
       put(update, Optional.empty());
@@ -298,11 +169,14 @@
 
   @Test(expected = IndexNotFoundException.class)
   public void testUpdateShouldThrowExceptionOnMissingSensorIndex() throws Exception {
-    ElasticsearchMetaAlertDao emaDao = spy(new ElasticsearchMetaAlertDao());
+    ElasticsearchDao elasticsearchDao = mock(ElasticsearchDao.class);
+    ElasticsearchMetaAlertRetrieveLatestDao elasticsearchMetaAlertRetrieveLatestDao =
+        mock(ElasticsearchMetaAlertRetrieveLatestDao.class);
+    MetaAlertConfig metaAlertConfig = mock(MetaAlertConfig.class);
+    ElasticsearchMetaAlertUpdateDao emauDao = spy(new ElasticsearchMetaAlertUpdateDao(
+        elasticsearchDao, elasticsearchMetaAlertRetrieveLatestDao, metaAlertConfig, 1));
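+    // Same spy arrangement as above, but the stubbed IndexNotFoundException names a
+    // sensor index ("bro") rather than the meta alerts index.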
 
-    doThrow(new IndexNotFoundException("bro")).when(emaDao).getMetaAlertsForAlert("alert_one");
+    doThrow(new IndexNotFoundException("bro")).when(emauDao).getMetaAlertsForAlert("alert_one");
 
     Document update = new Document(new HashMap<>(), "alert_one", "", 0L);
-    emaDao.update(update, Optional.empty());
+    emauDao.update(update, Optional.empty());
   }
 }
diff --git a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchMetaAlertIntegrationTest.java b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchMetaAlertIntegrationTest.java
index 933fa2a..c05efc1 100644
--- a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchMetaAlertIntegrationTest.java
+++ b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchMetaAlertIntegrationTest.java
@@ -1,48 +1,44 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
 
 package org.apache.metron.elasticsearch.integration;
 
-import static org.apache.metron.common.Constants.SENSOR_TYPE;
-import static org.apache.metron.common.Constants.SENSOR_TYPE_FIELD_PROPERTY;
-import static org.apache.metron.indexing.dao.MetaAlertDao.ALERT_FIELD;
-import static org.apache.metron.indexing.dao.MetaAlertDao.METAALERTS_INDEX;
-import static org.apache.metron.indexing.dao.MetaAlertDao.METAALERT_FIELD;
-import static org.apache.metron.indexing.dao.MetaAlertDao.METAALERT_TYPE;
-import static org.apache.metron.indexing.dao.MetaAlertDao.STATUS_FIELD;
-import static org.apache.metron.indexing.dao.MetaAlertDao.THREAT_FIELD_PROPERTY;
+import static org.apache.metron.elasticsearch.dao.ElasticsearchMetaAlertDao.METAALERTS_INDEX;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.ALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_DOC;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_TYPE;
 
 import com.fasterxml.jackson.core.JsonProcessingException;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Iterables;
 import java.io.File;
 import java.io.IOException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.Set;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 
 import com.google.common.collect.ImmutableList;
@@ -54,52 +50,56 @@
 import org.apache.metron.elasticsearch.integration.components.ElasticSearchComponent;
 import org.apache.metron.indexing.dao.AccessConfig;
 import org.apache.metron.indexing.dao.IndexDao;
-import org.apache.metron.indexing.dao.MetaAlertDao;
-import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
-import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertIntegrationTest;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
 import org.apache.metron.indexing.dao.search.GetRequest;
-import org.apache.metron.indexing.dao.search.Group;
-import org.apache.metron.indexing.dao.search.GroupRequest;
-import org.apache.metron.indexing.dao.search.GroupResponse;
-import org.apache.metron.indexing.dao.search.GroupResult;
-import org.apache.metron.indexing.dao.search.InvalidSearchException;
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
-import org.apache.metron.indexing.dao.search.SearchResult;
 import org.apache.metron.indexing.dao.search.SortField;
-import org.apache.metron.indexing.dao.update.Document;
-import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
-import org.apache.metron.indexing.dao.update.PatchRequest;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
-public class ElasticsearchMetaAlertIntegrationTest {
+@RunWith(Parameterized.class)
+public class ElasticsearchMetaAlertIntegrationTest extends MetaAlertIntegrationTest {
 
-  private static final int MAX_RETRIES = 10;
-  private static final int SLEEP_MS = 500;
-  private static final String SENSOR_NAME = "test";
-  private static final String INDEX_DIR = "target/elasticsearch_meta";
-  private static final String DATE_FORMAT = "yyyy.MM.dd.HH";
-  private static final String INDEX =
-      SENSOR_NAME + "_index_" + new SimpleDateFormat(DATE_FORMAT).format(new Date());
-  private static final String NEW_FIELD = "new-field";
-  private static final String NAME_FIELD = "name";
-
-  private static ElasticsearchDao esDao;
-  private static MetaAlertDao metaDao;
+  private static IndexDao esDao;
   private static ElasticSearchComponent es;
 
+  protected static final String INDEX_DIR = "target/elasticsearch_meta";
+  private static String POSTFIX = new SimpleDateFormat(DATE_FORMAT).format(new Date());
+  private static final String INDEX_RAW = SENSOR_NAME + POSTFIX;
+  protected static final String INDEX = INDEX_RAW + "_index";
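+  // Index list each search targets; populated per run from the parameterized transform below.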
+  protected List<String> queryIndices = null;
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
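+    // Run the suite twice: once querying with the "*" wildcard and once with
+    // explicit index names (the "_index" suffix stripped).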
+    Function<List<String>, List<String>> asteriskTransform = x -> ImmutableList.of("*");
+    Function<List<String>, List<String>> explicitTransform =
+            allIndices -> allIndices.stream().map(x -> x.replace("_index", ""))
+                       .collect(Collectors.toCollection(ArrayList::new));
+    return Arrays.asList(new Object[][]{
+            {asteriskTransform},
+            {explicitTransform}
+    });
+  }
+
+  public ElasticsearchMetaAlertIntegrationTest(Function<List<String>, List<String>> queryIndices) {
+    this.queryIndices = queryIndices.apply(allIndices);
+  }
+
   /**
    {
      "properties": {
-       "metron_alert": {
-         "type": "nested"
-       }
+       "metron_alert": { "type": "nested" }
      }
    }
    */
@@ -107,84 +107,16 @@
   public static String nestedAlertMapping;
 
   /**
-   {
-     "guid": "meta_alert",
-     "index": "metaalert_index",
-     "patch": [
-       {
-         "op": "add",
-         "path": "/name",
-         "value": "New Meta Alert"
-       }
-     ],
-     "sensorType": "metaalert"
-   }
-   */
-  @Multiline
-  public static String namePatchRequest;
-
-  /**
-   {
-     "guid": "meta_alert",
-     "index": "metaalert_index",
-     "patch": [
-       {
-         "op": "add",
-         "path": "/name",
-         "value": "New Meta Alert"
-       },
-       {
-         "op": "add",
-         "path": "/alert",
-         "value": []
-       }
-     ],
-     "sensorType": "metaalert"
-   }
-   */
-  @Multiline
-  public static String alertPatchRequest;
-
-  /**
-   {
-     "guid": "meta_alert",
-     "index": "metaalert_index",
-     "patch": [
-       {
-         "op": "add",
-         "path": "/status",
-         "value": "inactive"
-       },
-       {
-         "op": "add",
-         "path": "/name",
-         "value": "New Meta Alert"
-       }
-     ],
-     "sensorType": "metaalert"
-   }
-   */
-  @Multiline
-  public static String statusPatchRequest;
-
-  /**
    * {
        "%MAPPING_NAME%_doc" : {
          "properties" : {
-           "guid" : {
-             "type" : "keyword"
-           },
-           "ip_src_addr" : {
-             "type" : "keyword"
-           },
-           "score" : {
-             "type" : "integer"
-           },
-           "metron_alert" : {
-             "type" : "nested"
-           }
+           "guid" : { "type" : "keyword" },
+           "ip_src_addr" : { "type" : "keyword" },
+           "score" : { "type" : "integer" },
+           "metron_alert" : { "type" : "nested" },
+           "source:type" : { "type" : "keyword"}
          }
-       }
+     }
    }
    */
   @Multiline
@@ -192,18 +124,21 @@
 
   @BeforeClass
   public static void setupBefore() throws Exception {
+    // Ensure ES can retry as needed.
+    MAX_RETRIES = 10;
+
     // setup the client
     es = new ElasticSearchComponent.Builder()
-        .withHttpPort(9211)
-        .withIndexDir(new File(INDEX_DIR))
-        .build();
+            .withHttpPort(9211)
+            .withIndexDir(new File(INDEX_DIR))
+            .build();
     es.start();
   }
 
   @Before
   public void setup() throws IOException {
-    es.createIndexWithMapping(METAALERTS_INDEX, MetaAlertDao.METAALERT_DOC, template.replace("%MAPPING_NAME%", "metaalert"));
-    es.createIndexWithMapping(INDEX, "index_doc", template.replace("%MAPPING_NAME%", "index"));
+    es.createIndexWithMapping(METAALERTS_INDEX, METAALERT_DOC, template.replace("%MAPPING_NAME%", METAALERT_TYPE));
+    es.createIndexWithMapping(INDEX, "test_doc", template.replace("%MAPPING_NAME%", "test"));
 
     AccessConfig accessConfig = new AccessConfig();
     Map<String, Object> globalConfig = new HashMap<String, Object>() {
@@ -220,7 +155,9 @@
 
     esDao = new ElasticsearchDao();
     esDao.init(accessConfig);
-    metaDao = new ElasticsearchMetaAlertDao(esDao);
+    ElasticsearchMetaAlertDao elasticsearchMetaDao = new ElasticsearchMetaAlertDao(esDao);
+    elasticsearchMetaDao.setPageSize(5);
+    metaDao = elasticsearchMetaDao;
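+    // A small page size forces the inherited integration tests to exercise result paging.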
   }
 
   @AfterClass
@@ -235,540 +172,8 @@
     es.reset();
   }
 
-
   @Test
-  public void shouldGetAllMetaAlertsForAlert() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(3);
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Load metaAlerts
-    List<Map<String, Object>> metaAlerts = buildMetaAlerts(12, MetaAlertStatus.ACTIVE,
-        Optional.of(Collections.singletonList(alerts.get(0))));
-    metaAlerts.add(buildMetaAlert("meta_active_12", MetaAlertStatus.ACTIVE,
-        Optional.of(Arrays.asList(alerts.get(0), alerts.get(2)))));
-    metaAlerts.add(buildMetaAlert("meta_inactive", MetaAlertStatus.INACTIVE,
-        Optional.of(Arrays.asList(alerts.get(0), alerts.get(2)))));
-    // We pass MetaAlertDao.METAALERT_TYPE, because the "_doc" gets appended automatically.
-    elasticsearchAdd(metaAlerts, METAALERTS_INDEX, MetaAlertDao.METAALERT_TYPE);
-
-    // Verify load was successful
-    List<GetRequest> createdDocs = metaAlerts.stream().map(metaAlert ->
-        new GetRequest((String) metaAlert.get(Constants.GUID), METAALERT_TYPE))
-        .collect(Collectors.toList());
-    createdDocs.addAll(alerts.stream().map(alert ->
-        new GetRequest((String) alert.get(Constants.GUID), SENSOR_NAME))
-        .collect(Collectors.toList()));
-    findCreatedDocs(createdDocs);
-
-    int previousPageSize = ((ElasticsearchMetaAlertDao) metaDao).getPageSize();
-    ((ElasticsearchMetaAlertDao) metaDao).setPageSize(5);
-
-    {
-      // Verify searches successfully return more than 10 results
-      SearchResponse searchResponse0 = metaDao.getAllMetaAlertsForAlert("message_0");
-      List<SearchResult> searchResults0 = searchResponse0.getResults();
-      Assert.assertEquals(13, searchResults0.size());
-      Set<Map<String, Object>> resultSet = new HashSet<>();
-      Iterables.addAll(resultSet, Iterables.transform(searchResults0, r -> r.getSource()));
-      StringBuffer reason = new StringBuffer("Unable to find " + metaAlerts.get(0) + "\n");
-      reason.append(Joiner.on("\n").join(resultSet));
-      Assert.assertTrue(reason.toString(), resultSet.contains(metaAlerts.get(0)));
-
-      // Verify no meta alerts are returned because message_1 was not added to any
-      SearchResponse searchResponse1 = metaDao.getAllMetaAlertsForAlert("message_1");
-      List<SearchResult> searchResults1 = searchResponse1.getResults();
-      Assert.assertEquals(0, searchResults1.size());
-
-      // Verify only the meta alert message_2 was added to is returned
-      SearchResponse searchResponse2 = metaDao.getAllMetaAlertsForAlert("message_2");
-      List<SearchResult> searchResults2 = searchResponse2.getResults();
-      Assert.assertEquals(1, searchResults2.size());
-      Assert.assertEquals(metaAlerts.get(12), searchResults2.get(0).getSource());
-    }
-    ((ElasticsearchMetaAlertDao) metaDao).setPageSize(previousPageSize);
-  }
-
-  @Test
-  public void getAllMetaAlertsForAlertShouldThrowExceptionForEmtpyGuid() throws Exception {
-    try {
-      metaDao.getAllMetaAlertsForAlert("");
-      Assert.fail("An exception should be thrown for empty guid");
-    } catch (InvalidSearchException ise) {
-      Assert.assertEquals("Guid cannot be empty", ise.getMessage());
-    }
-  }
-
-  @Test
-  public void shouldCreateMetaAlert() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(3);
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME),
-        new GetRequest("message_2", SENSOR_NAME)));
-
-    {
-      MetaAlertCreateRequest metaAlertCreateRequest = new MetaAlertCreateRequest() {{
-        setAlerts(new ArrayList<GetRequest>() {{
-          add(new GetRequest("message_1", SENSOR_NAME));
-          add(new GetRequest("message_2", SENSOR_NAME, INDEX));
-        }});
-        setGroups(Collections.singletonList("group"));
-      }};
-      MetaAlertCreateResponse metaAlertCreateResponse = metaDao.createMetaAlert(metaAlertCreateRequest);
-      {
-        // Verify metaAlert was created
-        findCreatedDoc(metaAlertCreateResponse.getGuid(), MetaAlertDao.METAALERT_TYPE);
-      }
-      {
-        // Verify metaalert has the default field names
-        Document metaAlert = metaDao.getLatest(metaAlertCreateResponse.getGuid(), MetaAlertDao.METAALERT_TYPE);
-        Assert.assertTrue(metaAlert.getDocument().containsKey(ElasticsearchMetaAlertDao.SOURCE_TYPE));
-        Assert.assertTrue(metaAlert.getDocument().containsKey(ElasticsearchMetaAlertDao.THREAT_TRIAGE_FIELD));
-      }
-      {
-        // Verify alert 0 was not updated with metaalert field
-        Document alert = metaDao.getLatest("message_0", SENSOR_NAME);
-        Assert.assertEquals(4, alert.getDocument().size());
-        Assert.assertNull(alert.getDocument().get(METAALERT_FIELD));
-      }
-      {
-        // Verify alert 1 was properly updated with metaalert field
-        Document alert = metaDao.getLatest("message_1", SENSOR_NAME);
-        Assert.assertEquals(5, alert.getDocument().size());
-        Assert.assertEquals(1, ((List) alert.getDocument().get(METAALERT_FIELD)).size());
-        Assert.assertEquals(metaAlertCreateResponse.getGuid(), ((List) alert.getDocument().get(METAALERT_FIELD)).get(0));
-      }
-      {
-        // Verify alert 2 was properly updated with metaalert field
-        Document alert = metaDao.getLatest("message_2", SENSOR_NAME);
-        Assert.assertEquals(5, alert.getDocument().size());
-        Assert.assertEquals(1, ((List) alert.getDocument().get(METAALERT_FIELD)).size());
-        Assert.assertEquals(metaAlertCreateResponse.getGuid(), ((List) alert.getDocument().get(METAALERT_FIELD)).get(0));
-      }
-    }
-  }
-
-  @Test
-  public void shouldCreateMetaAlertWithConfiguredFieldNames() throws Exception {
-    // Configure field names
-    AccessConfig accessConfig = esDao.getAccessConfig();
-    accessConfig.setGlobalConfigSupplier(() -> new HashMap<String, Object>() {{
-      put("es.date.format", DATE_FORMAT);
-      put(SENSOR_TYPE_FIELD_PROPERTY, SENSOR_TYPE);
-      put(THREAT_FIELD_PROPERTY, MetaAlertDao.THREAT_FIELD_DEFAULT);
-    }});
-
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(1);
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Verify load was successful
-    findCreatedDocs(Collections.singletonList(
-            new GetRequest("message_0", SENSOR_NAME)));
-
-    {
-      MetaAlertCreateRequest metaAlertCreateRequest = new MetaAlertCreateRequest() {{
-        setAlerts(new ArrayList<GetRequest>() {{
-          add(new GetRequest("message_0", SENSOR_NAME));
-        }});
-        setGroups(Collections.singletonList("group"));
-      }};
-      MetaAlertCreateResponse metaAlertCreateResponse = metaDao.createMetaAlert(metaAlertCreateRequest);
-      {
-        // Verify metaAlert was created
-        findCreatedDoc(metaAlertCreateResponse.getGuid(), MetaAlertDao.METAALERT_TYPE);
-      }
-      {
-        // Verify alert 0 was not updated with metaalert field
-        Document metaAlert = metaDao.getLatest(metaAlertCreateResponse.getGuid(), MetaAlertDao.METAALERT_TYPE);
-        Assert.assertTrue(metaAlert.getDocument().containsKey(SENSOR_TYPE));
-        Assert.assertTrue(metaAlert.getDocument().containsKey(MetaAlertDao.THREAT_FIELD_DEFAULT));
-      }
-    }
-  }
-
-  @Test
-  public void shouldAddAlertsToMetaAlert() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(4);
-    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Load metaAlert
-    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
-        Optional.of(Collections.singletonList(alerts.get(0))));
-    elasticsearchAdd(Collections.singletonList(metaAlert), METAALERTS_INDEX, METAALERT_TYPE);
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME),
-        new GetRequest("message_2", SENSOR_NAME),
-        new GetRequest("message_3", SENSOR_NAME),
-        new GetRequest("meta_alert", METAALERT_TYPE)));
-
-    // Build expected metaAlert after alerts are added
-    Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
-
-    // Verify the proper alerts were added
-    List<Map<String, Object>> metaAlertAlerts = new ArrayList<>((List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
-    Map<String, Object> expectedAlert0 = alerts.get(0);
-    Map<String, Object> expectedAlert1 = alerts.get(1);
-    expectedAlert1.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    metaAlertAlerts.add(expectedAlert1);
-    Map<String, Object> expectedAlert2 = alerts.get(2);
-    expectedAlert2.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    metaAlertAlerts.add(expectedAlert2);
-    expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
-
-    // Verify the counts were properly updated
-    expectedMetaAlert.put("average", 1.0d);
-    expectedMetaAlert.put("min", 0.0d);
-    expectedMetaAlert.put("median", 1.0d);
-    expectedMetaAlert.put("max", 2.0d);
-    expectedMetaAlert.put("count", 3);
-    expectedMetaAlert.put("sum", 3.0d);
-    expectedMetaAlert.put("threat:triage:score", 3.0d);
-
-    {
-      // Verify alerts were successfully added to the meta alert
-      Assert.assertTrue(metaDao.addAlertsToMetaAlert("meta_alert", Arrays.asList(new GetRequest("message_1", SENSOR_NAME), new GetRequest("message_2", SENSOR_NAME))));
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-    {
-      // Verify False when alerts are already in a meta alert and no new alerts are added
-      Assert.assertFalse(metaDao.addAlertsToMetaAlert("meta_alert", Arrays.asList(new GetRequest("message_0", SENSOR_NAME), new GetRequest("message_1", SENSOR_NAME))));
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-    {
-      // Verify only 1 alert is added when a list of alerts only contains 1 alert that is not in the meta alert
-      metaAlertAlerts = new ArrayList<>((List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
-      Map<String, Object> expectedAlert3 = alerts.get(3);
-      expectedAlert3.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-      metaAlertAlerts.add(expectedAlert3);
-      expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
-
-      expectedMetaAlert.put("average", 1.5d);
-      expectedMetaAlert.put("min", 0.0d);
-      expectedMetaAlert.put("median", 1.5d);
-      expectedMetaAlert.put("max", 3.0d);
-      expectedMetaAlert.put("count", 4);
-      expectedMetaAlert.put("sum", 6.0d);
-      expectedMetaAlert.put("threat:triage:score", 6.0d);
-
-      Assert.assertTrue(metaDao.addAlertsToMetaAlert("meta_alert", Arrays.asList(new GetRequest("message_2", SENSOR_NAME), new GetRequest("message_3", SENSOR_NAME))));
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-  }
-
-  @Test
-  public void shouldRemoveAlertsFromMetaAlert() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(4);
-    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    alerts.get(1).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    alerts.get(2).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    alerts.get(3).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Load metaAlert
-    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
-        Optional.of(Arrays.asList(alerts.get(0), alerts.get(1), alerts.get(2), alerts.get(3))));
-    elasticsearchAdd(Collections.singletonList(metaAlert), METAALERTS_INDEX, METAALERT_TYPE);
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME),
-        new GetRequest("message_2", SENSOR_NAME),
-        new GetRequest("message_3", SENSOR_NAME),
-        new GetRequest("meta_alert", METAALERT_TYPE)));
-
-    // Build expected metaAlert after alerts are added
-    Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
-
-    // Verify the proper alerts were added
-    List<Map<String, Object>> metaAlertAlerts = new ArrayList<>((List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
-    metaAlertAlerts.remove(0);
-    metaAlertAlerts.remove(0);
-    expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
-
-    // Verify the counts were properly updated
-    expectedMetaAlert.put("average", 2.5d);
-    expectedMetaAlert.put("min", 2.0d);
-    expectedMetaAlert.put("median", 2.5d);
-    expectedMetaAlert.put("max", 3.0d);
-    expectedMetaAlert.put("count", 2);
-    expectedMetaAlert.put("sum", 5.0d);
-    expectedMetaAlert.put("threat:triage:score", 5.0d);
-
-
-    {
-      // Verify a list of alerts are removed from a meta alert
-      Assert.assertTrue(metaDao.removeAlertsFromMetaAlert("meta_alert", Arrays.asList(new GetRequest("message_0", SENSOR_NAME), new GetRequest("message_1", SENSOR_NAME))));
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-    {
-      // Verify False when alerts are not present in a meta alert and no alerts are removed
-      Assert.assertFalse(metaDao.removeAlertsFromMetaAlert("meta_alert", Arrays.asList(new GetRequest("message_0", SENSOR_NAME), new GetRequest("message_1", SENSOR_NAME))));
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-    {
-      // Verify only 1 alert is removed when a list of alerts only contains 1 alert that is in the meta alert
-      metaAlertAlerts = new ArrayList<>((List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
-      metaAlertAlerts.remove(0);
-      expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
-
-      expectedMetaAlert.put("average", 3.0d);
-      expectedMetaAlert.put("min", 3.0d);
-      expectedMetaAlert.put("median", 3.0d);
-      expectedMetaAlert.put("max", 3.0d);
-      expectedMetaAlert.put("count", 1);
-      expectedMetaAlert.put("sum", 3.0d);
-      expectedMetaAlert.put("threat:triage:score", 3.0d);
-
-      Assert.assertTrue(metaDao.removeAlertsFromMetaAlert("meta_alert", Arrays.asList(new GetRequest("message_0", SENSOR_NAME), new GetRequest("message_2", SENSOR_NAME))));
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-    {
-      // Verify all alerts are removed from a metaAlert
-      metaAlertAlerts = new ArrayList<>((List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
-      metaAlertAlerts.remove(0);
-      expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
-
-      expectedMetaAlert.put("average", 0.0d);
-      expectedMetaAlert.put("min", "Infinity");
-      expectedMetaAlert.put("median", "NaN");
-      expectedMetaAlert.put("max", "-Infinity");
-      expectedMetaAlert.put("count", 0);
-      expectedMetaAlert.put("sum", 0.0d);
-      expectedMetaAlert.put("threat:triage:score", 0.0d);
-
-      Assert.assertTrue(metaDao.removeAlertsFromMetaAlert("meta_alert",
-          Collections.singletonList(new GetRequest("message_3", SENSOR_NAME))));
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-  }
-
-  @Test
-  public void addRemoveAlertsShouldThrowExceptionForInactiveMetaAlert() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(2);
-    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Load metaAlert
-    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.INACTIVE,
-        Optional.of(Collections.singletonList(alerts.get(0))));
-    elasticsearchAdd(Collections.singletonList(metaAlert), METAALERTS_INDEX, METAALERT_TYPE);
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME),
-        new GetRequest("meta_alert", METAALERT_TYPE)));
-
-    {
-      // Verify alerts cannot be added to an INACTIVE meta alert
-      try {
-        metaDao.addAlertsToMetaAlert("meta_alert",
-            Collections.singletonList(new GetRequest("message_1", SENSOR_NAME)));
-        Assert.fail("Adding alerts to an inactive meta alert should throw an exception");
-      } catch (IllegalStateException ise) {
-        Assert.assertEquals("Adding alerts to an INACTIVE meta alert is not allowed", ise.getMessage());
-      }
-    }
-
-    {
-      // Verify alerts cannot be removed from an INACTIVE meta alert
-      try {
-        metaDao.removeAlertsFromMetaAlert("meta_alert",
-            Collections.singletonList(new GetRequest("message_0", SENSOR_NAME)));
-        Assert.fail("Removing alerts from an inactive meta alert should throw an exception");
-      } catch (IllegalStateException ise) {
-        Assert.assertEquals("Removing alerts from an INACTIVE meta alert is not allowed", ise.getMessage());
-      }
-    }
-  }
-
-  @Test
-  public void shouldUpdateMetaAlertStatus() throws Exception {
-    int numChildAlerts = 25;
-    int numUnrelatedAlerts = 25;
-    int totalAlerts = numChildAlerts + numUnrelatedAlerts;
-
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(totalAlerts);
-    List<Map<String, Object>> childAlerts = alerts.subList(0, numChildAlerts);
-    List<Map<String, Object>> unrelatedAlerts = alerts.subList(numChildAlerts, totalAlerts);
-    for (Map<String, Object> alert : childAlerts) {
-      alert.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
-    }
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Load metaAlerts
-    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
-        Optional.of(childAlerts));
-    // We pass MetaAlertDao.METAALERT_TYPE, because the "_doc" gets appended automatically.
-    elasticsearchAdd(Collections.singletonList(metaAlert), METAALERTS_INDEX,
-        MetaAlertDao.METAALERT_TYPE);
-
-    List<GetRequest> requests = new ArrayList<>();
-    for (int i = 0; i < numChildAlerts; ++i) {
-      requests.add(new GetRequest("message_" + i, SENSOR_NAME));
-    }
-    requests.add(new GetRequest("meta_alert", METAALERT_TYPE));
-
-    // Verify load was successful
-    findCreatedDocs(requests);
-
-    {
-      // Verify status changed to inactive and child alerts are updated
-      Assert.assertTrue(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.INACTIVE));
-
-      Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
-      expectedMetaAlert.put(STATUS_FIELD, MetaAlertStatus.INACTIVE.getStatusString());
-
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-
-      for (int i = 0; i < numChildAlerts; ++i) {
-        Map<String, Object> expectedAlert = new HashMap<>(childAlerts.get(i));
-        expectedAlert.put("metaalerts", new ArrayList());
-        findUpdatedDoc(expectedAlert, "message_" + i, SENSOR_NAME);
-      }
-
-      // Ensure unrelated alerts are unaffected
-      for (int i = 0; i < numUnrelatedAlerts; ++i) {
-        Map<String, Object> expectedAlert = new HashMap<>(unrelatedAlerts.get(i));
-        // Make sure to handle the guid offset from creation
-        findUpdatedDoc(expectedAlert, "message_" + (i + numChildAlerts), SENSOR_NAME);
-      }
-    }
-
-    {
-      // Verify status changed to active and metaalerts fields on child alerts are updated
-      Assert.assertTrue(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.ACTIVE));
-
-      Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
-      expectedMetaAlert.put(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
-
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-
-      for (int i = 0; i < numChildAlerts; ++i) {
-        Map<String, Object> expectedAlert = new HashMap<>(alerts.get(i));
-        expectedAlert.put("metaalerts", Collections.singletonList("meta_alert"));
-        findUpdatedDoc(expectedAlert, "message_" + i, SENSOR_NAME);
-      }
-
-      // Ensure unrelated alerts are unaffected
-      for (int i = 0; i < numUnrelatedAlerts; ++i) {
-        Map<String, Object> expectedAlert = new HashMap<>(unrelatedAlerts.get(i));
-        // Make sure to handle the guid offset from creation
-        findUpdatedDoc(expectedAlert, "message_" + (i + numChildAlerts), SENSOR_NAME);
-      }
-
-      {
-        // Verify status changed to current status has no effect
-        Assert.assertFalse(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.ACTIVE));
-
-        findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-
-        for (int i = 0; i < numChildAlerts; ++i) {
-          Map<String, Object> expectedAlert = new HashMap<>(alerts.get(i));
-          expectedAlert.put("metaalerts", Collections.singletonList("meta_alert"));
-          findUpdatedDoc(expectedAlert, "message_" + i, SENSOR_NAME);
-        }
-
-        // Ensure unrelated alerts are unaffected
-        for (int i = 0; i < numUnrelatedAlerts; ++i) {
-          Map<String, Object> expectedAlert = new HashMap<>(unrelatedAlerts.get(i));
-          // Make sure to handle the guid offset from creation
-          findUpdatedDoc(expectedAlert, "message_" + (i + numChildAlerts), SENSOR_NAME);
-        }
-      }
-    }
-
-    {
-      // Verify status changed to active and meta alert child alerts are refreshed
-      Assert.assertTrue(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.INACTIVE));
-
-      Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
-      expectedMetaAlert.put(STATUS_FIELD, MetaAlertStatus.INACTIVE.getStatusString());
-
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-
-      {
-        // Update a child alert by adding a field
-        Document alert0 = esDao.getLatest("message_0", SENSOR_NAME);
-        alert0.getDocument().put("field", "value");
-        esDao.update(alert0, Optional.empty());
-
-        findUpdatedDoc(alert0.getDocument(), "message_0", SENSOR_NAME);
-
-        // Change the status to active
-        Assert.assertTrue(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.ACTIVE));
-
-        // Expect the first child alert to also contain the update
-        expectedMetaAlert.put(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
-        List<Map<String, Object>> expectedAlerts = (List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD);
-        expectedAlerts.get(0).put("field", "value");
-
-        // Verify the metaalert child alerts were refreshed and the new field is present
-        findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-      }
-
-    }
-  }
-
-  @Test
-  public void shouldSearchByStatus() throws Exception {
-    // Load metaAlerts
-    Map<String, Object> activeMetaAlert = buildMetaAlert("meta_active", MetaAlertStatus.ACTIVE,
-        Optional.empty());
-    Map<String, Object> inactiveMetaAlert = buildMetaAlert("meta_inactive", MetaAlertStatus.INACTIVE,
-        Optional.empty());
-
-
-    // We pass MetaAlertDao.METAALERT_TYPE, because the "_doc" gets appended automatically.
-    elasticsearchAdd(Arrays.asList(activeMetaAlert, inactiveMetaAlert), METAALERTS_INDEX, MetaAlertDao.METAALERT_TYPE);
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("meta_active", METAALERT_TYPE),
-        new GetRequest("meta_inactive", METAALERT_TYPE)));
-
-    SearchResponse searchResponse = metaDao.search(new SearchRequest() {
-      {
-        setQuery("*");
-        setIndices(Collections.singletonList(MetaAlertDao.METAALERT_TYPE));
-        setFrom(0);
-        setSize(5);
-        setSort(Collections.singletonList(new SortField() {{
-          setField(Constants.GUID);
-        }}));
-      }
-    });
-
-    // Verify only active meta alerts are returned
-    Assert.assertEquals(1, searchResponse.getTotal());
-    Assert.assertEquals(MetaAlertStatus.ACTIVE.getStatusString(),
-        searchResponse.getResults().get(0).getSource().get(MetaAlertDao.STATUS_FIELD));
-  }
-
-
-  @Test
+  @Override
   public void shouldSearchByNestedAlert() throws Exception {
     // Load alerts
     List<Map<String, Object>> alerts = buildAlerts(4);
@@ -782,37 +187,35 @@
     alerts.get(2).put("ip_src_port", 8008);
     alerts.get(3).put("ip_src_addr", "192.168.1.4");
     alerts.get(3).put("ip_src_port", 8007);
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
+    addRecords(alerts, INDEX, SENSOR_NAME);
 
     // Put the nested type into the test index, so that it'll match appropriately
-    ((ElasticsearchDao) esDao).getClient().admin().indices().preparePutMapping(INDEX)
-        .setType("test_doc")
-        .setSource(nestedAlertMapping)
-        .get();
+    setupTypings();
 
     // Load metaAlerts
     Map<String, Object> activeMetaAlert = buildMetaAlert("meta_active", MetaAlertStatus.ACTIVE,
-        Optional.of(Arrays.asList(alerts.get(0), alerts.get(1))));
-    Map<String, Object> inactiveMetaAlert = buildMetaAlert("meta_inactive", MetaAlertStatus.INACTIVE,
-        Optional.of(Arrays.asList(alerts.get(2), alerts.get(3))));
+            Optional.of(Arrays.asList(alerts.get(0), alerts.get(1))));
+    Map<String, Object> inactiveMetaAlert = buildMetaAlert("meta_inactive",
+            MetaAlertStatus.INACTIVE,
+            Optional.of(Arrays.asList(alerts.get(2), alerts.get(3))));
     // We pass METAALERT_TYPE, because the "_doc" suffix gets appended automatically.
-    elasticsearchAdd(Arrays.asList(activeMetaAlert, inactiveMetaAlert), METAALERTS_INDEX, MetaAlertDao.METAALERT_TYPE);
+    addRecords(Arrays.asList(activeMetaAlert, inactiveMetaAlert), METAALERTS_INDEX,
+            METAALERT_TYPE);
 
     // Verify load was successful
     findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME),
-        new GetRequest("message_2", SENSOR_NAME),
-        new GetRequest("message_3", SENSOR_NAME),
-        new GetRequest("meta_active", METAALERT_TYPE),
-        new GetRequest("meta_inactive", METAALERT_TYPE)));
-
+            new GetRequest("message_0", SENSOR_NAME),
+            new GetRequest("message_1", SENSOR_NAME),
+            new GetRequest("message_2", SENSOR_NAME),
+            new GetRequest("message_3", SENSOR_NAME),
+            new GetRequest("meta_active", METAALERT_TYPE),
+            new GetRequest("meta_inactive", METAALERT_TYPE)));
 
     SearchResponse searchResponse = metaDao.search(new SearchRequest() {
       {
         setQuery(
-            "(ip_src_addr:192.168.1.1 AND ip_src_port:8009) OR (metron_alert.ip_src_addr:192.168.1.1 AND metron_alert.ip_src_port:8009)");
-        setIndices(Collections.singletonList(MetaAlertDao.METAALERT_TYPE));
+                "(ip_src_addr:192.168.1.1 AND ip_src_port:8009) OR (metron_alert.ip_src_addr:192.168.1.1 AND metron_alert.ip_src_port:8009)");
+        setIndices(Collections.singletonList(METAALERT_TYPE));
         setFrom(0);
         setSize(5);
         setSort(Collections.singletonList(new SortField() {
@@ -830,9 +233,9 @@
     searchResponse = metaDao.search(new SearchRequest() {
       {
         setQuery(
-            "(ip_src_addr:192.168.1.1 AND ip_src_port:8010)"
-                + " OR (metron_alert.ip_src_addr:192.168.1.1 AND metron_alert.ip_src_port:8010)");
-        setIndices(Collections.singletonList("*"));
+                "(ip_src_addr:192.168.1.1 AND ip_src_port:8010)"
+                        + " OR (metron_alert.ip_src_addr:192.168.1.1 AND metron_alert.ip_src_port:8010)");
+        setIndices(queryIndices);
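+        // queryIndices comes from the test parameter: either "*" or the explicit index names.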
         setFrom(0);
         setSize(5);
         setSort(Collections.singletonList(new SortField() {
@@ -846,15 +249,15 @@
     // Nested query should match a nested alert
     Assert.assertEquals(1, searchResponse.getTotal());
     Assert.assertEquals("meta_active",
-        searchResponse.getResults().get(0).getSource().get("guid"));
+            searchResponse.getResults().get(0).getSource().get("guid"));
 
     // Query against all indices. The child alert has no actual attached meta alerts, and should
     // be returned on its own.
-   searchResponse = metaDao.search(new SearchRequest() {
+    searchResponse = metaDao.search(new SearchRequest() {
       {
         setQuery(
-            "(ip_src_addr:192.168.1.3 AND ip_src_port:8008)"
-                + " OR (metron_alert.ip_src_addr:192.168.1.3 AND metron_alert.ip_src_port:8008)");
+                "(ip_src_addr:192.168.1.3 AND ip_src_port:8008)"
+                        + " OR (metron_alert.ip_src_addr:192.168.1.3 AND metron_alert.ip_src_port:8008)");
         setIndices(Collections.singletonList("*"));
         setFrom(0);
         setSize(1);
@@ -869,398 +272,107 @@
     // Nested query should match a plain alert
     Assert.assertEquals(1, searchResponse.getTotal());
     Assert.assertEquals("message_2",
-        searchResponse.getResults().get(0).getSource().get("guid"));
+            searchResponse.getResults().get(0).getSource().get("guid"));
   }
 
-  @Test
-  public void shouldHidesAlertsOnGroup() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(2);
-    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
-    alerts.get(0).put("ip_src_addr", "192.168.1.1");
-    alerts.get(0).put("score_field", 1);
-    alerts.get(1).put("ip_src_addr", "192.168.1.1");
-    alerts.get(1).put("score_field", 10);
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-
-    // Put the nested type into the test index, so that it'll match appropriately
-    ((ElasticsearchDao) esDao).getClient().admin().indices().preparePutMapping(INDEX)
-        .setType("test_doc")
-        .setSource(nestedAlertMapping)
-        .get();
-
-    // Don't need any meta alerts to actually exist, since we've populated the field on the alerts.
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME)));
-
-    // Build our group request
-    Group searchGroup = new Group();
-    searchGroup.setField("ip_src_addr");
-    List<Group> groupList = new ArrayList<>();
-    groupList.add(searchGroup);
-    GroupResponse groupResponse = metaDao.group(new GroupRequest() {
-      {
-        setQuery("ip_src_addr:192.168.1.1");
-        setIndices(Collections.singletonList("*"));
-        setScoreField("score_field");
-        setGroups(groupList);
-    }});
-
-    // Should only return the standalone alert in the group
-    GroupResult result = groupResponse.getGroupResults().get(0);
-    Assert.assertEquals(1, result.getTotal());
-    Assert.assertEquals("192.168.1.1", result.getKey());
-    // No delta, since no ops happen
-    Assert.assertEquals(10.0d, result.getScore(), 0.0d);
-  }
-
-  @SuppressWarnings("unchecked")
-  @Test
-  public void shouldUpdateMetaAlertOnAlertUpdate() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(2);
-    alerts.get(0).put(METAALERT_FIELD, Arrays.asList("meta_active", "meta_inactive"));
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Load metaAlerts
-    Map<String, Object> activeMetaAlert = buildMetaAlert("meta_active", MetaAlertStatus.ACTIVE,
-        Optional.of(Collections.singletonList(alerts.get(0))));
-    Map<String, Object> inactiveMetaAlert = buildMetaAlert("meta_inactive", MetaAlertStatus.INACTIVE,
-        Optional.of(Collections.singletonList(alerts.get(0))));
-    // We pass MetaAlertDao.METAALERT_TYPE, because the "_doc" gets appended automatically.
-    elasticsearchAdd(Arrays.asList(activeMetaAlert, inactiveMetaAlert), METAALERTS_INDEX, METAALERT_TYPE);
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME),
-        new GetRequest("meta_active", METAALERT_TYPE),
-        new GetRequest("meta_inactive", METAALERT_TYPE)));
-
-    {
-      // Modify the first message and add a new field
-      Map<String, Object> message0 = new HashMap<String, Object>(alerts.get(0)) {
-        {
-          put(NEW_FIELD, "metron");
-          put(MetaAlertDao.THREAT_FIELD_DEFAULT, "10");
-        }
-      };
-      String guid = "" + message0.get(Constants.GUID);
-      metaDao.update(new Document(message0, guid, SENSOR_NAME, null), Optional.empty());
-
-      {
-        // Verify alerts in ES are up-to-date
-        findUpdatedDoc(message0, guid, SENSOR_NAME);
-        long cnt = getMatchingAlertCount(NEW_FIELD, message0.get(NEW_FIELD));
-        if (cnt == 0) {
-          Assert.fail("Elasticsearch alert not updated!");
-        }
-      }
-
-      {
-        // Verify meta alerts in ES are up-to-date
-        long cnt = getMatchingMetaAlertCount(NEW_FIELD, "metron");
-        if (cnt == 0) {
-          Assert.fail("Active metaalert was not updated!");
-        }
-        if (cnt != 1) {
-          Assert.fail("Elasticsearch metaalerts not updated correctly!");
-        }
-      }
-    }
-    //modify the same message and modify the new field
-    {
-      Map<String, Object> message0 = new HashMap<String, Object>(alerts.get(0)) {
-        {
-          put(NEW_FIELD, "metron2");
-        }
-      };
-      String guid = "" + message0.get(Constants.GUID);
-      metaDao.update(new Document(message0, guid, SENSOR_NAME, null), Optional.empty());
-
-      {
-        // Verify ES is up-to-date
-        findUpdatedDoc(message0, guid, SENSOR_NAME);
-        long cnt = getMatchingAlertCount(NEW_FIELD, message0.get(NEW_FIELD));
-        if (cnt == 0) {
-          Assert.fail("Elasticsearch alert not updated!");
-        }
-      }
-      {
-        // Verify meta alerts in ES are up-to-date
-        long cnt = getMatchingMetaAlertCount(NEW_FIELD, "metron2");
-        if (cnt == 0) {
-          Assert.fail("Active metaalert was not updated!");
-        }
-        if (cnt != 1) {
-          Assert.fail("Elasticsearch metaalerts not updated correctly!");
-        }
-      }
-    }
-    //modify the same message and modify the new field with the patch method
-    {
-      Map<String, Object> message0 = new HashMap<String, Object>(alerts.get(0)) {
-        {
-          put(NEW_FIELD, "metron3");
-        }
-      };
-      String guid = "" + message0.get(Constants.GUID);
-      PatchRequest patchRequest = new PatchRequest();
-      patchRequest.setGuid(guid);
-      patchRequest.setIndex(INDEX);
-      patchRequest.setSensorType(SENSOR_NAME);
-      patchRequest.setPatch(Collections.singletonList(new HashMap<String, Object>() {{
-        put("op", "replace");
-        put("path", "/" + NEW_FIELD);
-        put("value", "metron3");
-      }}));
-
-      metaDao.patch(patchRequest, Optional.empty());
-
-      {
-        // Verify ES is up-to-date
-        findUpdatedDoc(message0, guid, SENSOR_NAME);
-        long cnt = getMatchingAlertCount(NEW_FIELD, message0.get(NEW_FIELD));
-        if (cnt == 0) {
-          Assert.fail("Elasticsearch alert not updated!");
-        }
-      }
-      {
-        // Verify meta alerts in ES are up-to-date
-        long cnt = getMatchingMetaAlertCount(NEW_FIELD, "metron3");
-        if (cnt == 0) {
-          Assert.fail("Active metaalert was not updated!");
-        }
-        if (cnt != 1) {
-          Assert.fail("Elasticsearch metaalerts not updated correctly!");
-        }
-      }
-    }
-  }
-
-  @Test
-  public void shouldThrowExceptionOnMetaAlertUpdate() throws Exception {
-    Document metaAlert = new Document(new HashMap<>(), "meta_alert", METAALERT_TYPE, 0L);
-    try {
-      // Verify a meta alert cannot be updated in the meta alert dao
-      metaDao.update(metaAlert, Optional.empty());
-      Assert.fail("Direct meta alert update should throw an exception");
-    } catch (UnsupportedOperationException uoe) {
-      Assert.assertEquals("Meta alerts cannot be directly updated", uoe.getMessage());
-    }
-  }
-  @Test
-  public void shouldPatchAllowedMetaAlerts() throws Exception {
-    // Load alerts
-    List<Map<String, Object>> alerts = buildAlerts(2);
-    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
-    alerts.get(1).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
-    elasticsearchAdd(alerts, INDEX, SENSOR_NAME);
-
-    // Put the nested type into the test index, so that it'll match appropriately
-    ((ElasticsearchDao) esDao).getClient().admin().indices().preparePutMapping(INDEX)
-        .setType("test_doc")
-        .setSource(nestedAlertMapping)
-        .get();
-
-    // Load metaAlerts
-    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
-        Optional.of(Arrays.asList(alerts.get(0), alerts.get(1))));
-    // We pass MetaAlertDao.METAALERT_TYPE, because the "_doc" gets appended automatically.
-    elasticsearchAdd(Collections.singletonList(metaAlert), METAALERTS_INDEX, MetaAlertDao.METAALERT_TYPE);
-
-    // Verify load was successful
-    findCreatedDocs(Arrays.asList(
-        new GetRequest("message_0", SENSOR_NAME),
-        new GetRequest("message_1", SENSOR_NAME),
-        new GetRequest("meta_alert", METAALERT_TYPE)));
-
-    Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
-    expectedMetaAlert.put(NAME_FIELD, "New Meta Alert");
-    {
-      // Verify a patch to a field other than "status" or "alert" can be patched
-      PatchRequest patchRequest = JSONUtils.INSTANCE.load(namePatchRequest, PatchRequest.class);
-      metaDao.patch(patchRequest, Optional.of(System.currentTimeMillis()));
-
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-    {
-      // Verify a patch to an alert field should throw an exception
-      try {
-        PatchRequest patchRequest = JSONUtils.INSTANCE.load(alertPatchRequest, PatchRequest.class);
-        metaDao.patch(patchRequest, Optional.of(System.currentTimeMillis()));
-
-        Assert.fail("A patch on the alert field should throw an exception");
-      } catch (IllegalArgumentException iae) {
-        Assert.assertEquals("Meta alert patches are not allowed for /alert or /status paths.  "
-            + "Please use the add/remove alert or update status functions instead.", iae.getMessage());
-      }
-
-      // Verify the metaAlert was not updated
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-
-    {
-      // Verify a patch to a status field should throw an exception
-      try {
-        PatchRequest patchRequest = JSONUtils.INSTANCE.load(statusPatchRequest, PatchRequest.class);
-        metaDao.patch(patchRequest, Optional.of(System.currentTimeMillis()));
-
-        Assert.fail("A patch on the status field should throw an exception");
-      } catch (IllegalArgumentException iae) {
-        Assert.assertEquals("Meta alert patches are not allowed for /alert or /status paths.  "
-            + "Please use the add/remove alert or update status functions instead.", iae.getMessage());
-      }
-
-      // Verify the metaAlert was not updated
-      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
-    }
-  }
-
-  protected long getMatchingAlertCount(String fieldName, Object fieldValue) throws IOException, InterruptedException {
+  @Override
+  protected long getMatchingAlertCount(String fieldName, Object fieldValue)
+          throws IOException, InterruptedException {
     long cnt = 0;
     for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
-      List<Map<String, Object>> docs = es.getAllIndexedDocs(INDEX, SENSOR_NAME + "_doc");
+      List<Map<String, Object>> docs = es
+              .getAllIndexedDocs(INDEX, SENSOR_NAME + "_doc");
       cnt = docs
-          .stream()
-          .filter(d -> {
-            Object newfield = d.get(fieldName);
-            return newfield != null && newfield.equals(fieldValue);
-          }).count();
+              .stream()
+              .filter(d -> {
+                Object newfield = d.get(fieldName);
+                return newfield != null && newfield.equals(fieldValue);
+              }).count();
     }
     return cnt;
   }
 
-  protected long getMatchingMetaAlertCount(String fieldName, String fieldValue) throws IOException, InterruptedException {
+  @Override
+  protected long getMatchingMetaAlertCount(String fieldName, String fieldValue)
+          throws IOException, InterruptedException {
     long cnt = 0;
     for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
-      List<Map<String, Object>> docs = es.getAllIndexedDocs(METAALERTS_INDEX, MetaAlertDao.METAALERT_DOC);
+      List<Map<String, Object>> docs = es
+              .getAllIndexedDocs(METAALERTS_INDEX, METAALERT_DOC);
       cnt = docs
-          .stream()
-          .filter(d -> {
-            List<Map<String, Object>> alerts = (List<Map<String, Object>>) d
-                .get(MetaAlertDao.ALERT_FIELD);
+              .stream()
+              .filter(d -> {
+                @SuppressWarnings("unchecked")
+                List<Map<String, Object>> alerts = (List<Map<String, Object>>) d
+                        .get(ALERT_FIELD);
 
-            for (Map<String, Object> alert : alerts) {
-              Object newField = alert.get(fieldName);
-              if (newField != null && newField.equals(fieldValue)) {
-                return true;
+                for (Map<String, Object> alert : alerts) {
+                  Object newField = alert.get(fieldName);
+                  if (newField != null && newField.equals(fieldValue)) {
+                    return true;
+                  }
+                }
+
+                return false;
+              }).count();
+    }
+    return cnt;
+  }
+
+  @Override
+  protected void addRecords(List<Map<String, Object>> inputData, String index, String docType)
+          throws IOException {
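+    // Serialize each record to JSON and bulk load the batch under the given index and doc type.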
+    es.add(index, docType, inputData.stream().map(m -> {
+              try {
+                return JSONUtils.INSTANCE.toJSON(m, true);
+              } catch (JsonProcessingException e) {
+                throw new IllegalStateException(e.getMessage(), e);
               }
             }
-
-            return false;
-          }).count();
-    }
-    return cnt;
+            ).collect(Collectors.toList())
+    );
   }
 
-  protected void findUpdatedDoc(Map<String, Object> message0, String guid, String sensorType)
-      throws InterruptedException, IOException, OriginalNotFoundException {
-    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
-      Document doc = metaDao.getLatest(guid, sensorType);
-      if (doc != null && compareDocs(message0, doc.getDocument())) {
-        return;
-      }
-    }
-    throw new OriginalNotFoundException("Count not find " + guid + " after " + MAX_RETRIES + " tries");
+  @Override
+  protected void setupTypings() {
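+    // Put the nested alert type into the test index, so that it'll match appropriately.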
+    ((ElasticsearchDao) esDao).getClient().admin().indices().preparePutMapping(INDEX)
+            .setType("test_doc")
+            .setSource(nestedAlertMapping)
+            .get();
   }
 
-  private boolean compareDocs(Map<String, Object> expected, Map<String, Object> actual) {
-    if (expected.size() != actual.size()) {
-      return false;
-    }
-    for(String key: expected.keySet()) {
-      if (ALERT_FIELD.equals(key)) {
-        List<Map<String, Object>> expectedAlerts = (List<Map<String, Object>>) expected.get(MetaAlertDao.ALERT_FIELD);
-        ArrayList<Map<String, Object>> actualAlerts = (ArrayList<Map<String, Object>>) actual.get(MetaAlertDao.ALERT_FIELD);
-        if (!expectedAlerts.containsAll(actualAlerts) || !actualAlerts.containsAll(expectedAlerts)) {
-          return false;
-        }
-      } else if (!expected.get(key).equals(actual.get(key))){
-        return false;
-      }
-    }
+  @Override
+  protected String getTestIndexName() {
+    return INDEX_RAW;
+  }
+
+  @Override
+  protected String getTestIndexFullName() {
+    return INDEX;
+  }
+
+  @Override
+  protected String getMetaAlertIndex() {
+    return METAALERTS_INDEX;
+  }
+
+  @Override
+  protected String getSourceTypeField() {
+    return ElasticsearchMetaAlertDao.SOURCE_TYPE_FIELD;
+  }
+
+  @Override
+  protected void setEmptiedMetaAlertField(Map<String, Object> docMap) {
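+    // In Elasticsearch an emptied metaalerts association is left as an empty list rather than removed.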
+    docMap.put(METAALERT_FIELD, new ArrayList<>());
+  }
+
+  @Override
+  protected boolean isFiniteDoubleOnly() {
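+    // Elasticsearch only accepts finite double values; NaN and Infinity are rejected at index time.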
     return true;
   }
 
-  protected boolean findCreatedDoc(String guid, String sensorType)
-      throws InterruptedException, IOException, OriginalNotFoundException {
-    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
-      Document doc = metaDao.getLatest(guid, sensorType);
-      if (doc != null) {
-        return true;
-      }
-    }
-    throw new OriginalNotFoundException("Count not find " + guid + " after " + MAX_RETRIES + "tries");
-  }
-
-  protected boolean findCreatedDocs(List<GetRequest> getRequests)
-      throws InterruptedException, IOException, OriginalNotFoundException {
-    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
-      Iterable<Document> docs = metaDao.getAllLatest(getRequests);
-      if (docs != null) {
-        int docCount = 0;
-        for (Document doc: docs) {
-          docCount++;
-        }
-        if (getRequests.size() == docCount) {
-          return true;
-        }
-      }
-    }
-    throw new OriginalNotFoundException("Count not find guids after " + MAX_RETRIES + "tries");
-  }
-
-  protected List<Map<String, Object>> buildAlerts(int count) {
-    List<Map<String, Object>> inputData = new ArrayList<>();
-    for (int i = 0; i < count; ++i) {
-      final String guid = "message_" + i;
-      Map<String, Object> alerts = new HashMap<>();
-      alerts.put(Constants.GUID, guid);
-      alerts.put("source:type", SENSOR_NAME);
-      alerts.put(ElasticsearchMetaAlertDao.THREAT_TRIAGE_FIELD, i);
-      alerts.put("timestamp", System.currentTimeMillis());
-      inputData.add(alerts);
-    }
-    return inputData;
-  }
-
-  protected List<Map<String, Object>> buildMetaAlerts(int count, MetaAlertStatus status, Optional<List<Map<String, Object>>> alerts) {
-    List<Map<String, Object>> inputData = new ArrayList<>();
-    for (int i = 0; i < count; ++i) {
-      final String guid = "meta_" + status.getStatusString() + "_" + i;
-      inputData.add(buildMetaAlert(guid, status, alerts));
-    }
-    return inputData;
-  }
-
-  protected Map<String, Object> buildMetaAlert(String guid, MetaAlertStatus status, Optional<List<Map<String, Object>>> alerts) {
-    Map<String, Object> metaAlert = new HashMap<>();
-    metaAlert.put(Constants.GUID, guid);
-    metaAlert.put("source:type", METAALERT_TYPE);
-    metaAlert.put(MetaAlertDao.STATUS_FIELD, status.getStatusString());
-    if (alerts.isPresent()) {
-      List<Map<String, Object>> alertsList = alerts.get();
-      metaAlert.put(ALERT_FIELD, alertsList);
-    }
-    return metaAlert;
-  }
-
-  protected void elasticsearchAdd(List<Map<String, Object>> inputData, String index, String docType)
-      throws IOException {
-    es.add(index, docType, inputData.stream().map(m -> {
-          try {
-            return JSONUtils.INSTANCE.toJSON(m, true);
-          } catch (JsonProcessingException e) {
-            throw new IllegalStateException(e.getMessage(), e);
-          }
-        }
-        ).collect(Collectors.toList())
-    );
+  @Override
+  protected boolean isEmptyMetaAlertList() {
+    return true;
   }
 }
diff --git a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchSearchIntegrationTest.java b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchSearchIntegrationTest.java
index 1bc5b6e..8071e68 100644
--- a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchSearchIntegrationTest.java
+++ b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchSearchIntegrationTest.java
@@ -19,18 +19,25 @@
 
 
 import java.io.File;
-import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
-import java.util.concurrent.ExecutionException;
+import java.util.Map;
 import org.adrianwalker.multilinestring.Multiline;
+import org.apache.metron.common.Constants;
 import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.elasticsearch.dao.ElasticsearchDao;
 import org.apache.metron.elasticsearch.integration.components.ElasticSearchComponent;
 import org.apache.metron.indexing.dao.AccessConfig;
 import org.apache.metron.indexing.dao.IndexDao;
 import org.apache.metron.indexing.dao.SearchIntegrationTest;
-import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
 import org.apache.metron.integration.InMemoryComponent;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
@@ -40,6 +47,8 @@
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
 import org.json.simple.parser.ParseException;
+import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class ElasticsearchSearchIntegrationTest extends SearchIntegrationTest {
@@ -48,6 +57,7 @@
   private static String dateFormat = "yyyy.MM.dd.HH";
   private static final int MAX_RETRIES = 10;
   private static final int SLEEP_MS = 500;
+  protected static IndexDao dao;
 
   /**
    * {
@@ -89,7 +99,7 @@
    *        "type": "text",
    *        "fielddata" : "true"
    *     },
-   *     "duplicate_name_field": {
+   *     "ttl": {
    *        "type": "text",
    *        "fielddata" : "true"
    *     },
@@ -142,7 +152,7 @@
    *        "snort_field": {
    *          "type": "integer"
    *        },
-   *        "duplicate_name_field": {
+   *        "ttl": {
    *          "type": "integer"
    *        },
    *        "alert": {
@@ -175,8 +185,15 @@
   @Multiline
   private static String broDefaultStringMappings;
 
-  @Override
-  protected IndexDao createDao() throws Exception {
+  @BeforeClass
+  public static void setup() throws Exception {
+    indexComponent = startIndex();
+    dao = createDao();
+    // The search test data is all static, so it can be loaded once up front, which is faster.
+    loadTestData();
+  }
+
+  protected static IndexDao createDao() {
     AccessConfig config = new AccessConfig();
     config.setMaxSearchResults(100);
     config.setMaxSearchGroups(100);
@@ -194,8 +211,7 @@
     return dao;
   }
 
-  @Override
-  protected InMemoryComponent startIndex() throws Exception {
+  protected static InMemoryComponent startIndex() throws Exception {
     InMemoryComponent es = new ElasticSearchComponent.Builder()
             .withHttpPort(9211)
             .withIndexDir(new File(indexDir))
@@ -204,32 +220,36 @@
     return es;
   }
 
-  @Override
-  protected void loadTestData()
-      throws ParseException, IOException, ExecutionException, InterruptedException {
-    ElasticSearchComponent es = (ElasticSearchComponent)indexComponent;
+  protected static void loadTestData() throws ParseException {
+    ElasticSearchComponent es = (ElasticSearchComponent) indexComponent;
     es.getClient().admin().indices().prepareCreate("bro_index_2017.01.01.01")
-            .addMapping("bro_doc", broTypeMappings).addMapping("bro_doc_default", broDefaultStringMappings).get();
+        .addMapping("bro_doc", broTypeMappings)
+        .addMapping("bro_doc_default", broDefaultStringMappings).get();
     es.getClient().admin().indices().prepareCreate("snort_index_2017.01.01.02")
-            .addMapping("snort_doc", snortTypeMappings).get();
+        .addMapping("snort_doc", snortTypeMappings).get();
 
-    BulkRequestBuilder bulkRequest = es.getClient().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
+    BulkRequestBuilder bulkRequest = es.getClient().prepareBulk()
+        .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
     JSONArray broArray = (JSONArray) new JSONParser().parse(broData);
-    for(Object o: broArray) {
+    for (Object o : broArray) {
       JSONObject jsonObject = (JSONObject) o;
-      IndexRequestBuilder indexRequestBuilder = es.getClient().prepareIndex("bro_index_2017.01.01.01", "bro_doc");
+      IndexRequestBuilder indexRequestBuilder = es.getClient()
+          .prepareIndex("bro_index_2017.01.01.01", "bro_doc");
       indexRequestBuilder = indexRequestBuilder.setId((String) jsonObject.get("guid"));
       indexRequestBuilder = indexRequestBuilder.setSource(jsonObject.toJSONString());
-      indexRequestBuilder = indexRequestBuilder.setTimestamp(jsonObject.get("timestamp").toString());
+      indexRequestBuilder = indexRequestBuilder
+          .setTimestamp(jsonObject.get("timestamp").toString());
       bulkRequest.add(indexRequestBuilder);
     }
     JSONArray snortArray = (JSONArray) new JSONParser().parse(snortData);
-    for(Object o: snortArray) {
+    for (Object o : snortArray) {
       JSONObject jsonObject = (JSONObject) o;
-      IndexRequestBuilder indexRequestBuilder = es.getClient().prepareIndex("snort_index_2017.01.01.02", "snort_doc");
+      IndexRequestBuilder indexRequestBuilder = es.getClient()
+          .prepareIndex("snort_index_2017.01.01.02", "snort_doc");
       indexRequestBuilder = indexRequestBuilder.setId((String) jsonObject.get("guid"));
       indexRequestBuilder = indexRequestBuilder.setSource(jsonObject.toJSONString());
-      indexRequestBuilder = indexRequestBuilder.setTimestamp(jsonObject.get("timestamp").toString());
+      indexRequestBuilder = indexRequestBuilder
+          .setTimestamp(jsonObject.get("timestamp").toString());
       bulkRequest.add(indexRequestBuilder);
     }
     BulkResponse bulkResponse = bulkRequest.execute().actionGet();
@@ -238,5 +258,116 @@
     }
   }
 
+  @Test
+  public void bad_facet_query_throws_exception() throws Exception {
+    thrown.expect(InvalidSearchException.class);
+    thrown.expectMessage("Failed to execute search");
+    SearchRequest request = JSONUtils.INSTANCE.load(badFacetQuery, SearchRequest.class);
+    dao.search(request);
+  }
 
+  @Override
+  public void returns_column_metadata_for_specified_indices() throws Exception {
+    // getColumnMetadata with only bro
+    {
+      Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Collections.singletonList("bro"));
+      Assert.assertEquals(13, fieldTypes.size());
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("bro_field"));
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("ttl"));
+      Assert.assertEquals(FieldType.KEYWORD, fieldTypes.get("guid"));
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source:type"));
+      Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
+      Assert.assertEquals(FieldType.LONG, fieldTypes.get("long_field"));
+      Assert.assertEquals(FieldType.DATE, fieldTypes.get("timestamp"));
+      Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("latitude"));
+      Assert.assertEquals(FieldType.DOUBLE, fieldTypes.get("score"));
+      Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("alert"));
+    }
+    // getColumnMetadata with only snort
+    {
+      Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Collections.singletonList("snort"));
+      Assert.assertEquals(14, fieldTypes.size());
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("snort_field"));
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ttl"));
+      Assert.assertEquals(FieldType.KEYWORD, fieldTypes.get("guid"));
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source:type"));
+      Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
+      Assert.assertEquals(FieldType.LONG, fieldTypes.get("long_field"));
+      Assert.assertEquals(FieldType.DATE, fieldTypes.get("timestamp"));
+      Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("latitude"));
+      Assert.assertEquals(FieldType.DOUBLE, fieldTypes.get("score"));
+      Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("alert"));
+    }
+  }
+
+  @Override
+  public void returns_column_data_for_multiple_indices() throws Exception {
+    Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Arrays.asList("bro", "snort"));
+    Assert.assertEquals(15, fieldTypes.size());
+    Assert.assertEquals(FieldType.KEYWORD, fieldTypes.get("guid"));
+    Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source:type"));
+    Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
+    Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
+    Assert.assertEquals(FieldType.LONG, fieldTypes.get("long_field"));
+    Assert.assertEquals(FieldType.DATE, fieldTypes.get("timestamp"));
+    Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("latitude"));
+    Assert.assertEquals(FieldType.DOUBLE, fieldTypes.get("score"));
+    Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
+    Assert.assertEquals(FieldType.TEXT, fieldTypes.get("bro_field"));
+    Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("snort_field"));
+    //NOTE: This is because the field is in both bro and snort and they have different types.
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("ttl"));
+    Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("threat:triage:score"));
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("alert"));
+  }
+
+  @Test
+  public void throws_exception_on_aggregation_queries_on_non_string_non_numeric_fields()
+      throws Exception {
+    thrown.expect(InvalidSearchException.class);
+    thrown.expectMessage("Failed to execute search");
+    GroupRequest request = JSONUtils.INSTANCE.load(badGroupQuery, GroupRequest.class);
+    dao.group(request);
+  }
+
+  @Test
+  public void different_type_filter_query() throws Exception {
+    SearchRequest request = JSONUtils.INSTANCE.load(differentTypeFilterQuery, SearchRequest.class);
+    SearchResponse response = dao.search(request);
+    Assert.assertEquals(1, response.getTotal());
+    List<SearchResult> results = response.getResults();
+    Assert.assertEquals("bro", results.get(0).getSource().get("source:type"));
+    Assert.assertEquals("data 1", results.get(0).getSource().get("ttl"));
+  }
+
+  @Override
+  protected String getSourceTypeField() {
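+    // The Elasticsearch templates index the sensor type as source:type, so swap the dots for colons.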
+    return Constants.SENSOR_TYPE.replace('.', ':');
+  }
+
+  @Override
+  protected IndexDao getIndexDao() {
+    return dao;
+  }
+
+  @Override
+  protected String getIndexName(String sensorType) {
+    if ("bro".equals(sensorType)) {
+      return "bro_index_2017.01.01.01";
+    } else {
+      return "snort_index_2017.01.01.02";
+    }
+  }
 }
diff --git a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchUpdateIntegrationTest.java b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchUpdateIntegrationTest.java
index 325d42e..c5c0bc1 100644
--- a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchUpdateIntegrationTest.java
+++ b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/ElasticsearchUpdateIntegrationTest.java
@@ -19,203 +19,124 @@
 
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.google.common.collect.Iterables;
+import java.io.File;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.metron.common.Constants;
 import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.elasticsearch.dao.ElasticsearchDao;
 import org.apache.metron.elasticsearch.integration.components.ElasticSearchComponent;
-import org.apache.metron.hbase.mock.MockHTable;
 import org.apache.metron.hbase.mock.MockHBaseTableProvider;
-import org.apache.metron.indexing.dao.*;
-import org.apache.metron.indexing.dao.update.Document;
-import org.apache.metron.indexing.dao.update.ReplaceRequest;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.index.query.QueryBuilders;
+import org.apache.metron.hbase.mock.MockHTable;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.HBaseDao;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.MultiIndexDao;
+import org.apache.metron.indexing.dao.UpdateIntegrationTest;
+import org.apache.metron.integration.UnableToStartException;
+import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Test;
 
-import java.io.File;
-import java.text.SimpleDateFormat;
-import java.util.*;
-
-
-public class ElasticsearchUpdateIntegrationTest {
-  private static final int MAX_RETRIES = 10;
-  private static final int SLEEP_MS = 500;
+public class ElasticsearchUpdateIntegrationTest extends UpdateIntegrationTest {
   private static final String SENSOR_NAME= "test";
-  private static final String TABLE_NAME = "modifications";
-  private static final String CF = "p";
   private static String indexDir = "target/elasticsearch_mutation";
   private static String dateFormat = "yyyy.MM.dd.HH";
   private static String index = SENSOR_NAME + "_index_" + new SimpleDateFormat(dateFormat).format(new Date());
-  private static MockHTable table;
-  private static IndexDao esDao;
-  private static IndexDao hbaseDao;
-  private static MultiIndexDao dao;
   private static ElasticSearchComponent es;
 
+  private static final String TABLE_NAME = "modifications";
+  private static final String CF = "p";
+  private static MockHTable table;
+  private static IndexDao hbaseDao;
+
+  @Override
+  protected String getIndexName() {
+    // Reuse the precomputed index name so it cannot drift if the hour rolls over mid-test.
+    return index;
+  }
+
   @BeforeClass
-  public static void setup() throws Exception {
+  public static void setupBeforeClass() throws UnableToStartException {
+    es = new ElasticSearchComponent.Builder()
+        .withHttpPort(9211)
+        .withIndexDir(new File(indexDir))
+        .build();
+    es.start();
+  }
+
+  @Before
+  public void setup() throws IOException {
     Configuration config = HBaseConfiguration.create();
     MockHBaseTableProvider tableProvider = new MockHBaseTableProvider();
-    tableProvider.addToCache(TABLE_NAME, CF);
-    table = (MockHTable)tableProvider.getTable(config, TABLE_NAME);
-    // setup the client
-    es = new ElasticSearchComponent.Builder()
-            .withHttpPort(9211)
-            .withIndexDir(new File(indexDir))
-            .build();
-    es.start();
+    MockHBaseTableProvider.addToCache(TABLE_NAME, CF);
+    table = (MockHTable) tableProvider.getTable(config, TABLE_NAME);
 
     hbaseDao = new HBaseDao();
     AccessConfig accessConfig = new AccessConfig();
     accessConfig.setTableProvider(tableProvider);
-    Map<String, Object> globalConfig = new HashMap<String, Object>() {{
-      put("es.clustername", "metron");
-      put("es.port", "9300");
-      put("es.ip", "localhost");
-      put("es.date.format", dateFormat);
-      put(HBaseDao.HBASE_TABLE, TABLE_NAME);
-      put(HBaseDao.HBASE_CF, CF);
-    }};
+    Map<String, Object> globalConfig = createGlobalConfig();
+    globalConfig.put(HBaseDao.HBASE_TABLE, TABLE_NAME);
+    globalConfig.put(HBaseDao.HBASE_CF, CF);
     accessConfig.setGlobalConfigSupplier(() -> globalConfig);
 
-    esDao = new ElasticsearchDao();
-
-    dao = new MultiIndexDao(hbaseDao, esDao);
+    MultiIndexDao dao = new MultiIndexDao(hbaseDao, createDao());
     dao.init(accessConfig);
+    setDao(dao);
+  }
 
+  @After
+  public void reset() {
+    es.reset();
+    table.clear();
   }
 
   @AfterClass
   public static void teardown() {
-    if(es != null) {
-      es.stop();
-    }
+    es.stop();
   }
 
+  protected static Map<String, Object> createGlobalConfig() {
+    return new HashMap<String, Object>() {{
+      put("es.clustername", "metron");
+      put("es.port", "9300");
+      put("es.ip", "localhost");
+      put("es.date.format", dateFormat);
+    }};
+  }
 
+  protected static IndexDao createDao() {
+    return new ElasticsearchDao();
+  }
 
-  @Test
-  public void test() throws Exception {
-    List<Map<String, Object>> inputData = new ArrayList<>();
-    for(int i = 0; i < 10;++i) {
-      final String name = "message" + i;
-      inputData.add(
-              new HashMap<String, Object>() {{
-                put("source:type", SENSOR_NAME);
-                put("name" , name);
-                put("timestamp", System.currentTimeMillis());
-                put(Constants.GUID, name);
-              }}
-                             );
-    }
+  @Override
+  protected void addTestData(String indexName, String sensorType,
+      List<Map<String, Object>> docs) throws Exception {
     es.add(index, SENSOR_NAME
-          , Iterables.transform(inputData,
-                    m -> {
-                      try {
-                        return JSONUtils.INSTANCE.toJSON(m, true);
-                      } catch (JsonProcessingException e) {
-                        throw new IllegalStateException(e.getMessage(), e);
-                      }
-                    }
-                    )
+        , Iterables.transform(docs,
+            m -> {
+              try {
+                return JSONUtils.INSTANCE.toJSON(m, true);
+              } catch (JsonProcessingException e) {
+                throw new IllegalStateException(e.getMessage(), e);
+              }
+            }
+        )
     );
-    List<Map<String,Object>> docs = null;
-    for(int t = 0;t < MAX_RETRIES;++t, Thread.sleep(SLEEP_MS)) {
-      docs = es.getAllIndexedDocs(index, SENSOR_NAME + "_doc");
-      if(docs.size() >= 10) {
-        break;
-      }
-    }
-    Assert.assertEquals(10, docs.size());
-    //modify the first message and add a new field
-    {
-      Map<String, Object> message0 = new HashMap<String, Object>(inputData.get(0)) {{
-        put("new-field", "metron");
-      }};
-      String guid = "" + message0.get(Constants.GUID);
-      dao.replace(new ReplaceRequest(){{
-        setReplacement(message0);
-        setGuid(guid);
-        setSensorType(SENSOR_NAME);
-      }}, Optional.empty());
-
-      Assert.assertEquals(1, table.size());
-      Document doc = dao.getLatest(guid, SENSOR_NAME);
-      Assert.assertEquals(message0, doc.getDocument());
-      {
-        //ensure hbase is up to date
-        Get g = new Get(HBaseDao.Key.toBytes(new HBaseDao.Key(guid, SENSOR_NAME)));
-        Result r = table.get(g);
-        NavigableMap<byte[], byte[]> columns = r.getFamilyMap(CF.getBytes());
-        Assert.assertEquals(1, columns.size());
-        Assert.assertEquals(message0
-                , JSONUtils.INSTANCE.load(new String(columns.lastEntry().getValue())
-                        , JSONUtils.MAP_SUPPLIER)
-        );
-      }
-      {
-        //ensure ES is up-to-date
-        long cnt = 0;
-        for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
-          docs = es.getAllIndexedDocs(index, SENSOR_NAME + "_doc");
-          cnt = docs
-                  .stream()
-                  .filter(d -> message0.get("new-field").equals(d.get("new-field")))
-                  .count();
-        }
-        Assert.assertNotEquals("Elasticsearch is not updated!", cnt, 0);
-      }
-    }
-    //modify the same message and modify the new field
-    {
-      Map<String, Object> message0 = new HashMap<String, Object>(inputData.get(0)) {{
-        put("new-field", "metron2");
-      }};
-      String guid = "" + message0.get(Constants.GUID);
-      dao.replace(new ReplaceRequest(){{
-        setReplacement(message0);
-        setGuid(guid);
-        setSensorType(SENSOR_NAME);
-      }}, Optional.empty());
-      Assert.assertEquals(1, table.size());
-      Document doc = dao.getLatest(guid, SENSOR_NAME);
-      Assert.assertEquals(message0, doc.getDocument());
-      {
-        //ensure hbase is up to date
-        Get g = new Get(HBaseDao.Key.toBytes(new HBaseDao.Key(guid, SENSOR_NAME)));
-        Result r = table.get(g);
-        NavigableMap<byte[], byte[]> columns = r.getFamilyMap(CF.getBytes());
-        Assert.assertEquals(2, columns.size());
-        Assert.assertEquals(message0, JSONUtils.INSTANCE.load(new String(columns.lastEntry().getValue())
-                        , JSONUtils.MAP_SUPPLIER)
-        );
-        Assert.assertNotEquals(message0, JSONUtils.INSTANCE.load(new String(columns.firstEntry().getValue())
-                        , JSONUtils.MAP_SUPPLIER)
-        );
-      }
-      {
-        //ensure ES is up-to-date
-        long cnt = 0;
-        for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t,Thread.sleep(SLEEP_MS)) {
-          docs = es.getAllIndexedDocs(index, SENSOR_NAME + "_doc");
-          cnt = docs
-                  .stream()
-                  .filter(d -> message0.get("new-field").equals(d.get("new-field")))
-                  .count();
-        }
-
-        Assert.assertNotEquals("Elasticsearch is not updated!", cnt, 0);
-      }
-    }
   }
 
+  @Override
+  protected List<Map<String, Object>> getIndexedTestData(String indexName, String sensorType) throws Exception {
+    return es.getAllIndexedDocs(index, SENSOR_NAME + "_doc");
+  }
 
-
+  @Override
+  protected MockHTable getMockHTable() {
+    return table;
+  }
 }
diff --git a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/components/ElasticSearchComponent.java b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/components/ElasticSearchComponent.java
index e716ce1..45b4d60 100644
--- a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/components/ElasticSearchComponent.java
+++ b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/integration/components/ElasticSearchComponent.java
@@ -274,19 +274,19 @@
 
   }
 
-    @Override
-    public void stop() {
-      try {
-        node.close();
-      } catch (IOException e) {
-        throw new RuntimeException("Unable to stop node." , e);
-      }
-      node = null;
-      client = null;
+  @Override
+  public void stop() {
+    try {
+      node.close();
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to stop node.", e);
     }
+    node = null;
+    client = null;
+  }
 
-    @Override
-    public void reset() {
-        client.admin().indices().delete(new DeleteIndexRequest("*")).actionGet();
-    }
+  @Override
+  public void reset() {
+    client.admin().indices().delete(new DeleteIndexRequest("*")).actionGet();
+  }
 }
diff --git a/metron-platform/metron-indexing/README.md b/metron-platform/metron-indexing/README.md
index a9a8ed9..46e511b 100644
--- a/metron-platform/metron-indexing/README.md
+++ b/metron-platform/metron-indexing/README.md
@@ -77,6 +77,20 @@
 ### Elasticsearch
 Metron comes with built-in templates for the default sensors for Elasticsearch. When adding a new sensor, it will be necessary to add a new template defining the output fields appropriately. In addition, there is a requirement for a field `alert` of type `nested` for Elasticsearch 2.x installs.  This is detailed at [Using Metron with Elasticsearch 2.x](../metron-elasticsearch/README.md#using-metron-with-elasticsearch-2x)
 
+### Solr
+
+Metron comes with built-in schemas for the default sensors for Solr.  When adding a new sensor, it will be necessary to add a new schema defining the output fields appropriately.  In addition, the following fields are used internally by Metron and are also required:
+
+* `<field name="guid" type="string" indexed="true" stored="true" required="true" multiValued="false" />`
+* `<field name="source.type" type="string" indexed="true" stored="true" />`
+* `<field name="timestamp" type="timestamp" indexed="true" stored="true" />`
+* `<field name="comments" type="string" indexed="true" stored="true" multiValued="true"/>`
+* `<field name="metaalerts" type="string" multiValued="true" indexed="true" stored="true"/>`
+
+The unique key should be set to `guid` by including `<uniqueKey>guid</uniqueKey>` in the schema.
+
+It is strongly suggested that the `fieldTypes` match those in the built-in schemas.
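+
+For illustration only, a minimal sensor schema wiring the required fields together might look like the sketch below; the `fieldType` definitions (in particular backing `timestamp` with `solr.TrieLongField`) are assumptions here, so defer to the built-in schemas for the authoritative types:
+
+```xml
+<schema name="example_sensor_doc" version="1.6">
+  <!-- Fields required by Metron -->
+  <field name="guid" type="string" indexed="true" stored="true" required="true" multiValued="false"/>
+  <field name="source.type" type="string" indexed="true" stored="true"/>
+  <field name="timestamp" type="timestamp" indexed="true" stored="true"/>
+  <field name="comments" type="string" indexed="true" stored="true" multiValued="true"/>
+  <field name="metaalerts" type="string" multiValued="true" indexed="true" stored="true"/>
+
+  <!-- Sensor-specific output fields are declared alongside the required fields -->
+
+  <uniqueKey>guid</uniqueKey>
+
+  <!-- Assumed type definitions; match these to the built-in schemas -->
+  <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
+  <fieldType name="timestamp" class="solr.TrieLongField"/>
+</schema>
+```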
+
 ### Indexing Configuration Examples
 For a given  sensor, the following scenarios would be indicated by
 the following cases:
@@ -196,7 +210,7 @@
 ### The `MetaAlertDao`
 
 The goal of meta alerts is to be able to group together a set of alerts while being able to transparently perform actions
-like searches, as if meta alerts were normal alerts.  `org.apache.metron.indexing.dao.MetaAlertDao` extends `IndexDao` and
+like searches, as if meta alerts were normal alerts.  `org.apache.metron.indexing.dao.metaalert.MetaAlertDao` extends `IndexDao` and
 enables several features: 
 * the ability to get all meta alerts associated with an alert
 * creation of a meta alert
diff --git a/metron-platform/metron-indexing/pom.xml b/metron-platform/metron-indexing/pom.xml
index 4d664fb..ace6e6c 100644
--- a/metron-platform/metron-indexing/pom.xml
+++ b/metron-platform/metron-indexing/pom.xml
@@ -143,7 +143,7 @@
         </dependency>
         <dependency>
             <groupId>org.mockito</groupId>
-            <artifactId>mockito-all</artifactId>
+            <artifactId>mockito-core</artifactId>
             <version>${global_mockito_version}</version>
             <scope>test</scope>
         </dependency>
@@ -197,6 +197,12 @@
             <type>test-jar</type>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.hamcrest</groupId>
+            <artifactId>hamcrest-core</artifactId>
+            <version>1.3</version>
+            <scope>test</scope>
+        </dependency>
     </dependencies>
     <build>
         <plugins>
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/AccessConfig.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/AccessConfig.java
index c16401e..b1df46a 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/AccessConfig.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/AccessConfig.java
@@ -17,6 +17,7 @@
  */
 package org.apache.metron.indexing.dao;
 
+import java.util.function.Function;
 import org.apache.metron.hbase.TableProvider;
 
 import java.util.HashMap;
@@ -27,8 +28,10 @@
   private Integer maxSearchResults;
   private Integer maxSearchGroups;
   private Supplier<Map<String, Object>> globalConfigSupplier;
+  private Function<String, String> indexSupplier;
   private Map<String, String> optionalSettings = new HashMap<>();
   private TableProvider tableProvider = null;
+  private Boolean isKerberosEnabled = false;
 
   /**
    * @return A supplier which will return the current global config.
@@ -41,6 +44,14 @@
     this.globalConfigSupplier = globalConfigSupplier;
   }
 
+  /**
+   * @return A function that maps a sensor type to the name of the index where it is stored.
+   */
+  public Function<String, String> getIndexSupplier() {
+    return indexSupplier;
+  }
+
+  public void setIndexSupplier(Function<String, String> indexSupplier) {
+    this.indexSupplier = indexSupplier;
+  }
+
   /**
    * @return The maximum number of search results.
    */
@@ -84,4 +95,15 @@
   public void setTableProvider(TableProvider tableProvider) {
     this.tableProvider = tableProvider;
   }
+
+  /**
+   * @return True if clients should be configured for Kerberos
+   */
+  public Boolean getKerberosEnabled() {
+    return isKerberosEnabled;
+  }
+
+  public void setKerberosEnabled(Boolean kerberosEnabled) {
+    isKerberosEnabled = kerberosEnabled;
+  }
 }
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ColumnMetadataDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/ColumnMetadataDao.java
similarity index 61%
rename from metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ColumnMetadataDao.java
rename to metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/ColumnMetadataDao.java
index 0393629..3610574 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ColumnMetadataDao.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/ColumnMetadataDao.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.metron.elasticsearch.dao;
+package org.apache.metron.indexing.dao;
 
 import org.apache.metron.indexing.dao.search.FieldType;
 
@@ -36,32 +36,4 @@
    * @throws IOException
    */
   Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException;
-
-  /**
-   * Finds the latest version of a set of base indices.  This can be used to find
-   * the latest 'bro' index, for example.
-   *
-   * Assuming the following indices exist...
-   *
-   *    [
-   *      'bro_index_2017.10.03.19'
-   *      'bro_index_2017.10.03.20',
-   *      'bro_index_2017.10.03.21',
-   *      'snort_index_2017.10.03.19',
-   *      'snort_index_2017.10.03.20',
-   *      'snort_index_2017.10.03.21'
-   *    ]
-   *
-   *  And the include indices are given as...
-   *
-   *    ['bro', 'snort']
-   *
-   * Then the latest indices are...
-   *
-   *    ['bro_index_2017.10.03.21', 'snort_index_2017.10.03.21']
-   *
-   * @param includeIndices The base names of the indices to include
-   * @return The latest version of a set of indices.
-   */
-  String[] getLatestIndices(List<String> includeIndices);
 }
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/HBaseDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/HBaseDao.java
index 7f37a9a..6c646de 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/HBaseDao.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/HBaseDao.java
@@ -28,8 +28,7 @@
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.Optional;
-
-import com.google.common.hash.Hasher;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTableInterface;
@@ -38,6 +37,7 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.common.utils.KeyUtil;
+import org.apache.metron.indexing.dao.search.AlertComment;
 import org.apache.metron.indexing.dao.search.FieldType;
 import org.apache.metron.indexing.dao.search.GetRequest;
 import org.apache.metron.indexing.dao.search.GroupRequest;
@@ -45,6 +45,7 @@
 import org.apache.metron.indexing.dao.search.InvalidSearchException;
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.Document;
 
 /**
@@ -210,7 +211,21 @@
     if(entry.getValue()!= null) {
       Map<String, Object> json = JSONUtils.INSTANCE.load(new String(entry.getValue()),
           JSONUtils.MAP_SUPPLIER);
+
+      // Make sure comments are in the proper format
+      @SuppressWarnings("unchecked")
+      List<Map<String, Object>> commentsMap = (List<Map<String, Object>>) json.get(COMMENTS_FIELD);
       try {
+        if (commentsMap != null) {
+          List<AlertComment> comments = new ArrayList<>();
+          for (Map<String, Object> commentMap : commentsMap) {
+            comments.add(new AlertComment(commentMap));
+          }
+          if (comments.size() > 0) {
+            json.put(COMMENTS_FIELD,
+                comments.stream().map(AlertComment::asMap).collect(Collectors.toList()));
+          }
+        }
         Key k = Key.fromBytes(result.getRow());
         return new Document(json, k.getGuid(), k.getSensorType(), ts);
       } catch (IOException e) {
@@ -262,4 +277,78 @@
   public Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException {
     return null;
   }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    Document latest = getLatest(request.getGuid(), request.getSensorType());
+    addCommentToAlert(request, latest);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    if (latest == null || latest.getDocument() == null) {
+      throw new IOException("Unable to add comment to document that doesn't exist");
+    }
+
+    List<Map<String, Object>> comments = (List<Map<String, Object>>) latest.getDocument()
+        .getOrDefault(COMMENTS_FIELD, new ArrayList<>());
+    List<Map<String, Object>> originalComments = new ArrayList<>(comments);
+
+    // Convert all comments back to raw JSON before updating.
+    List<Map<String, Object>> commentsMap = new ArrayList<>();
+    for (Map<String, Object> comment : originalComments) {
+      commentsMap.add(new AlertComment(comment).asMap());
+    }
+    commentsMap.add(new AlertComment(
+        request.getComment(),
+        request.getUsername(),
+        request.getTimestamp())
+        .asMap());
+
+    Document newVersion = new Document(latest);
+    newVersion.getDocument().put(COMMENTS_FIELD, commentsMap);
+    update(newVersion, Optional.empty());
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void removeCommentFromAlert(CommentAddRemoveRequest request)
+      throws IOException {
+    Document latest = getLatest(request.getGuid(), request.getSensorType());
+    removeCommentFromAlert(request, latest);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    if (latest == null || latest.getDocument() == null) {
+      throw new IOException("Unable to remove comment from document that doesn't exist");
+    }
+    List<Map<String, Object>> commentsField = (List<Map<String, Object>>) latest.getDocument().get(COMMENTS_FIELD);
+    // Can't remove anything if there's nothing there
+    if (commentsField == null) {
+      return;
+    }
+    List<Map<String, Object>> originalComments = new ArrayList<>(commentsField);
+    List<AlertComment> comments = new ArrayList<>();
+    for (Map<String, Object> commentMap : originalComments) {
+      comments.add(new AlertComment(commentMap));
+    }
+
+    comments.remove(new AlertComment(request.getComment(), request.getUsername(), request.getTimestamp()));
+    Document newVersion = new Document(latest);
+    if (comments.size() > 0) {
+      List<Map<String, Object>> commentsAsMap = comments.stream().map(AlertComment::asMap)
+          .collect(Collectors.toList());
+      newVersion.getDocument().put(COMMENTS_FIELD, commentsAsMap);
+    } else {
+      newVersion.getDocument().remove(COMMENTS_FIELD);
+    }
+    // Write the new version back exactly once, whether or not any comments remain.
+    update(newVersion, Optional.empty());
+  }
 }
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/IndexDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/IndexDao.java
index fe546bd..11b2ff0 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/IndexDao.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/IndexDao.java
@@ -17,151 +17,20 @@
  */
 package org.apache.metron.indexing.dao;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-import org.apache.metron.common.utils.JSONUtils;
-import org.apache.metron.indexing.dao.search.FieldType;
-import org.apache.metron.indexing.dao.search.GetRequest;
-import org.apache.metron.indexing.dao.search.GroupRequest;
-import org.apache.metron.indexing.dao.search.GroupResponse;
-import org.apache.metron.indexing.dao.search.InvalidSearchException;
-import org.apache.metron.indexing.dao.search.SearchRequest;
-import org.apache.metron.indexing.dao.search.SearchResponse;
-import org.apache.metron.indexing.dao.update.Document;
-import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
-import org.apache.metron.indexing.dao.update.PatchRequest;
-import org.apache.metron.indexing.dao.update.ReplaceRequest;
+import org.apache.metron.indexing.dao.search.SearchDao;
+import org.apache.metron.indexing.dao.update.UpdateDao;
 
 /**
  * The IndexDao provides a common interface for retrieving and storing data in a variety of persistent stores.
  * Document reads and writes require a GUID and sensor type with an index being optional.
  */
-public interface IndexDao {
+public interface IndexDao extends UpdateDao, SearchDao, RetrieveLatestDao, ColumnMetadataDao {
 
-  /**
-   * Return search response based on the search request
-   *
-   * @param searchRequest
-   * @return
-   * @throws InvalidSearchException
-   */
-  SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException;
-
-  GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException;
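+  /**
+   * The name of the document field where user comments on an alert are stored.
+   */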
+  String COMMENTS_FIELD = "comments";
 
   /**
    * Initialize the DAO with the AccessConfig object.
-   * @param config
+   * @param config The config to use for initialization
    */
   void init(AccessConfig config);
-
-  /**
-   * Return the latest version of a document given the GUID and the sensor type.
-   *
-   * @param guid The GUID for the document
-   * @param sensorType The sensor type of the document
-   * @return The Document matching or null if not available.
-   * @throws IOException
-   */
-  Document getLatest(String guid, String sensorType) throws IOException;
-
-  /**
-   * Return a list of the latest versions of documents given a list of GUIDs and sensor types.
-   *
-   * @param getRequests A list of get requests for documents
-   * @return A list of documents matching or an empty list in not available.
-   * @throws IOException
-   */
-  Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException;
-
-  /**
-   * Return the latest version of a document given a GetRequest.
-   * @param request The GetRequest which indicates the GUID and sensor type.
-   * @return Optionally the document (dependent upon existence in the index).
-   * @throws IOException
-   */
-  default Optional<Map<String, Object>> getLatestResult(GetRequest request) throws IOException {
-    Document ret = getLatest(request.getGuid(), request.getSensorType());
-    if(ret == null) {
-      return Optional.empty();
-    }
-    else {
-      return Optional.ofNullable(ret.getDocument());
-    }
-  }
-
-  /**
-   * Update a given Document and optionally the index where the document exists.  This is a full update,
-   * meaning the current document will be replaced if it exists or a new document will be created if it does
-   * not exist.  Partial updates are not supported in this method.
-   *
-   * @param update The document to replace from the index.
-   * @param index The index where the document lives.
-   * @throws IOException
-   */
-  void update(Document update, Optional<String> index) throws IOException;
-
-  /**
-   * Similar to the update method but accepts multiple documents and performs updates in batch.
-   *
-   * @param updates A map of the documents to update to the index where they live.
-   * @throws IOException
-   */
-  void batchUpdate(Map<Document, Optional<String>> updates) throws IOException;
-
-  /**
-   * Update a document in an index given a JSON Patch (see RFC 6902 at https://tools.ietf.org/html/rfc6902)
-   * @param request The patch request
-   * @param timestamp Optionally a timestamp to set. If not specified then current time is used.
-   * @throws OriginalNotFoundException If the original is not found, then it cannot be patched.
-   * @throws IOException
-   */
-  default void patch( PatchRequest request
-                    , Optional<Long> timestamp
-                    ) throws OriginalNotFoundException, IOException {
-    Document d = getPatchedDocument(request, timestamp);
-    update(d, Optional.ofNullable(request.getIndex()));
-  }
-
-  default Document getPatchedDocument(PatchRequest request
-      , Optional<Long> timestamp
-  ) throws OriginalNotFoundException, IOException {
-    Map<String, Object> latest = request.getSource();
-    if(latest == null) {
-      Document latestDoc = getLatest(request.getGuid(), request.getSensorType());
-      if(latestDoc != null && latestDoc.getDocument() != null) {
-        latest = latestDoc.getDocument();
-      }
-      else {
-        throw new OriginalNotFoundException("Unable to patch an document that doesn't exist and isn't specified.");
-      }
-    }
-    Map<String, Object> updated = JSONUtils.INSTANCE.applyPatch(request.getPatch(), latest);
-    return new Document(updated
-            , request.getGuid()
-            , request.getSensorType()
-            , timestamp.orElse(System.currentTimeMillis()));
-  }
-
-  /**
-   * Replace a document in an index.
-   * @param request The replacement request.
-   * @param timestamp The timestamp (optional) of the update.  If not specified, then current time will be used.
-   * @throws IOException
-   */
-  default void replace( ReplaceRequest request
-                      , Optional<Long> timestamp
-                      ) throws IOException {
-    Document d = new Document(request.getReplacement()
-                             , request.getGuid()
-                             , request.getSensorType()
-                             , timestamp.orElse(System.currentTimeMillis())
-                             );
-    update(d, Optional.ofNullable(request.getIndex()));
-  }
-
-  Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException;
 }
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/MetaAlertDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/MetaAlertDao.java
deleted file mode 100644
index 93c791b..0000000
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/MetaAlertDao.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.metron.indexing.dao;
-
-import java.util.List;
-import java.util.Optional;
-import java.io.IOException;
-import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
-import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
-import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
-import org.apache.metron.indexing.dao.search.GetRequest;
-import org.apache.metron.indexing.dao.search.InvalidCreateException;
-import org.apache.metron.indexing.dao.search.InvalidSearchException;
-import org.apache.metron.indexing.dao.search.SearchResponse;
-
-/**
- * The MetaAlertDao exposes methods for interacting with meta alerts.  Meta alerts are objects that contain
- * alerts and summary statistics based on the scores of these alerts.  Meta alerts are returned in searches
- * just as alerts are and match based on the field values of child alerts.  If a child alert matches a search
- * the meta alert will be returned while the original child alert will not.  A meta alert also contains a
- * status field that controls it's inclusion in search results and a groups field that can be used to track
- * the groups a meta alert was created from.
- *
- * The structure of a meta alert is as follows:
- * {
- *   "guid": "meta alert guid",
- *   "timestamp": timestamp,
- *   "source:type": "metaalert",
- *   "alerts": [ array of child alerts ],
- *   "status": "active or inactive",
- *   "groups": [ array of group names ],
- *   "average": 10,
- *   "max": 10,
- *   "threat:triage:score": 30,
- *   "count": 3,
- *   "sum": 30,
- *   "min": 10,
- *   "median": 10
- * }
- *
- * A child alert that has been added to a meta alert will store the meta alert GUID in a "metaalerts" field.
- * This field is an array of meta alert GUIDs, meaning a child alert can be contained in multiple meta alerts.
- * Any update to a child alert will trigger an update to the meta alert so that the alert inside a meta alert
- * and the original alert will be kept in sync.
- *
- * Other fields can be added to a meta alert through the patch method on the IndexDao interface.  However, attempts
- * to directly change the "alerts" or "status" field will result in an exception.
- */
-public interface MetaAlertDao extends IndexDao {
-
-  String METAALERTS_INDEX = "metaalert_index";
-  String METAALERT_TYPE = "metaalert";
-  String METAALERT_FIELD = "metaalerts";
-  String METAALERT_DOC = METAALERT_TYPE + "_doc";
-  String THREAT_FIELD_DEFAULT = "threat.triage.score";
-  String THREAT_FIELD_PROPERTY = "threat.triage.score.field";
-  String THREAT_SORT_DEFAULT = "sum";
-  String ALERT_FIELD = "metron_alert";
-  String STATUS_FIELD = "status";
-  String GROUPS_FIELD = "groups";
-
-  /**
-   * Given an alert GUID, retrieve all associated meta alerts.
-   * @param guid The alert GUID to be searched for
-   * @return All meta alerts with a child alert having the GUID
-   * @throws InvalidSearchException If a problem occurs with the search
-   */
-  SearchResponse getAllMetaAlertsForAlert(String guid) throws InvalidSearchException;
-
-  /**
-   * Creates a meta alert from a list of child alerts.  The most recent version of each child alert is
-   * retrieved using the DAO abstractions.
-   *
-   * @param request A request object containing get requests for alerts to be added and a list of groups
-   * @return A response indicating success or failure along with the GUID of the new meta alert
-   * @throws InvalidCreateException If a malformed create request is provided
-   * @throws IOException If a problem occurs during communication
-   */
-  MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
-      throws InvalidCreateException, IOException;
-
-
-  /**
-   * Adds a list of alerts to an existing meta alert.  This will add each alert object to the "alerts" array in the meta alert
-   * and also add the meta alert GUID to each child alert's "metaalerts" array.  After alerts have been added the
-   * meta alert scores are recalculated.  Any alerts already in the meta alert are skipped and no updates are
-   * performed if all of the alerts are already in the meta alert.  The most recent version of each child alert is
-   * retrieved using the DAO abstractions.  Alerts cannot be added to an 'inactive' meta alert.
-   *
-   * @param metaAlertGuid The meta alert GUID
-   * @param getRequests Get requests for alerts to be added
-   * @return True or false depending on if any alerts were added
-   * @throws IOException
-   */
-  boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> getRequests) throws IOException;
-
-  /**
-   * Removes a list of alerts from an existing meta alert.  This will remove each alert object from the "alerts" array in the meta alert
-   * and also remove the meta alert GUID from each child alert's "metaalerts" array.  After alerts have been removed the
-   * meta alert scores are recalculated.  Any alerts not contained in the meta alert are skipped and no updates are
-   * performed if no alerts can be found in the meta alert.  Alerts cannot be removed from an 'inactive' meta alert.
-   *
-   * @param metaAlertGuid The meta alert GUID
-   * @param getRequests Get requests for alerts to be removed
-   * @return True or false depending on if any alerts were removed
-   * @throws IOException
-   */
-  boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> getRequests) throws IOException;
-
-  /**
-   * The meta alert status field can be set to either 'active' or 'inactive' and will control whether or not meta alerts
-   * (and child alerts) appear in search results.  An 'active' status will cause meta alerts to appear in search
-   * results instead of it's child alerts and an 'inactive' status will suppress the meta alert from search results
-   * with child alerts appearing in search results as normal.  A change to 'inactive' will cause the meta alert GUID to
-   * be removed from all it's child alert's "metaalerts" field.  A change back to 'active' will have the opposite effect.
-   *
-   * @param metaAlertGuid The GUID of the meta alert
-   * @param status A status value of 'active' or 'inactive'
-   * @return True or false depending on if the status was changed
-   * @throws IOException
-   */
-  boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status) throws IOException;
-
-  /**
-   * Initializes a Meta Alert DAO with default "sum" meta alert threat sorting.
-   * @param indexDao The DAO to wrap for our queries.
-   */
-  default void init(IndexDao indexDao) {
-    init(indexDao, Optional.empty());
-  }
-
-  /**
-   * Initializes a Meta Alert DAO.
-   * @param indexDao The DAO to wrap for our queries
-   * @param threatSort The aggregation to use as the threat field. E.g. "sum", "median", etc.
-   *     null is "sum"
-   */
-  void init(IndexDao indexDao, Optional<String> threatSort);
-}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/MultiIndexDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/MultiIndexDao.java
index dad08d6..420c775 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/MultiIndexDao.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/MultiIndexDao.java
@@ -25,6 +25,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -36,6 +37,7 @@
 import org.apache.metron.indexing.dao.search.InvalidSearchException;
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.Document;
 
 public class MultiIndexDao implements IndexDao {
@@ -98,6 +100,51 @@
     return null;
   }
 
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    Document latest = getLatest(request.getGuid(), request.getSensorType());
+    addCommentToAlert(request, latest);
+  }
+
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    List<String> exceptions =
+        indices.parallelStream().map(dao -> {
+          try {
+            dao.addCommentToAlert(request, latest);
+            return null;
+          } catch (Throwable e) {
+            return dao.getClass() + ": " + e.getMessage() + "\n" + ExceptionUtils.getStackTrace(e);
+          }
+        }).filter(Objects::nonNull).collect(Collectors.toList());
+    if (exceptions.size() > 0) {
+      throw new IOException(Joiner.on("\n").join(exceptions));
+    }
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    Document latest = getLatest(request.getGuid(), request.getSensorType());
+    removeCommentFromAlert(request, latest);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    List<String> exceptions =
+        indices.parallelStream().map(dao -> {
+          try {
+            dao.removeCommentFromAlert(request, latest);
+            return null;
+          } catch (Throwable e) {
+            return dao.getClass() + ": " + e.getMessage() + "\n" + ExceptionUtils.getStackTrace(e);
+          }
+        }).filter(Objects::nonNull).collect(Collectors.toList());
+    if (exceptions.size() > 0) {
+      throw new IOException(Joiner.on("\n").join(exceptions));
+    }
+  }
+
   private static class DocumentContainer {
     private Optional<Document> d = Optional.empty();
     private Optional<Throwable> t = Optional.empty();
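
The two comment overrides above share one idiom: fan the call out to every child DAO in parallel, collect per-DAO failures, and rethrow them as a single IOException. A self-contained sketch of that idiom, with a hypothetical `Dao` stand-in rather than the real IndexDao:

```java
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

public class FanOutExample {
  /** Hypothetical stand-in for a child DAO. */
  interface Dao {
    void apply() throws Exception;
  }

  /** Run the action on every DAO; aggregate failures into one IOException. */
  static void applyToAll(List<Dao> daos) throws IOException {
    List<String> exceptions = daos.parallelStream().map(dao -> {
      try {
        dao.apply();
        return null;  // success contributes nothing
      } catch (Throwable e) {
        return dao.getClass() + ": " + e.getMessage();
      }
    }).filter(Objects::nonNull).collect(Collectors.toList());
    if (!exceptions.isEmpty()) {
      throw new IOException(String.join("\n", exceptions));
    }
  }

  public static void main(String[] args) {
    try {
      applyToAll(List.of(
          () -> { },  // succeeds
          () -> { throw new IllegalStateException("index unavailable"); }));
    } catch (IOException e) {
      System.out.println("aggregated failure:\n" + e.getMessage());
    }
  }
}
```
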
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/RetrieveLatestDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/RetrieveLatestDao.java
new file mode 100644
index 0000000..caf754c
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/RetrieveLatestDao.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.update.Document;
+
+/**
+ * A base interface for other DAOs to extend.  All DAOs are expected to be able to retrieve
+ * Documents they've stored.
+ */
+public interface RetrieveLatestDao {
+
+  /**
+   * Return the latest version of a document given the GUID and the sensor type.
+   *
+   * @param guid The GUID for the document
+   * @param sensorType The sensor type of the document
+   * @return The Document matching or null if not available.
+   * @throws IOException If an error occurs retrieving the latest document.
+   */
+  Document getLatest(String guid, String sensorType) throws IOException;
+
+  /**
+   * Return a list of the latest versions of documents given a list of GUIDs and sensor types.
+   *
+   * @param getRequests A list of get requests for documents
+   * @return A list of documents matching or an empty list if not available.
+   * @throws IOException If an error occurs retrieving the latest documents.
+   */
+  Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException;
+
+  /**
+   * Return the latest version of a document given a GetRequest.
+   * @param request The GetRequest which indicates the GUID and sensor type.
+   * @return Optionally the document (dependent upon existence in the index).
+   * @throws IOException If an error occurs while retrieving the document.
+   */
+  default Optional<Map<String, Object>> getLatestResult(GetRequest request) throws IOException {
+    Document ret = getLatest(request.getGuid(), request.getSensorType());
+    if (ret == null) {
+      return Optional.empty();
+    } else {
+      return Optional.ofNullable(ret.getDocument());
+    }
+  }
+}
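
As a sketch of this contract, a toy in-memory implementation might look as follows. It assumes Document exposes getGuid() and getSensorType() getters matching its constructor; the composite-key scheme is invented for the example:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.metron.indexing.dao.RetrieveLatestDao;
import org.apache.metron.indexing.dao.search.GetRequest;
import org.apache.metron.indexing.dao.update.Document;

/** A toy RetrieveLatestDao backed by a HashMap, for illustration only. */
public class InMemoryRetrieveLatestDao implements RetrieveLatestDao {
  private final Map<String, Document> store = new HashMap<>();

  /** Key documents by GUID and sensor type so lookups mirror getLatest(). */
  public void put(Document document) {
    store.put(document.getGuid() + "|" + document.getSensorType(), document);
  }

  @Override
  public Document getLatest(String guid, String sensorType) throws IOException {
    return store.get(guid + "|" + sensorType);  // null when absent, per the contract
  }

  @Override
  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
    List<Document> results = new ArrayList<>();
    for (GetRequest request : getRequests) {
      Document d = getLatest(request.getGuid(), request.getSensorType());
      if (d != null) {
        results.add(d);
      }
    }
    return results;
  }
}
```
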
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/DeferredMetaAlertIndexDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/DeferredMetaAlertIndexDao.java
new file mode 100644
index 0000000..1e5e723
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/DeferredMetaAlertIndexDao.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import org.apache.metron.indexing.dao.IndexDao;
+
+/**
+ * Interface for a DAO that is allowed to defer to a child Index DAO in order to perform tasks.
+ * An example is metaalerts deferring to a base DAO.
+ */
+public interface DeferredMetaAlertIndexDao {
+
+  IndexDao getIndexDao();
+
+  String getMetAlertSensorName();
+
+  String getMetaAlertIndex();
+
+  default String getThreatTriageField() {
+    return MetaAlertConstants.THREAT_FIELD_DEFAULT;
+  }
+
+  default String getThreatSort() {
+    return MetaAlertConstants.THREAT_SORT_DEFAULT;
+  }
+}
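
For illustration, a hypothetical implementation is mostly boilerplate; the interface defaults supply the threat-triage field and sort, and the sensor name and index values below are made up:

```java
import org.apache.metron.indexing.dao.IndexDao;
import org.apache.metron.indexing.dao.metaalert.DeferredMetaAlertIndexDao;

/** Illustrative deferring DAO; the returned names are sketch values only. */
public class ExampleDeferredDao implements DeferredMetaAlertIndexDao {
  private final IndexDao indexDao;

  public ExampleDeferredDao(IndexDao indexDao) {
    this.indexDao = indexDao;
  }

  @Override
  public IndexDao getIndexDao() {
    return indexDao;  // the child DAO that work is deferred to
  }

  @Override
  public String getMetAlertSensorName() {
    return "metaalert";
  }

  @Override
  public String getMetaAlertIndex() {
    return "metaalert_index";
  }
  // getThreatTriageField()/getThreatSort() fall back to the interface defaults.
}
```
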
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertAddRemoveRequest.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertAddRemoveRequest.java
index 6183d37..a14749b 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertAddRemoveRequest.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertAddRemoveRequest.java
@@ -14,7 +14,6 @@
  */
 package org.apache.metron.indexing.dao.metaalert;
 
-import java.util.Collection;
 import java.util.List;
 import org.apache.metron.indexing.dao.search.GetRequest;
 
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertConfig.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertConfig.java
new file mode 100644
index 0000000..b538bc2
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertConfig.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.configuration.ConfigurationsUtils;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Supplier;
+
+public abstract class MetaAlertConfig {
+  private String metaAlertIndex;
+  private String threatSort;
+  private Supplier<Map<String, Object>> globalConfigSupplier;
+
+  /**
+   * Simple object for storing and retrieving configs, primarily to make passing all the info to
+   * the sub DAOs easier.
+   * @param metaAlertIndex The metaalert index or collection we're using
+   * @param threatSort The sorting operation on the threat triage field
+   * @param globalConfigSupplier Supplier of the global config, used to resolve field names
+   */
+  public MetaAlertConfig( String metaAlertIndex
+                        , String threatSort
+                        , Supplier<Map<String, Object>> globalConfigSupplier) {
+    this.metaAlertIndex = metaAlertIndex;
+    this.threatSort = threatSort;
+    this.globalConfigSupplier = globalConfigSupplier;
+  }
+
+  public String getMetaAlertIndex() {
+    return metaAlertIndex;
+  }
+
+  public void setMetaAlertIndex(String metaAlertIndex) {
+    this.metaAlertIndex = metaAlertIndex;
+  }
+
+  public String getThreatTriageField() {
+    Optional<Map<String, Object>> globalConfig = Optional.ofNullable(globalConfigSupplier.get());
+    if(!globalConfig.isPresent()) {
+      return getDefaultThreatTriageField();
+    }
+    return ConfigurationsUtils.getFieldName(globalConfig.get(), Constants.THREAT_SCORE_FIELD_PROPERTY, getDefaultThreatTriageField());
+  }
+
+  protected abstract String getDefaultThreatTriageField();
+
+  public String getThreatSort() {
+    return threatSort;
+  }
+
+  public void setThreatSort(String threatSort) {
+    this.threatSort = threatSort;
+  }
+
+  public String getSourceTypeField() {
+    Optional<Map<String, Object>> globalConfig = Optional.ofNullable(globalConfigSupplier.get());
+    if(!globalConfig.isPresent()) {
+      return getDefaultSourceTypeField();
+    }
+    return ConfigurationsUtils.getFieldName(globalConfig.get(), Constants.SENSOR_TYPE_FIELD_PROPERTY, getDefaultSourceTypeField());
+  }
+
+  protected abstract String getDefaultSourceTypeField();
+
+}
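
To make the supplier indirection concrete, a hypothetical subclass might wire the config to a map held elsewhere. When the supplier yields null (e.g. before config has loaded), the getters fall back to the subclass defaults; the example also assumes ConfigurationsUtils.getFieldName returns the supplied default when the property is absent:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;

/** Illustrative config; index name and defaults are made-up sketch values. */
public class ExampleMetaAlertConfig extends MetaAlertConfig {
  public ExampleMetaAlertConfig(Map<String, Object> globalConfig) {
    // The supplier is re-read on every getter call, so live config updates are picked up.
    super("metaalert_index", "sum", () -> globalConfig);
  }

  @Override
  protected String getDefaultThreatTriageField() {
    return "threat:triage:score";
  }

  @Override
  protected String getDefaultSourceTypeField() {
    return "source:type";
  }

  public static void main(String[] args) {
    Map<String, Object> global = new HashMap<>();
    ExampleMetaAlertConfig config = new ExampleMetaAlertConfig(global);
    // No override present, so the default should come back (assuming
    // getFieldName falls back to the default for a missing property).
    System.out.println(config.getThreatTriageField());
  }
}
```
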
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertConstants.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertConstants.java
new file mode 100644
index 0000000..daa5424
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertConstants.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+public class MetaAlertConstants {
+  public static String METAALERT_TYPE = "metaalert";
+  public static String METAALERT_FIELD = "metaalerts";
+  public static String METAALERT_DOC = METAALERT_TYPE + "_doc";
+  public static String THREAT_FIELD_DEFAULT = "threat:triage:score";
+  public static String THREAT_SORT_DEFAULT = "sum";
+  public static String ALERT_FIELD = "metron_alert";
+  public static String STATUS_FIELD = "status";
+  public static String GROUPS_FIELD = "groups";
+}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertDao.java
new file mode 100644
index 0000000..c9e6711
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertDao.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import java.util.Optional;
+import org.apache.metron.indexing.dao.IndexDao;
+
+/**
+ * The MetaAlertDao exposes methods for interacting with meta alerts.  Meta alerts are objects that contain
+ * alerts and summary statistics based on the scores of these alerts.  Meta alerts are returned in searches
+ * just as alerts are and match based on the field values of child alerts.  If a child alert matches a search
+ * the meta alert will be returned while the original child alert will not.  A meta alert also contains a
+ * status field that controls its inclusion in search results and a groups field that can be used to track
+ * the groups a meta alert was created from.
+ *
+ * <p>
+ * The structure of a meta alert is as follows:
+ * {
+ *   "guid": "meta alert guid",
+ *   "timestamp": timestamp,
+ *   "source:type": "metaalert",
+ *   "alerts": [ array of child alerts ],
+ *   "status": "active or inactive",
+ *   "groups": [ array of group names ],
+ *   "average": 10,
+ *   "max": 10,
+ *   "threat:triage:score": 30,
+ *   "count": 3,
+ *   "sum": 30,
+ *   "min": 10,
+ *   "median": 10
+ * }
+ *
+ * <p>
+ * A child alert that has been added to a meta alert will store the meta alert GUID in a "metaalerts" field.
+ * This field is an array of meta alert GUIDs, meaning a child alert can be contained in multiple meta alerts.
+ * Any update to a child alert will trigger an update to the meta alert so that the alert inside a meta alert
+ * and the original alert will be kept in sync.
+ *
+ * <p>
+ * Other fields can be added to a meta alert through the patch method on the IndexDao interface.  However, attempts
+ * to directly change the "alerts" or "status" field will result in an exception.
+ */
+public interface MetaAlertDao extends MetaAlertSearchDao, MetaAlertUpdateDao, IndexDao {
+
+  /**
+   * Initializes a Meta Alert DAO with default "sum" meta alert threat sorting.
+   * @param indexDao The DAO to wrap for our queries.
+   */
+  default void init(IndexDao indexDao) {
+    init(indexDao, Optional.empty());
+  }
+
+  /**
+   * Initializes a Meta Alert DAO.
+   * @param indexDao The DAO to wrap for our queries
+   * @param threatSort The aggregation to use as the threat field. E.g. "sum", "median", etc.
+   *     If empty, "sum" is used.
+   */
+  void init(IndexDao indexDao, Optional<String> threatSort);
+}
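
Usage, then, is wrap-and-init. A minimal sketch, assuming a concrete MetaAlertDao implementation is available (none is shown in this file):

```java
import java.util.Optional;
import org.apache.metron.indexing.dao.IndexDao;
import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;

public class MetaAlertDaoInitExample {
  /** Wire a meta alert DAO over a base DAO, picking "max" as the threat aggregation. */
  public static void initialize(MetaAlertDao metaAlertDao, IndexDao baseDao) {
    metaAlertDao.init(baseDao, Optional.of("max"));
    // Or keep the default "sum" aggregation:
    // metaAlertDao.init(baseDao);
  }
}
```
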
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertRetrieveLatestDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertRetrieveLatestDao.java
new file mode 100644
index 0000000..1a0d2a0
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertRetrieveLatestDao.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+
+public interface MetaAlertRetrieveLatestDao extends RetrieveLatestDao {
+
+}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertSearchDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertSearchDao.java
new file mode 100644
index 0000000..e8b9f26
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertSearchDao.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchDao;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+
+public interface MetaAlertSearchDao extends SearchDao {
+
+  /**
+   * Given an alert GUID, retrieve all associated meta alerts.
+   * @param guid The alert GUID to be searched for
+   * @return All meta alerts with a child alert having the GUID
+   * @throws InvalidSearchException If a problem occurs with the search
+   */
+  SearchResponse getAllMetaAlertsForAlert(String guid) throws InvalidSearchException;
+
+}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertUpdateDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertUpdateDao.java
new file mode 100644
index 0000000..f4374b4
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaAlertUpdateDao.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.InvalidCreateException;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.apache.metron.indexing.dao.update.UpdateDao;
+
+public interface MetaAlertUpdateDao extends UpdateDao {
+
+  String STATUS_PATH = "/" + MetaAlertConstants.STATUS_FIELD;
+  String ALERT_PATH = "/" + MetaAlertConstants.ALERT_FIELD;
+
+  /**
+   * Determines if a given patch request is allowed or not. By default, patching the 'alert' or
+   * 'status' fields is not allowed, because they should be updated via the dedicated methods.
+   * @param request The patch request to examine
+   * @return True if patch can be performed, false otherwise
+   */
+  default boolean isPatchAllowed(PatchRequest request) {
+    if (request.getPatch() != null && !request.getPatch().isEmpty()) {
+      for (Map<String, Object> patch : request.getPatch()) {
+        Object pathObj = patch.get("path");
+        if (pathObj instanceof String) {
+          String path = (String) pathObj;
+          if (STATUS_PATH.equals(path) || ALERT_PATH.equals(path)) {
+            return false;
+          }
+        }
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Creates a meta alert from a list of child alerts.  The most recent version of each child alert is
+   * retrieved using the DAO abstractions.
+   *
+   * @param request A request object containing get requests for alerts to be added and a list of groups
+   * @return A response indicating success or failure along with the GUID of the new meta alert
+   * @throws InvalidCreateException If a malformed create request is provided
+   * @throws IOException If a problem occurs during communication
+   */
+  MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
+      throws InvalidCreateException, IOException;
+
+  /**
+   * Adds alerts to a metaalert, based on a list of GetRequests provided for retrieval.
+   * @param metaAlertGuid The GUID of the metaalert to be given new children.
+   * @param alertRequests GetRequests for the appropriate alerts to add.
+   * @return True if metaalert is modified, false otherwise.
+   */
+  boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException;
+
+  /**
+   * Removes alerts from a metaalert.
+   * @param metaAlertGuid The metaalert guid to be affected.
+   * @param alertRequests A list of GetRequests that will provide the alerts to remove
+   * @return True if there are updates, false otherwise
+   * @throws IOException If an error is thrown during retrieval.
+   */
+  boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException;
+
+  /**
+   * Removes a metaalert link from a given alert. A nonexistent link results in no change.
+   * @param metaAlertGuid The metaalert GUID to be removed.
+   * @param alert The alert to remove the link from.
+   * @return True if the alert changed, false otherwise.
+   */
+  default boolean removeMetaAlertFromAlert(String metaAlertGuid, Document alert) {
+    List<String> metaAlertField = new ArrayList<>();
+    @SuppressWarnings("unchecked")
+    List<String> alertField = (List<String>) alert.getDocument()
+        .get(MetaAlertConstants.METAALERT_FIELD);
+    if (alertField != null) {
+      metaAlertField.addAll(alertField);
+    }
+    boolean metaAlertRemoved = metaAlertField.remove(metaAlertGuid);
+    if (metaAlertRemoved) {
+      alert.getDocument().put(MetaAlertConstants.METAALERT_FIELD, metaAlertField);
+    }
+    return metaAlertRemoved;
+  }
+
+  /**
+   * The meta alert status field can be set to either 'active' or 'inactive' and will control whether or not meta alerts
+   * (and child alerts) appear in search results.  An 'active' status will cause meta alerts to appear in search
+   * results instead of their child alerts and an 'inactive' status will suppress the meta alert from search results
+   * with child alerts appearing in search results as normal.  A change to 'inactive' will cause the meta alert GUID to
+   * be removed from all of its child alerts' "metaalerts" fields.  A change back to 'active' will have the opposite effect.
+   *
+   * @param metaAlertGuid The GUID of the meta alert
+   * @param status A status value of 'active' or 'inactive'
+   * @return True or false depending on if the status was changed
+   * @throws IOException if an error occurs during the update.
+   */
+  boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status)
+      throws IOException;
+
+  /**
+   * Adds a metaalert link to a provided alert Document.  Adding an existing link results in no change.
+   * @param metaAlertGuid The GUID to be added.
+   * @param alert The alert we're adding the link to.
+   * @return True if the alert is modified, false if not.
+   */
+  default boolean addMetaAlertToAlert(String metaAlertGuid, Document alert) {
+    List<String> metaAlertField = new ArrayList<>();
+    @SuppressWarnings("unchecked")
+    List<String> alertField = (List<String>) alert.getDocument()
+        .get(MetaAlertConstants.METAALERT_FIELD);
+    if (alertField != null) {
+      metaAlertField.addAll(alertField);
+    }
+
+    boolean metaAlertAdded = !metaAlertField.contains(metaAlertGuid);
+    if (metaAlertAdded) {
+      metaAlertField.add(metaAlertGuid);
+      alert.getDocument().put(MetaAlertConstants.METAALERT_FIELD, metaAlertField);
+    }
+    return metaAlertAdded;
+  }
+}
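
The isPatchAllowed guard above reduces to scanning the patch operations for the two protected paths. A standalone restatement in plain Java (no Metron types), using the same path values as the constants:

```java
import java.util.List;
import java.util.Map;

public class PatchGuardExample {
  // Same protected paths as MetaAlertUpdateDao ("/status" and "/" + ALERT_FIELD).
  static final String STATUS_PATH = "/status";
  static final String ALERT_PATH = "/metron_alert";

  /** Mirrors the default isPatchAllowed logic over a raw JSON-patch list. */
  static boolean isPatchAllowed(List<Map<String, Object>> patch) {
    if (patch != null) {
      for (Map<String, Object> op : patch) {
        Object path = op.get("path");
        if (path instanceof String
            && (STATUS_PATH.equals(path) || ALERT_PATH.equals(path))) {
          return false;  // status/alert changes must go through dedicated methods
        }
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Patching an ordinary field is fine...
    System.out.println(isPatchAllowed(
        List.of(Map.of("op", "add", "path", "/notes", "value", "reviewed"))));   // true
    // ...but touching /status is rejected.
    System.out.println(isPatchAllowed(
        List.of(Map.of("op", "replace", "path", "/status", "value", "inactive"))));  // false
  }
}
```
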
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaScores.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaScores.java
index 07285d6..55b1aa0 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaScores.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/MetaScores.java
@@ -18,12 +18,14 @@
 
 package org.apache.metron.indexing.dao.metaalert;
 
-import org.apache.commons.math3.stat.descriptive.rank.Median;
-
+import java.util.ArrayList;
 import java.util.DoubleSummaryStatistics;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.apache.commons.math3.stat.descriptive.rank.Median;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.stellar.common.utils.ConversionUtils;
 
 public class MetaScores {
 
@@ -52,4 +54,50 @@
   public Map<String, Object> getMetaScores() {
     return metaScores;
   }
+
+  /**
+   * Calculate the meta alert scores for a Document. The scores are placed directly in the provided
+   * document.
+   * @param metaAlert The Document containing the child alerts to score
+   * @param threatTriageField The threat triage score field on each child alert
+   * @param threatSort The aggregation (e.g. "sum", "max") used as the overall threat score
+   */
+  @SuppressWarnings("unchecked")
+  public static void calculateMetaScores(Document metaAlert, String threatTriageField,
+      String threatSort) {
+    MetaScores metaScores = new MetaScores(new ArrayList<>());
+    List<Object> alertsRaw = ((List<Object>) metaAlert.getDocument()
+        .get(MetaAlertConstants.ALERT_FIELD));
+    if (alertsRaw != null && !alertsRaw.isEmpty()) {
+      ArrayList<Double> scores = new ArrayList<>();
+      for (Object alertRaw : alertsRaw) {
+        Map<String, Object> alert = (Map<String, Object>) alertRaw;
+        Double scoreNum = parseThreatField(alert.get(threatTriageField));
+        if (scoreNum != null) {
+          scores.add(scoreNum);
+        }
+      }
+      metaScores = new MetaScores(scores);
+    }
+
+    // add a summary (max, min, avg, ...) of all the threat scores from the child alerts
+    metaAlert.getDocument().putAll(metaScores.getMetaScores());
+
+    // add the overall threat score for the metaalert; one of the summary aggregations as defined
+    // by `threatSort`
+    Object threatScore = metaScores.getMetaScores().get(threatSort);
+
+    // add the threat score as a float; type needs to match the threat score field from each of
+    // the sensor indices
+    metaAlert.getDocument()
+        .put(threatTriageField, ConversionUtils.convert(threatScore, Float.class));
+  }
+
+  protected static Double parseThreatField(Object threatRaw) {
+    Double threat = null;
+    if (threatRaw instanceof Number) {
+      threat = ((Number) threatRaw).doubleValue();
+    } else if (threatRaw instanceof String) {
+      threat = Double.parseDouble((String) threatRaw);
+    }
+    return threat;
+  }
 }
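
The new static calculateMetaScores can be exercised directly against a hand-built Document. A sketch using only the constructor and constants visible in this patch (the summary-stat key names, e.g. "sum", follow the meta alert structure documented earlier):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
import org.apache.metron.indexing.dao.metaalert.MetaScores;
import org.apache.metron.indexing.dao.update.Document;

public class MetaScoresExample {
  public static void main(String[] args) {
    // Two child alerts with threat scores; String scores are also parsed.
    Map<String, Object> alert1 = new HashMap<>();
    alert1.put(MetaAlertConstants.THREAT_FIELD_DEFAULT, 10.0d);
    Map<String, Object> alert2 = new HashMap<>();
    alert2.put(MetaAlertConstants.THREAT_FIELD_DEFAULT, "20");

    List<Map<String, Object>> alerts = new ArrayList<>();
    alerts.add(alert1);
    alerts.add(alert2);

    Map<String, Object> metaSource = new HashMap<>();
    metaSource.put(MetaAlertConstants.ALERT_FIELD, alerts);
    Document metaAlert = new Document(metaSource, "meta-guid",
        MetaAlertConstants.METAALERT_TYPE, System.currentTimeMillis());

    // Adds the summary stats (max, min, average, count, sum, median), then copies
    // the "sum" aggregation (30.0) into the threat triage field as a Float.
    MetaScores.calculateMetaScores(metaAlert,
        MetaAlertConstants.THREAT_FIELD_DEFAULT, MetaAlertConstants.THREAT_SORT_DEFAULT);
    System.out.println(metaAlert.getDocument());
  }
}
```
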
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/lucene/AbstractLuceneMetaAlertUpdateDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/lucene/AbstractLuceneMetaAlertUpdateDao.java
new file mode 100644
index 0000000..4d48075
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/metaalert/lucene/AbstractLuceneMetaAlertUpdateDao.java
@@ -0,0 +1,337 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert.lucene;
+
+import static org.apache.metron.common.Constants.GUID;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.UUID;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.configuration.ConfigurationsUtils;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertRetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertUpdateDao;
+import org.apache.metron.indexing.dao.metaalert.MetaScores;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.apache.metron.indexing.dao.update.UpdateDao;
+
+public abstract class AbstractLuceneMetaAlertUpdateDao implements MetaAlertUpdateDao {
+
+  private UpdateDao updateDao;
+  private MetaAlertRetrieveLatestDao retrieveLatestDao;
+  private MetaAlertConfig config;
+
+  protected AbstractLuceneMetaAlertUpdateDao(
+      UpdateDao updateDao,
+      MetaAlertRetrieveLatestDao retrieveLatestDao,
+      MetaAlertConfig config) {
+    this.updateDao = updateDao;
+    this.retrieveLatestDao = retrieveLatestDao;
+    this.config = config;
+  }
+
+  public UpdateDao getUpdateDao() {
+    return updateDao;
+  }
+
+  public MetaAlertRetrieveLatestDao getRetrieveLatestDao() {
+    return retrieveLatestDao;
+  }
+
+  public MetaAlertConfig getConfig() {
+    return config;
+  }
+
+  /**
+   * Performs a patch operation on a document based on the result of {@link #isPatchAllowed(PatchRequest)}
+   *
+   * @param retrieveLatestDao DAO to retrieve the item to be patched
+   * @param request The patch request.
+   * @param timestamp Optionally a timestamp to set. If not specified then current time is used.
+   * @throws OriginalNotFoundException If no original document is found to patch.
+   * @throws IOException If an error occurs performing the patch.
+   */
+  @Override
+  public void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+      Optional<Long> timestamp)
+      throws OriginalNotFoundException, IOException {
+    if (isPatchAllowed(request)) {
+      updateDao.patch(retrieveLatestDao, request, timestamp);
+    } else {
+      throw new IllegalArgumentException(
+          "Meta alert patches are not allowed for /alert or /status paths.  "
+              + "Please use the add/remove alert or update status functions instead.");
+    }
+  }
+
+  @Override
+  public void batchUpdate(Map<Document, Optional<String>> updates) {
+    throw new UnsupportedOperationException("Meta alerts do not allow for bulk updates");
+  }
+
+  /**
+   * Build the Document representing a meta alert to be created.
+   * @param alerts The Elasticsearch results for the meta alert's child documents
+   * @param groups The groups used to create this meta alert
+   * @return A Document representing the new meta alert
+   */
+  protected Document buildCreateDocument(Iterable<Document> alerts, List<String> groups,
+      String alertField) {
+    // Need to create a Document from the multiget. Scores will be calculated later
+    Map<String, Object> metaSource = new HashMap<>();
+    List<Map<String, Object>> alertList = new ArrayList<>();
+    for (Document alert : alerts) {
+      alertList.add(alert.getDocument());
+    }
+    metaSource.put(alertField, alertList);
+
+    // Add any meta fields
+    String guid = UUID.randomUUID().toString();
+    metaSource.put(GUID, guid);
+    metaSource.put(Constants.Fields.TIMESTAMP.getName(), System.currentTimeMillis());
+    metaSource.put(MetaAlertConstants.GROUPS_FIELD, groups);
+    metaSource.put(MetaAlertConstants.STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
+
+    return new Document(metaSource, guid, MetaAlertConstants.METAALERT_TYPE,
+        System.currentTimeMillis());
+  }
+
+  /**
+   * Builds the set of updates when alerts are removed from a meta alert
+   * @param metaAlert The meta alert to remove alerts from
+   * @param alerts The alert Documents to be removed
+   * @return The updates to be run
+   * @throws IOException If an error is thrown.
+   */
+  @SuppressWarnings("unchecked")
+  protected Map<Document, Optional<String>> buildRemoveAlertsFromMetaAlert(Document metaAlert,
+      Iterable<Document> alerts)
+      throws IOException {
+    Map<Document, Optional<String>> updates = new HashMap<>();
+
+    List<String> alertGuids = new ArrayList<>();
+    for (Document alert : alerts) {
+      alertGuids.add(alert.getGuid());
+    }
+    List<Map<String, Object>> alertsBefore = new ArrayList<>();
+    Map<String, Object> documentBefore = metaAlert.getDocument();
+    if (documentBefore.containsKey(MetaAlertConstants.ALERT_FIELD)) {
+      alertsBefore
+          .addAll((List<Map<String, Object>>) documentBefore.get(MetaAlertConstants.ALERT_FIELD));
+    }
+    boolean metaAlertUpdated = removeAlertsFromMetaAlert(metaAlert, alertGuids);
+    if (metaAlertUpdated) {
+      List<Map<String, Object>> alertsAfter = (List<Map<String, Object>>) metaAlert.getDocument()
+          .get(MetaAlertConstants.ALERT_FIELD);
+      if (alertsAfter.size() < alertsBefore.size() && alertsAfter.size() == 0) {
+        throw new IllegalStateException("Removing these alerts will result in an empty meta alert.  Empty meta alerts are not allowed.");
+      }
+      MetaScores
+          .calculateMetaScores(metaAlert, config.getThreatTriageField(), config.getThreatSort());
+      updates.put(metaAlert, Optional.of(config.getMetaAlertIndex()));
+      for (Document alert : alerts) {
+        if (removeMetaAlertFromAlert(metaAlert.getGuid(), alert)) {
+          updates.put(alert, Optional.empty());
+        }
+      }
+    }
+    return updates;
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException {
+    Document metaAlert = retrieveLatestDao
+        .getLatest(metaAlertGuid, MetaAlertConstants.METAALERT_TYPE);
+    if (metaAlert == null) {
+      return false;
+    }
+    if (MetaAlertStatus.ACTIVE.getStatusString()
+        .equals(metaAlert.getDocument().get(MetaAlertConstants.STATUS_FIELD))) {
+      Iterable<Document> alerts = retrieveLatestDao.getAllLatest(alertRequests);
+      Map<Document, Optional<String>> updates = buildRemoveAlertsFromMetaAlert(metaAlert, alerts);
+      update(updates);
+      return updates.size() != 0;
+    } else {
+      throw new IllegalStateException("Removing alerts from an INACTIVE meta alert is not allowed");
+    }
+  }
+
+  /**
+   * Removes a given set of alerts from a given meta alert. Alert GUIDs that are not found are ignored.
+   * @param metaAlert The metaalert to be mutated.
+   * @param alertGuids The GUIDs of the alerts to remove from the metaalert.
+   * @return True if the metaAlert changed, false otherwise.
+   */
+  protected boolean removeAlertsFromMetaAlert(Document metaAlert, Collection<String> alertGuids) {
+    // If we don't have child alerts or nothing is being removed, immediately return false.
+    if (!metaAlert.getDocument().containsKey(MetaAlertConstants.ALERT_FIELD)
+        || alertGuids.size() == 0) {
+      return false;
+    }
+
+    @SuppressWarnings("unchecked")
+    List<Map<String, Object>> currentAlerts = (List<Map<String, Object>>) metaAlert.getDocument()
+        .get(MetaAlertConstants.ALERT_FIELD);
+    int previousSize = currentAlerts.size();
+    // Only remove an alert if it is in the meta alert
+    currentAlerts.removeIf(currentAlert -> alertGuids.contains(currentAlert.get(GUID)));
+    return currentAlerts.size() != previousSize;
+  }
+
+  @Override
+  public boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status)
+      throws IOException {
+    Document metaAlert = retrieveLatestDao
+        .getLatest(metaAlertGuid, MetaAlertConstants.METAALERT_TYPE);
+    String currentStatus = (String) metaAlert.getDocument().get(MetaAlertConstants.STATUS_FIELD);
+    boolean metaAlertUpdated = !status.getStatusString().equals(currentStatus);
+    if (metaAlertUpdated) {
+      List<GetRequest> getRequests = new ArrayList<>();
+      @SuppressWarnings("unchecked")
+      List<Map<String, Object>> currentAlerts = (List<Map<String, Object>>) metaAlert.getDocument()
+          .get(MetaAlertConstants.ALERT_FIELD);
+      currentAlerts.stream()
+          .forEach(currentAlert -> getRequests.add(new GetRequest((String) currentAlert.get(GUID),
+              (String) currentAlert.get(config.getSourceTypeField()))));
+      Iterable<Document> alerts = retrieveLatestDao.getAllLatest(getRequests);
+      Map<Document, Optional<String>> updates = buildStatusChangeUpdates(metaAlert, alerts, status);
+      update(updates);
+    }
+    return metaAlertUpdated;
+  }
+
+  /**
+   * Given a Metaalert and a status change, builds the set of updates to be run.
+   * @param metaAlert The metaalert to have status changed
+   * @param alerts The alerts to change status for
+   * @param status The status to change to
+   * @return The updates to be run
+   */
+  protected Map<Document, Optional<String>> buildStatusChangeUpdates(Document metaAlert,
+      Iterable<Document> alerts,
+      MetaAlertStatus status) {
+    metaAlert.getDocument().put(MetaAlertConstants.STATUS_FIELD, status.getStatusString());
+
+    Map<Document, Optional<String>> updates = new HashMap<>();
+    updates.put(metaAlert, Optional.of(config.getMetaAlertIndex()));
+
+    for (Document alert : alerts) {
+      boolean metaAlertAdded = false;
+      boolean metaAlertRemoved = false;
+      // If we're making it active, add the meta alert guid to every alert.
+      if (MetaAlertStatus.ACTIVE.equals(status)) {
+        metaAlertAdded = addMetaAlertToAlert(metaAlert.getGuid(), alert);
+      }
+      // If we're making it inactive, remove the meta alert guid from every alert.
+      if (MetaAlertStatus.INACTIVE.equals(status)) {
+        metaAlertRemoved = removeMetaAlertFromAlert(metaAlert.getGuid(), alert);
+      }
+      if (metaAlertAdded || metaAlertRemoved) {
+        updates.put(alert, Optional.empty());
+      }
+    }
+    return updates;
+  }
+
+  /**
+   * Builds the updates to be run based on a given metaalert and a set of new alerts for it.
+   * @param metaAlert The base metaalert we're building updates for
+   * @param alerts The alerts being added
+   * @return The set of resulting updates.
+   */
+  protected Map<Document, Optional<String>> buildAddAlertToMetaAlertUpdates(Document metaAlert,
+      Iterable<Document> alerts) {
+    Map<Document, Optional<String>> updates = new HashMap<>();
+    boolean metaAlertUpdated = addAlertsToMetaAlert(metaAlert, alerts);
+    if (metaAlertUpdated) {
+      MetaScores
+          .calculateMetaScores(metaAlert, config.getThreatTriageField(), config.getThreatSort());
+      updates.put(metaAlert, Optional.of(config.getMetaAlertIndex()));
+      for (Document alert : alerts) {
+        if (addMetaAlertToAlert(metaAlert.getGuid(), alert)) {
+          updates.put(alert, Optional.empty());
+        }
+      }
+    }
+    return updates;
+  }
+
+  /**
+   * Adds the provided alerts to a given metaalert.
+   * @param metaAlert The metaalert to be given new children.
+   * @param alerts The alerts to be added as children
+   * @return True if metaalert is modified, false otherwise.
+   */
+  protected boolean addAlertsToMetaAlert(Document metaAlert, Iterable<Document> alerts) {
+    boolean alertAdded = false;
+    @SuppressWarnings("unchecked")
+    List<Map<String, Object>> currentAlerts = (List<Map<String, Object>>) metaAlert.getDocument()
+        .get(MetaAlertConstants.ALERT_FIELD);
+    if (currentAlerts == null) {
+      currentAlerts = new ArrayList<>();
+      metaAlert.getDocument().put(MetaAlertConstants.ALERT_FIELD, currentAlerts);
+    }
+    Set<String> currentAlertGuids = currentAlerts.stream().map(currentAlert ->
+        (String) currentAlert.get(GUID)).collect(Collectors.toSet());
+    for (Document alert : alerts) {
+      String alertGuid = alert.getGuid();
+      // Only add an alert if it isn't already in the meta alert
+      if (!currentAlertGuids.contains(alertGuid)) {
+        currentAlerts.add(alert.getDocument());
+        alertAdded = true;
+      }
+    }
+    return alertAdded;
+  }
+
+  /**
+   * Calls the single update variant if there's only one update, otherwise calls batch.
+   * MetaAlerts may defer to an implementation specific IndexDao.
+   * @param updates The list of updates to run
+   * @throws IOException If there's an update error
+   */
+  protected void update(Map<Document, Optional<String>> updates)
+      throws IOException {
+    if (updates.size() == 1) {
+      Entry<Document, Optional<String>> singleUpdate = updates.entrySet().iterator().next();
+      updateDao.update(singleUpdate.getKey(), singleUpdate.getValue());
+    } else if (updates.size() > 1) {
+      updateDao.batchUpdate(updates);
+    } // else we have no updates, so don't do anything
+  }
+
+}
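
The protected update helper's single-vs-batch dispatch deserves a note: one update uses the single-document path, several use batchUpdate, and an empty map is a no-op. A stripped-down restatement with a hypothetical sink interface (the real UpdateDao has a wider surface):

```java
import java.io.IOException;
import java.util.Map;
import java.util.Optional;

public class UpdateDispatchExample {
  /** Hypothetical stand-in exposing just the two calls the dispatch needs. */
  interface UpdateSink {
    void update(String doc, Optional<String> index) throws IOException;
    void batchUpdate(Map<String, Optional<String>> updates) throws IOException;
  }

  /** One update goes through the single-document path; several go through batch. */
  static void dispatch(UpdateSink sink, Map<String, Optional<String>> updates)
      throws IOException {
    if (updates.size() == 1) {
      Map.Entry<String, Optional<String>> single = updates.entrySet().iterator().next();
      sink.update(single.getKey(), single.getValue());
    } else if (updates.size() > 1) {
      sink.batchUpdate(updates);
    }
    // An empty map is deliberately a no-op.
  }
}
```
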
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/AlertComment.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/AlertComment.java
new file mode 100644
index 0000000..04aac60
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/AlertComment.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.search;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+
+public class AlertComment {
+
+  private static final String COMMENT_FIELD = "comment";
+  private static final String COMMENT_USERNAME_FIELD = "username";
+  private static final String COMMENT_TIMESTAMP_FIELD = "timestamp";
+  private String comment;
+  private String username;
+  private long timestamp;
+
+  private JSONParser parser = new JSONParser();
+
+  public AlertComment(String comment, String username, long timestamp) {
+    this.comment = comment;
+    this.username = username;
+    this.timestamp = timestamp;
+  }
+
+  public AlertComment(String json) throws ParseException {
+    JSONObject parsed = (JSONObject) parser.parse(json);
+    this.comment = (String) parsed.get(COMMENT_FIELD);
+    this.username = (String) parsed.get(COMMENT_USERNAME_FIELD);
+    this.timestamp = (long) parsed.get(COMMENT_TIMESTAMP_FIELD);
+  }
+
+  public AlertComment(Map<String, Object> comment) {
+    this.comment = (String) comment.get(COMMENT_FIELD);
+    this.username = (String) comment.get(COMMENT_USERNAME_FIELD);
+    this.timestamp = (long) comment.get(COMMENT_TIMESTAMP_FIELD);
+  }
+
+  public String getComment() {
+    return comment;
+  }
+
+  public String getUsername() {
+    return username;
+  }
+
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  @SuppressWarnings("unchecked")
+  public String asJson() {
+    return asJSONObject().toJSONString();
+  }
+
+  @SuppressWarnings("unchecked")
+  public Map<String, Object> asMap() {
+    Map<String, Object> map = new HashMap<>();
+    map.put(COMMENT_FIELD, comment);
+    map.put(COMMENT_USERNAME_FIELD, username);
+    map.put(COMMENT_TIMESTAMP_FIELD, timestamp);
+    return map;
+  }
+
+  @SuppressWarnings("unchecked")
+  public JSONObject asJSONObject() {
+    JSONObject json = new JSONObject();
+    json.put(COMMENT_FIELD, comment);
+    json.put(COMMENT_USERNAME_FIELD, username);
+    json.put(COMMENT_TIMESTAMP_FIELD, timestamp);
+    return json;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    AlertComment that = (AlertComment) o;
+
+    if (getTimestamp() != that.getTimestamp()) {
+      return false;
+    }
+    if (getComment() != null ? !getComment().equals(that.getComment())
+        : that.getComment() != null) {
+      return false;
+    }
+    return getUsername() != null ? getUsername().equals(that.getUsername())
+        : that.getUsername() == null;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = getComment() != null ? getComment().hashCode() : 0;
+    result = 31 * result + (getUsername() != null ? getUsername().hashCode() : 0);
+    result = 31 * result + (int) (getTimestamp() ^ (getTimestamp() >>> 32));
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "AlertComment{" +
+        "comment='" + comment + '\'' +
+        ", username='" + username + '\'' +
+        ", timestamp=" + timestamp +
+        '}';
+  }
+}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/SearchDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/SearchDao.java
new file mode 100644
index 0000000..582f1ef
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/SearchDao.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.indexing.dao.search;
+
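+/**
+ * Defines the search operations an index must support: free-form search over documents and
+ * grouping of results.
+ */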
+public interface SearchDao {
+
+  /**
+   * Returns a search response based on the given search request.
+   *
+   * @param searchRequest The request defining the search parameters.
+   * @return A response containing the results of the search.
+   * @throws InvalidSearchException If the search request is malformed.
+   */
+  SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException;
+
+  /**
+   * Returns a group response based on the given group request.
+   *
+   * @param groupRequest The request defining the grouping parameters.
+   * @return A response containing the results of the grouping operation.
+   * @throws InvalidSearchException If the grouping request is malformed.
+   */
+  GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException;
+}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/SearchResponse.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/SearchResponse.java
index aad489a..b4dfab7 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/SearchResponse.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/search/SearchResponse.java
@@ -18,7 +18,6 @@
 package org.apache.metron.indexing.dao.search;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -61,4 +60,36 @@
   public void setFacetCounts(Map<String, Map<String, Long>> facetCounts) {
     this.facetCounts = facetCounts;
   }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    SearchResponse that = (SearchResponse) o;
+
+    return getTotal() == that.getTotal() &&
+            (getResults() != null ? getResults().equals(that.getResults()) : that.getResults() == null) &&
+            (getFacetCounts() != null ? getFacetCounts().equals(that.getFacetCounts()) : that.getFacetCounts() == null);
+  }
+
+  @Override
+  public int hashCode() {
+    int result = 31 * (int) (getTotal() ^ (getTotal() >>> 32))
+        + (getResults() != null ? getResults().hashCode() : 0);
+    result = 31 * result + (getFacetCounts() != null ? getFacetCounts().hashCode() : 0);
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "SearchResponse{" +
+        "total=" + total +
+        ", results=" + results +
+        ", facetCounts=" + facetCounts +
+        '}';
+  }
 }
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/CommentAddRemoveRequest.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/CommentAddRemoveRequest.java
new file mode 100644
index 0000000..8e8bde7
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/CommentAddRemoveRequest.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.update;
+
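+/**
+ * A request to add a comment to, or remove a comment from, an alert. The alert is identified
+ * by GUID and sensor type; the comment itself is described by its text, username, and
+ * timestamp.
+ */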
+public class CommentAddRemoveRequest {
+  private String guid;
+  private String sensorType;
+  private String comment;
+  private String username;
+  private long timestamp;
+
+  public String getGuid() {
+    return guid;
+  }
+
+  public void setGuid(String guid) {
+    this.guid = guid;
+  }
+
+  public String getSensorType() {
+    return sensorType;
+  }
+
+  public void setSensorType(String sensorType) {
+    this.sensorType = sensorType;
+  }
+
+  public String getComment() {
+    return comment;
+  }
+
+  public void setComment(String comment) {
+    this.comment = comment;
+  }
+
+  public String getUsername() {
+    return username;
+  }
+
+  public void setUsername(String username) {
+    this.username = username;
+  }
+
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  public void setTimestamp(long timestamp) {
+    this.timestamp = timestamp;
+  }
+
+  @Override
+  public String toString() {
+    return "CommentAddRemoveRequest{" +
+        "guid='" + guid + '\'' +
+        ", sensorType='" + sensorType + '\'' +
+        ", comment='" + comment + '\'' +
+        ", username='" + username + '\'' +
+        ", timestamp=" + timestamp +
+        '}';
+  }
+}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/Document.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/Document.java
index 6f2f779..3686b19 100644
--- a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/Document.java
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/Document.java
@@ -18,10 +18,10 @@
 
 package org.apache.metron.indexing.dao.update;
 
-import org.apache.metron.common.utils.JSONUtils;
-
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
+import org.apache.metron.common.utils.JSONUtils;
 
 public class Document {
   Long timestamp;
@@ -36,7 +36,6 @@
     setSensorType(sensorType);
   }
 
-
   public Document(String document, String guid, String sensorType, Long timestamp) throws IOException {
     this(convertDoc(document), guid, sensorType, timestamp);
   }
@@ -45,6 +44,15 @@
     this( document, guid, sensorType, null);
   }
 
+  /**
+   * Copy constructor.
+   *
+   * @param other The document to be copied.
+   */
+  public Document(Document other) {
+    this(new HashMap<>(other.getDocument()), other.getGuid(), other.getSensorType(),
+        other.getTimestamp());
+  }
+
   private static Map<String, Object> convertDoc(String document) throws IOException {
       return JSONUtils.INSTANCE.load(document, JSONUtils.MAP_SUPPLIER);
   }
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/UpdateDao.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/UpdateDao.java
new file mode 100644
index 0000000..b5f38e4
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/dao/update/UpdateDao.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.indexing.dao.update;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+
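+/**
+ * Defines the update operations an index must support: full and batch document updates,
+ * alert comment management, and default implementations of patch and replace.
+ */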
+public interface UpdateDao {
+
+  /**
+   * Update a given Document and optionally the index where the document exists.  This is a full
+   * update, meaning the current document will be replaced if it exists or a new document will be
+   * created if it does not exist.  Partial updates are not supported in this method.
+   *
+   * @param update The full document that will replace the current version in the index.
+   * @param index The index where the document lives.
+   * @throws IOException If an error occurs during the update.
+   */
+  void update(Document update, Optional<String> index) throws IOException;
+
+  /**
+   * Similar to the update method but accepts multiple documents and performs updates in batch.
+   *
+   * @param updates A map from each document to update to the index where it lives.
+   * @throws IOException If an error occurs during the updates.
+   */
+  void batchUpdate(Map<Document, Optional<String>> updates) throws IOException;
+
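+  /**
+   * Adds a comment to the alert identified by the request.
+   *
+   * @param request Identifies the alert plus the comment text, user, and timestamp to add.
+   * @throws IOException If an error occurs during the update.
+   */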
+  void addCommentToAlert(CommentAddRemoveRequest request) throws IOException;
+
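+  /**
+   * Removes a comment from the alert identified by the request.
+   *
+   * @param request Identifies the alert plus the comment text, user, and timestamp to remove.
+   * @throws IOException If an error occurs during the update.
+   */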
+  void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException;
+
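+  /**
+   * Adds a comment to an alert, using a caller-supplied latest version of the alert document
+   * rather than fetching it.
+   *
+   * @param request Identifies the alert and the comment to add.
+   * @param latest The latest version of the alert document to update.
+   * @throws IOException If an error occurs during the update.
+   */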
+  void addCommentToAlert(CommentAddRemoveRequest request, Document latest) throws IOException;
+
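+  /**
+   * Removes a comment from an alert, using a caller-supplied latest version of the alert
+   * document rather than fetching it.
+   *
+   * @param request Identifies the alert and the comment to remove.
+   * @param latest The latest version of the alert document to update.
+   * @throws IOException If an error occurs during the update.
+   */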
+  void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) throws IOException;
+
+  /**
+   * Update a document in an index given a JSON Patch (see RFC 6902 at
+   * https://tools.ietf.org/html/rfc6902).
+   *
+   * @param retrieveLatestDao The DAO used to fetch the latest version of the document to be patched.
+   * @param request The patch request.
+   * @param timestamp The timestamp (optional) to set. If not specified, the current time is used.
+   * @throws OriginalNotFoundException If the original document is not found, it cannot be patched.
+   * @throws IOException If an error occurs while patching.
+   */
+  default void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+      Optional<Long> timestamp) throws OriginalNotFoundException, IOException {
+    Document d = getPatchedDocument(retrieveLatestDao, request, timestamp);
+    update(d, Optional.ofNullable(request.getIndex()));
+  }
+
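+  /**
+   * Builds the patched document without writing it: the latest version is taken from the
+   * request's source if supplied, otherwise fetched via the RetrieveLatestDao, and the JSON
+   * patch is applied to it along with the supplied (or current) timestamp.
+   */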
+  default Document getPatchedDocument(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+      Optional<Long> timestamp) throws OriginalNotFoundException, IOException {
+    Map<String, Object> latest = request.getSource();
+    if (latest == null) {
+      Document latestDoc = retrieveLatestDao.getLatest(request.getGuid(), request.getSensorType());
+      if (latestDoc != null && latestDoc.getDocument() != null) {
+        latest = latestDoc.getDocument();
+      } else {
+        throw new OriginalNotFoundException(
+            "Unable to patch a document that doesn't exist and isn't specified.");
+      }
+    }
+
+    Map<String, Object> updated = JSONUtils.INSTANCE.applyPatch(request.getPatch(), latest);
+    return new Document(updated,
+        request.getGuid(),
+        request.getSensorType(),
+        timestamp.orElse(System.currentTimeMillis()));
+  }
+
+  /**
+   * Replace a document in an index.
+   * @param request The replacement request.
+   * @param timestamp The timestamp (optional) of the update. If not specified, the current time is used.
+   * @throws IOException If an error occurs during replacement.
+   */
+  default void replace(ReplaceRequest request, Optional<Long> timestamp)
+      throws IOException {
+    Document d = new Document(request.getReplacement(),
+        request.getGuid(),
+        request.getSensorType(),
+        timestamp.orElse(System.currentTimeMillis())
+    );
+    update(d, Optional.ofNullable(request.getIndex()));
+  }
+}
diff --git a/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/util/IndexingCacheUtil.java b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/util/IndexingCacheUtil.java
new file mode 100644
index 0000000..86a0642
--- /dev/null
+++ b/metron-platform/metron-indexing/src/main/java/org/apache/metron/indexing/util/IndexingCacheUtil.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.util;
+
+import java.util.Map;
+import java.util.function.Function;
+import org.apache.metron.common.configuration.IndexingConfigurations;
+import org.apache.metron.common.zookeeper.ConfigurationsCache;
+
+public class IndexingCacheUtil {
+
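+  /**
+   * Builds a function that resolves a sensor type to the index it is written to, by consulting
+   * the sensor's indexing configuration for the given writer. When no index is configured for
+   * the writer, the sensor type itself is returned. For example (illustrative), with writer
+   * name "solr", {@code getIndexLookupFunction(cache, "solr").apply("bro")} yields the index
+   * configured for bro's solr writer, or "bro" if none is set.
+   */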
+  @SuppressWarnings("unchecked")
+  public static Function<String, String> getIndexLookupFunction(ConfigurationsCache cache, String writerName) {
+    return sensorType -> {
+      String indexingTopic = sensorType;
+      IndexingConfigurations indexingConfigs = cache.get(IndexingConfigurations.class);
+      Map<String, Object> indexingSensorConfigs = indexingConfigs.getSensorIndexingConfig(sensorType);
+      if (indexingSensorConfigs != null) {
+        Map<String, Object> writerConfigs = (Map<String, Object>) indexingSensorConfigs.get(writerName);
+        if (writerConfigs != null) {
+          indexingTopic = (String) writerConfigs.getOrDefault(IndexingConfigurations.INDEX_CONF, indexingTopic);
+        }
+      }
+      return indexingTopic;
+    };
+  }
+}
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryDao.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryDao.java
index d6e1521..e306567 100644
--- a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryDao.java
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryDao.java
@@ -35,6 +35,7 @@
 import org.apache.metron.indexing.dao.search.SearchResult;
 import org.apache.metron.indexing.dao.search.SortField;
 import org.apache.metron.indexing.dao.search.SortOrder;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.Document;
 
 import java.io.IOException;
@@ -291,6 +292,22 @@
     return indexColumnMetadata;
   }
 
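+  // Comment add/remove operations are intentional no-ops in this in-memory test DAO.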
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) {
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) {
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) {
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) {
+  }
+
   public static void setColumnMetadata(Map<String, Map<String, FieldType>> columnMetadata) {
     Map<String, Map<String, FieldType>> columnMetadataMap = new HashMap<>();
     for (Map.Entry<String, Map<String, FieldType>> e: columnMetadata.entrySet()) {
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryMetaAlertDao.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryMetaAlertDao.java
index 5ab5c48..cb8837b 100644
--- a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryMetaAlertDao.java
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/InMemoryMetaAlertDao.java
@@ -32,8 +32,10 @@
 import java.util.stream.Collectors;
 import org.adrianwalker.multilinestring.Multiline;
 import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;
 import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
 import org.apache.metron.indexing.dao.metaalert.MetaScores;
 import org.apache.metron.indexing.dao.search.FieldType;
@@ -45,6 +47,7 @@
 import org.apache.metron.indexing.dao.search.SearchRequest;
 import org.apache.metron.indexing.dao.search.SearchResponse;
 import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
 import org.apache.metron.indexing.dao.update.Document;
 import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
 import org.apache.metron.indexing.dao.update.PatchRequest;
@@ -57,6 +60,7 @@
   public static Map<String, Collection<String>> METAALERT_STORE = new HashMap<>();
 
   private IndexDao indexDao;
+  private int pageSize = 10;
 
   /**
    * {
@@ -96,6 +100,7 @@
     // Ignore threatSort for test.
   }
 
+
   @Override
   public Document getLatest(String guid, String sensorType) throws IOException {
     return indexDao.getLatest(guid, sensorType);
@@ -112,7 +117,7 @@
   }
 
   @Override
-  public void batchUpdate(Map<Document, Optional<String>> updates) throws IOException {
+  public void batchUpdate(Map<Document, Optional<String>> updates) {
     throw new UnsupportedOperationException("InMemoryMetaAlertDao can't do bulk updates");
   }
 
@@ -123,14 +128,31 @@
   }
 
   @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) {
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) {
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) {
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) {
+  }
+
+  @Override
   public Optional<Map<String, Object>> getLatestResult(GetRequest request) throws IOException {
     return indexDao.getLatestResult(request);
   }
 
   @Override
-  public void patch(PatchRequest request, Optional<Long> timestamp)
+  public void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+      Optional<Long> timestamp)
       throws OriginalNotFoundException, IOException {
-    indexDao.patch(request, timestamp);
+    indexDao.patch(retrieveLatestDao, request, timestamp);
   }
 
   @Override
@@ -153,7 +175,7 @@
   @SuppressWarnings("unchecked")
   @Override
   public MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
-      throws InvalidCreateException, IOException {
+      throws InvalidCreateException {
     List<GetRequest> alertRequests = request.getAlerts();
     if (alertRequests.isEmpty()) {
       MetaAlertCreateResponse response = new MetaAlertCreateResponse();
@@ -162,12 +184,13 @@
     }
     // Build meta alert json.  Give it a reasonable GUID
     JSONObject metaAlert = new JSONObject();
-    String metaAlertGuid = "meta_" + (InMemoryDao.BACKING_STORE.get(MetaAlertDao.METAALERTS_INDEX).size() + 1);
+    String metaAlertGuid =
+        "meta_" + (InMemoryDao.BACKING_STORE.get(getMetaAlertIndex()).size() + 1);
     metaAlert.put(GUID, metaAlertGuid);
 
     JSONArray groupsArray = new JSONArray();
     groupsArray.addAll(request.getGroups());
-    metaAlert.put(MetaAlertDao.GROUPS_FIELD, groupsArray);
+    metaAlert.put(MetaAlertConstants.GROUPS_FIELD, groupsArray);
 
     // Retrieve the alert for each guid
     // For the purpose of testing, we're just using guids for the alerts field and grabbing the scores.
@@ -183,7 +206,8 @@
         List<SearchResult> searchResults = searchResponse.getResults();
         if (searchResults.size() > 1) {
           throw new InvalidCreateException(
-              "Found more than one result for: " + alertRequest.getGuid() + ". Values: " + searchResults
+              "Found more than one result for: " + alertRequest.getGuid() + ". Values: "
+                  + searchResults
           );
         }
 
@@ -191,7 +215,9 @@
           SearchResult result = searchResults.get(0);
           alertArray.add(result.getSource());
           Double threatScore = Double
-              .parseDouble(result.getSource().getOrDefault(THREAT_FIELD_DEFAULT, "0").toString());
+              .parseDouble(
+                  result.getSource().getOrDefault(MetaAlertConstants.THREAT_FIELD_DEFAULT, "0")
+                      .toString());
 
           threatScores.add(threatScore);
         }
@@ -201,12 +227,12 @@
       alertGuids.add(alertRequest.getGuid());
     }
 
-    metaAlert.put(MetaAlertDao.ALERT_FIELD, alertArray);
+    metaAlert.put(MetaAlertConstants.ALERT_FIELD, alertArray);
     metaAlert.putAll(new MetaScores(threatScores).getMetaScores());
-    metaAlert.put(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
+    metaAlert.put(MetaAlertConstants.STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
 
     // Add the alert to the store, but make sure not to overwrite existing results
-    InMemoryDao.BACKING_STORE.get(MetaAlertDao.METAALERTS_INDEX).add(metaAlert.toJSONString());
+    InMemoryDao.BACKING_STORE.get(getMetaAlertIndex()).add(metaAlert.toJSONString());
 
     METAALERT_STORE.put(metaAlertGuid, new HashSet<>(alertGuids));
 
@@ -217,12 +243,13 @@
   }
 
   @Override
-  public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests) throws IOException {
+  public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests) {
     Collection<String> currentAlertGuids = METAALERT_STORE.get(metaAlertGuid);
     if (currentAlertGuids == null) {
       return false;
     }
-    Collection<String> alertGuids = alertRequests.stream().map(GetRequest::getGuid).collect(Collectors.toSet());
+    Collection<String> alertGuids = alertRequests.stream().map(GetRequest::getGuid)
+        .collect(Collectors.toSet());
     boolean added = currentAlertGuids.addAll(alertGuids);
     if (added) {
       METAALERT_STORE.put(metaAlertGuid, currentAlertGuids);
@@ -231,12 +258,13 @@
   }
 
   @Override
-  public boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests) throws IOException {
+  public boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests) {
     Collection<String> currentAlertGuids = METAALERT_STORE.get(metaAlertGuid);
     if (currentAlertGuids == null) {
       return false;
     }
-    Collection<String> alertGuids = alertRequests.stream().map(GetRequest::getGuid).collect(Collectors.toSet());
+    Collection<String> alertGuids = alertRequests.stream().map(GetRequest::getGuid)
+        .collect(Collectors.toSet());
     boolean removed = currentAlertGuids.removeAll(alertGuids);
     if (removed) {
       METAALERT_STORE.put(metaAlertGuid, currentAlertGuids);
@@ -249,16 +277,17 @@
   public boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status)
       throws IOException {
     boolean statusChanged = false;
-    List<String> metaAlerts = InMemoryDao.BACKING_STORE.get(MetaAlertDao.METAALERTS_INDEX);
-    for (String metaAlert: metaAlerts) {
+    List<String> metaAlerts = InMemoryDao.BACKING_STORE.get(getMetaAlertIndex());
+    for (String metaAlert : metaAlerts) {
       JSONObject metaAlertJSON = JSONUtils.INSTANCE.load(metaAlert, JSONObject.class);
       if (metaAlertGuid.equals(metaAlertJSON.get(GUID))) {
-        statusChanged = !status.getStatusString().equals(metaAlertJSON.get(STATUS_FIELD));
+        statusChanged = !status.getStatusString()
+            .equals(metaAlertJSON.get(MetaAlertConstants.STATUS_FIELD));
         if (statusChanged) {
-          metaAlertJSON.put(STATUS_FIELD, status.getStatusString());
+          metaAlertJSON.put(MetaAlertConstants.STATUS_FIELD, status.getStatusString());
           metaAlerts.remove(metaAlert);
           metaAlerts.add(metaAlertJSON.toJSONString());
-          InMemoryDao.BACKING_STORE.put(MetaAlertDao.METAALERTS_INDEX, metaAlerts);
+          InMemoryDao.BACKING_STORE.put(getMetaAlertIndex(), metaAlerts);
         }
         break;
       }
@@ -266,9 +295,24 @@
     return statusChanged;
   }
 
+  public int getPageSize() {
+    return pageSize;
+  }
+
+  public void setPageSize(int pageSize) {
+    this.pageSize = pageSize;
+  }
+
+  public String getMetAlertSensorName() {
+    return MetaAlertConstants.METAALERT_TYPE;
+  }
+
+  public String getMetaAlertIndex() {
+    return "metaalert_index";
+  }
+
   public static void clear() {
     InMemoryDao.clear();
     METAALERT_STORE.clear();
   }
-
 }
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/SearchIntegrationTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/SearchIntegrationTest.java
index b40db46..2e1968a 100644
--- a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/SearchIntegrationTest.java
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/SearchIntegrationTest.java
@@ -19,7 +19,6 @@
 package org.apache.metron.indexing.dao;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -40,7 +39,6 @@
 import org.apache.metron.integration.InMemoryComponent;
 import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -48,11 +46,11 @@
 public abstract class SearchIntegrationTest {
   /**
    * [
-   * {"source:type": "bro", "ip_src_addr":"192.168.1.1", "ip_src_port": 8010, "long_field": 10000, "timestamp":1, "latitude": 48.5839, "score": 10.0, "is_alert":true, "location_point": "48.5839,7.7455", "bro_field": "bro data 1", "duplicate_name_field": "data 1", "guid":"bro_1"},
-   * {"source:type": "bro", "ip_src_addr":"192.168.1.2", "ip_src_port": 8009, "long_field": 20000, "timestamp":2, "latitude": 48.0001, "score": 50.0, "is_alert":false, "location_point": "48.5839,7.7455", "bro_field": "bro data 2", "duplicate_name_field": "data 2", "guid":"bro_2"},
-   * {"source:type": "bro", "ip_src_addr":"192.168.1.3", "ip_src_port": 8008, "long_field": 10000, "timestamp":3, "latitude": 48.5839, "score": 20.0, "is_alert":true, "location_point": "50.0,7.7455", "bro_field": "bro data 3", "duplicate_name_field": "data 3", "guid":"bro_3"},
-   * {"source:type": "bro", "ip_src_addr":"192.168.1.4", "ip_src_port": 8007, "long_field": 10000, "timestamp":4, "latitude": 48.5839, "score": 10.0, "is_alert":true, "location_point": "48.5839,7.7455", "bro_field": "bro data 4", "duplicate_name_field": "data 4", "guid":"bro_4"},
-   * {"source:type": "bro", "ip_src_addr":"192.168.1.5", "ip_src_port": 8006, "long_field": 10000, "timestamp":5, "latitude": 48.5839, "score": 98.0, "is_alert":true, "location_point": "48.5839,7.7455", "bro_field": "bro data 5", "duplicate_name_field": "data 5", "guid":"bro_5"}
+   * {"source:type": "bro", "ip_src_addr":"192.168.1.1", "ip_src_port": 8010, "long_field": 10000, "timestamp":1, "latitude": 48.5839, "score": 10.0, "is_alert":true, "location_point": "48.5839,7.7455", "bro_field": "bro data 1", "ttl": "data 1", "guid":"bro_1"},
+   * {"source:type": "bro", "ip_src_addr":"192.168.1.2", "ip_src_port": 8009, "long_field": 20000, "timestamp":2, "latitude": 48.0001, "score": 50.0, "is_alert":false, "location_point": "48.5839,7.7455", "bro_field": "bro data 2", "ttl": "data 2", "guid":"bro_2"},
+   * {"source:type": "bro", "ip_src_addr":"192.168.1.3", "ip_src_port": 8008, "long_field": 10000, "timestamp":3, "latitude": 48.5839, "score": 20.0, "is_alert":true, "location_point": "50.0,7.7455", "bro_field": "bro data 3", "ttl": "data 3", "guid":"bro_3"},
+   * {"source:type": "bro", "ip_src_addr":"192.168.1.4", "ip_src_port": 8007, "long_field": 10000, "timestamp":4, "latitude": 48.5839, "score": 10.0, "is_alert":true, "location_point": "48.5839,7.7455", "bro_field": "bro data 4", "ttl": "data 4", "guid":"bro_4"},
+   * {"source:type": "bro", "ip_src_addr":"192.168.1.5", "ip_src_port": 8006, "long_field": 10000, "timestamp":5, "latitude": 48.5839, "score": 98.0, "is_alert":true, "location_point": "48.5839,7.7455", "bro_field": "bro data 5", "ttl": "data 5", "guid":"bro_5"}
    * ]
    */
   @Multiline
@@ -60,11 +58,11 @@
 
   /**
    * [
-   * {"source:type": "snort", "ip_src_addr":"192.168.1.6", "ip_src_port": 8005, "long_field": 10000, "timestamp":6, "latitude": 48.5839, "score": 50.0, "is_alert":false, "location_point": "50.0,7.7455", "snort_field": 10, "duplicate_name_field": 1, "guid":"snort_1", "threat:triage:score":"10"},
-   * {"source:type": "snort", "ip_src_addr":"192.168.1.1", "ip_src_port": 8004, "long_field": 10000, "timestamp":7, "latitude": 48.5839, "score": 10.0, "is_alert":true, "location_point": "48.5839,7.7455", "snort_field": 20, "duplicate_name_field": 2, "guid":"snort_2", "threat:triage:score":"20"},
-   * {"source:type": "snort", "ip_src_addr":"192.168.1.7", "ip_src_port": 8003, "long_field": 10000, "timestamp":8, "latitude": 48.5839, "score": 20.0, "is_alert":false, "location_point": "48.5839,7.7455", "snort_field": 30, "duplicate_name_field": 3, "guid":"snort_3"},
-   * {"source:type": "snort", "ip_src_addr":"192.168.1.1", "ip_src_port": 8002, "long_field": 20000, "timestamp":9, "latitude": 48.0001, "score": 50.0, "is_alert":true, "location_point": "48.5839,7.7455", "snort_field": 40, "duplicate_name_field": 4, "guid":"snort_4"},
-   * {"source:type": "snort", "ip_src_addr":"192.168.1.8", "ip_src_port": 8001, "long_field": 10000, "timestamp":10, "latitude": 48.5839, "score": 10.0, "is_alert":false, "location_point": "48.5839,7.7455", "snort_field": 50, "duplicate_name_field": 5, "guid":"snort_5"}
+   * {"source:type": "snort", "ip_src_addr":"192.168.1.6", "ip_src_port": 8005, "long_field": 10000, "timestamp":6, "latitude": 48.5839, "score": 50.0, "is_alert":false, "location_point": "50.0,7.7455", "snort_field": 10, "ttl": 1, "guid":"snort_1", "threat:triage:score":10.0},
+   * {"source:type": "snort", "ip_src_addr":"192.168.1.1", "ip_src_port": 8004, "long_field": 10000, "timestamp":7, "latitude": 48.5839, "score": 10.0, "is_alert":true, "location_point": "48.5839,7.7455", "snort_field": 20, "ttl": 2, "guid":"snort_2", "threat:triage:score":20.0},
+   * {"source:type": "snort", "ip_src_addr":"192.168.1.7", "ip_src_port": 8003, "long_field": 10000, "timestamp":8, "latitude": 48.5839, "score": 20.0, "is_alert":false, "location_point": "48.5839,7.7455", "snort_field": 30, "ttl": 3, "guid":"snort_3"},
+   * {"source:type": "snort", "ip_src_addr":"192.168.1.1", "ip_src_port": 8002, "long_field": 20000, "timestamp":9, "latitude": 48.0001, "score": 50.0, "is_alert":true, "location_point": "48.5839,7.7455", "snort_field": 40, "ttl": 4, "guid":"snort_4"},
+   * {"source:type": "snort", "ip_src_addr":"192.168.1.8", "ip_src_port": 8001, "long_field": 10000, "timestamp":10, "latitude": 48.5839, "score": 10.0, "is_alert":false, "location_point": "48.5839,7.7455", "snort_field": 50, "ttl": 5, "guid":"snort_5"}
    * ]
    */
   @Multiline
@@ -72,7 +70,7 @@
 
   /**
    * {
-   * "indices": ["bro", "snort"],
+   * "indices": ["bro", "snort", "some_collection"],
    * "query": "*",
    * "from": 0,
    * "size": 10,
@@ -235,7 +233,7 @@
    * }
    */
   @Multiline
-  public static String facetQuery;
+  public static String facetQueryRaw;
 
   /**
    * {
@@ -274,6 +272,42 @@
 
   /**
    * {
+   * "facetFields": ["snort_field"],
+   * "indices": ["bro", "snort"],
+   * "query": "*:*",
+   * "from": 0,
+   * "size": 10,
+   * "sort": [
+   *   {
+   *     "field": "timestamp",
+   *     "sortOrder": "desc"
+   *   }
+   * ]
+   * }
+   */
+  @Multiline
+  public static String missingTypeFacetQuery;
+
+  /**
+   * {
+   * "facetFields": ["ttl"],
+   * "indices": ["bro", "snort"],
+   * "query": "*:*",
+   * "from": 0,
+   * "size": 10,
+   * "sort": [
+   *   {
+   *     "field": "timestamp",
+   *     "sortOrder": "desc"
+   *   }
+   * ]
+   * }
+   */
+  @Multiline
+  public static String differentTypeFacetQuery;
+
+  /**
+   * {
    * "indices": ["bro", "snort"],
    * "query": "*",
    * "from": 0,
@@ -419,17 +453,24 @@
   @Multiline
   public static String groupByIpQuery;
 
-  protected static IndexDao dao;
-  protected static InMemoryComponent indexComponent;
+  /**
+   * {
+   * "indices": ["bro", "snort"],
+   * "query": "ttl:\"data 1\"",
+   * "from": 0,
+   * "size": 10,
+   * "sort": [
+   *   {
+   *     "field": "timestamp",
+   *     "sortOrder": "desc"
+   *   }
+   * ]
+   * }
+   */
+  @Multiline
+  public static String differentTypeFilterQuery;
 
-  @Before
-  public synchronized void setup() throws Exception {
-    if(dao == null && indexComponent == null) {
-      indexComponent = startIndex();
-      loadTestData();
-      dao = createDao();
-    }
-  }
+  protected static InMemoryComponent indexComponent;
 
   @Rule
   public ExpectedException thrown = ExpectedException.none();
@@ -437,28 +478,30 @@
   @Test
   public void all_query_returns_all_results() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(allQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(10, response.getTotal());
     List<SearchResult> results = response.getResults();
     Assert.assertEquals(10, results.size());
     for(int i = 0;i < 5;++i) {
-      Assert.assertEquals("snort", results.get(i).getSource().get("source:type"));
-      Assert.assertEquals(10 - i, results.get(i).getSource().get("timestamp"));
+      Assert.assertEquals("snort", results.get(i).getSource().get(getSourceTypeField()));
+      Assert.assertEquals(getIndexName("snort"), results.get(i).getIndex());
+      Assert.assertEquals(10 - i + "", results.get(i).getSource().get("timestamp").toString());
     }
     for (int i = 5; i < 10; ++i) {
-      Assert.assertEquals("bro", results.get(i).getSource().get("source:type"));
-      Assert.assertEquals(10 - i, results.get(i).getSource().get("timestamp"));
+      Assert.assertEquals("bro", results.get(i).getSource().get(getSourceTypeField()));
+      Assert.assertEquals(getIndexName("bro"), results.get(i).getIndex());
+      Assert.assertEquals(10 - i + "", results.get(i).getSource().get("timestamp").toString());
     }
   }
 
   @Test
   public void find_one_guid() throws Exception {
     GetRequest request = JSONUtils.INSTANCE.load(findOneGuidQuery, GetRequest.class);
-    Optional<Map<String, Object>> response = dao.getLatestResult(request);
+    Optional<Map<String, Object>> response = getIndexDao().getLatestResult(request);
     Assert.assertTrue(response.isPresent());
     Map<String, Object> doc = response.get();
-    Assert.assertEquals("bro", doc.get("source:type"));
-    Assert.assertEquals(3, doc.get("timestamp"));
+    Assert.assertEquals("bro", doc.get(getSourceTypeField()));
+    Assert.assertEquals("3", doc.get("timestamp").toString());
   }
 
   @Test
@@ -466,34 +509,34 @@
     List<GetRequest> request = JSONUtils.INSTANCE.load(getAllLatestQuery, new JSONUtils.ReferenceSupplier<List<GetRequest>>(){});
     Map<String, Document> docs = new HashMap<>();
 
-    for(Document doc : dao.getAllLatest(request)) {
+    for(Document doc : getIndexDao().getAllLatest(request)) {
       docs.put(doc.getGuid(), doc);
     }
     Assert.assertEquals(2, docs.size());
     Assert.assertTrue(docs.keySet().contains("bro_1"));
     Assert.assertTrue(docs.keySet().contains("snort_2"));
-    Assert.assertEquals("bro", docs.get("bro_1").getDocument().get("source:type"));
-    Assert.assertEquals("snort", docs.get("snort_2").getDocument().get("source:type"));
+    Assert.assertEquals("bro", docs.get("bro_1").getDocument().get(getSourceTypeField()));
+    Assert.assertEquals("snort", docs.get("snort_2").getDocument().get(getSourceTypeField()));
   }
 
   @Test
   public void filter_query_filters_results() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(filterQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(3, response.getTotal());
     List<SearchResult> results = response.getResults();
-    Assert.assertEquals("snort", results.get(0).getSource().get("source:type"));
-    Assert.assertEquals(9, results.get(0).getSource().get("timestamp"));
-    Assert.assertEquals("snort", results.get(1).getSource().get("source:type"));
-    Assert.assertEquals(7, results.get(1).getSource().get("timestamp"));
-    Assert.assertEquals("bro", results.get(2).getSource().get("source:type"));
-    Assert.assertEquals(1, results.get(2).getSource().get("timestamp"));
+    Assert.assertEquals("snort", results.get(0).getSource().get(getSourceTypeField()));
+    Assert.assertEquals("9", results.get(0).getSource().get("timestamp").toString());
+    Assert.assertEquals("snort", results.get(1).getSource().get(getSourceTypeField()));
+    Assert.assertEquals("7", results.get(1).getSource().get("timestamp").toString());
+    Assert.assertEquals("bro", results.get(2).getSource().get(getSourceTypeField()));
+    Assert.assertEquals("1", results.get(2).getSource().get("timestamp").toString());
   }
 
   @Test
   public void sort_query_sorts_results_ascending() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(sortQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(10, response.getTotal());
     List<SearchResult> results = response.getResults();
     for (int i = 8001; i < 8011; ++i) {
@@ -504,7 +547,7 @@
   @Test
   public void sort_ascending_with_missing_fields() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(sortAscendingWithMissingFields, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(10, response.getTotal());
     List<SearchResult> results = response.getResults();
     Assert.assertEquals(10, results.size());
@@ -515,21 +558,21 @@
     }
 
     // validate sorted order - there are only 2 with a 'threat:triage:score'
-    Assert.assertEquals("10", results.get(8).getSource().get("threat:triage:score"));
-    Assert.assertEquals("20", results.get(9).getSource().get("threat:triage:score"));
+    Assert.assertEquals("10.0", results.get(8).getSource().get("threat:triage:score").toString());
+    Assert.assertEquals("20.0", results.get(9).getSource().get("threat:triage:score").toString());
   }
 
   @Test
   public void sort_descending_with_missing_fields() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(sortDescendingWithMissingFields, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(10, response.getTotal());
     List<SearchResult> results = response.getResults();
     Assert.assertEquals(10, results.size());
 
     // validate sorted order - there are only 2 with a 'threat:triage:score'
-    Assert.assertEquals("20", results.get(0).getSource().get("threat:triage:score"));
-    Assert.assertEquals("10", results.get(1).getSource().get("threat:triage:score"));
+    Assert.assertEquals("20.0", results.get(0).getSource().get("threat:triage:score").toString());
+    Assert.assertEquals("10.0", results.get(1).getSource().get("threat:triage:score").toString());
 
     // the remaining are missing the 'threat:triage:score' and should be sorted last
     for (int i = 2; i < 10; i++) {
@@ -540,38 +583,39 @@
   @Test
   public void results_are_paginated() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(paginationQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(10, response.getTotal());
     List<SearchResult> results = response.getResults();
     Assert.assertEquals(3, results.size());
-    Assert.assertEquals("snort", results.get(0).getSource().get("source:type"));
-    Assert.assertEquals(6, results.get(0).getSource().get("timestamp"));
-    Assert.assertEquals("bro", results.get(1).getSource().get("source:type"));
-    Assert.assertEquals(5, results.get(1).getSource().get("timestamp"));
-    Assert.assertEquals("bro", results.get(2).getSource().get("source:type"));
-    Assert.assertEquals(4, results.get(2).getSource().get("timestamp"));
+    Assert.assertEquals("snort", results.get(0).getSource().get(getSourceTypeField()));
+    Assert.assertEquals("6", results.get(0).getSource().get("timestamp").toString());
+    Assert.assertEquals("bro", results.get(1).getSource().get(getSourceTypeField()));
+    Assert.assertEquals("5", results.get(1).getSource().get("timestamp").toString());
+    Assert.assertEquals("bro", results.get(2).getSource().get(getSourceTypeField()));
+    Assert.assertEquals("4", results.get(2).getSource().get("timestamp").toString());
   }
 
   @Test
   public void returns_results_only_for_specified_indices() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(indexQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(5, response.getTotal());
     List<SearchResult> results = response.getResults();
     for (int i = 5, j = 0; i > 0; i--, j++) {
-      Assert.assertEquals("bro", results.get(j).getSource().get("source:type"));
-      Assert.assertEquals(i, results.get(j).getSource().get("timestamp"));
+      Assert.assertEquals("bro", results.get(j).getSource().get(getSourceTypeField()));
+      Assert.assertEquals(i + "", results.get(j).getSource().get("timestamp").toString());
     }
   }
 
   @Test
   public void facet_query_yields_field_types() throws Exception {
+    String facetQuery = facetQueryRaw.replace("source:type", getSourceTypeField());
     SearchRequest request = JSONUtils.INSTANCE.load(facetQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(10, response.getTotal());
     Map<String, Map<String, Long>> facetCounts = response.getFacetCounts();
     Assert.assertEquals(8, facetCounts.size());
-    Map<String, Long> sourceTypeCounts = facetCounts.get("source:type");
+    Map<String, Long> sourceTypeCounts = facetCounts.get(getSourceTypeField());
     Assert.assertEquals(2, sourceTypeCounts.size());
     Assert.assertEquals(new Long(5), sourceTypeCounts.get("bro"));
     Assert.assertEquals(new Long(5), sourceTypeCounts.get("snort"));
@@ -640,103 +684,67 @@
   }
 
   @Test
-  public void bad_facet_query_throws_exception() throws Exception {
-    thrown.expect(InvalidSearchException.class);
-    thrown.expectMessage("Failed to execute search");
-    SearchRequest request = JSONUtils.INSTANCE.load(badFacetQuery, SearchRequest.class);
-    dao.search(request);
-  }
-
-  @Test
   public void disabled_facet_query_returns_null_count() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(disabledFacetQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertNull(response.getFacetCounts());
   }
 
   @Test
-  public void exceeding_max_resulsts_throws_exception() throws Exception {
+  public void missing_type_facet_query() throws Exception {
+    SearchRequest request = JSONUtils.INSTANCE.load(missingTypeFacetQuery, SearchRequest.class);
+    SearchResponse response = getIndexDao().search(request);
+    Assert.assertEquals(10, response.getTotal());
+
+    Map<String, Map<String, Long>> facetCounts = response.getFacetCounts();
+    Assert.assertEquals(1, facetCounts.size());
+    Map<String, Long> snortFieldCounts = facetCounts.get("snort_field");
+    Assert.assertEquals(5, snortFieldCounts.size());
+
+    Assert.assertEquals(1L, snortFieldCounts.get("50").longValue());
+    Assert.assertEquals(1L, snortFieldCounts.get("40").longValue());
+    Assert.assertEquals(1L, snortFieldCounts.get("30").longValue());
+    Assert.assertEquals(1L, snortFieldCounts.get("20").longValue());
+    Assert.assertEquals(1L, snortFieldCounts.get("10").longValue());
+  }
+
+  @Test
+  public void different_type_facet_query() throws Exception {
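+    // Faceting on 'ttl' is expected to fail: the field is a string in the bro data but a
+    // number in the snort data, so the types conflict across indices.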
+    thrown.expect(Exception.class);
+    SearchRequest request = JSONUtils.INSTANCE.load(differentTypeFacetQuery, SearchRequest.class);
+    SearchResponse response = getIndexDao().search(request);
+    Assert.assertEquals(3, response.getTotal());
+  }
+
+  @Test
+  public void exceeding_max_results_throws_exception() throws Exception {
     thrown.expect(InvalidSearchException.class);
     thrown.expectMessage("Search result size must be less than 100");
     SearchRequest request = JSONUtils.INSTANCE.load(exceededMaxResultsQuery, SearchRequest.class);
-    dao.search(request);
+    getIndexDao().search(request);
   }
 
   @Test
-  public void returns_column_data_for_multiple_indices() throws Exception {
-    Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Arrays.asList("bro", "snort"));
-    Assert.assertEquals(15, fieldTypes.size());
-    Assert.assertEquals(FieldType.KEYWORD, fieldTypes.get("guid"));
-    Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source:type"));
-    Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
-    Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
-    Assert.assertEquals(FieldType.LONG, fieldTypes.get("long_field"));
-    Assert.assertEquals(FieldType.DATE, fieldTypes.get("timestamp"));
-    Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("latitude"));
-    Assert.assertEquals(FieldType.DOUBLE, fieldTypes.get("score"));
-    Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
-    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
-    Assert.assertEquals(FieldType.TEXT, fieldTypes.get("bro_field"));
-    Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("snort_field"));
-    //NOTE: This is because the field is in both bro and snort and they have different types.
-    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("duplicate_name_field"));
-    Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("threat:triage:score"));
-    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("alert"));
-  }
-
-  @Test
-  public void returns_column_metadata_for_specified_indices() throws Exception {
-    // getColumnMetadata with only bro
-    {
-      Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Collections.singletonList("bro"));
-      Assert.assertEquals(13, fieldTypes.size());
-      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("bro_field"));
-      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("duplicate_name_field"));
-      Assert.assertEquals(FieldType.KEYWORD, fieldTypes.get("guid"));
-      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source:type"));
-      Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
-      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
-      Assert.assertEquals(FieldType.LONG, fieldTypes.get("long_field"));
-      Assert.assertEquals(FieldType.DATE, fieldTypes.get("timestamp"));
-      Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("latitude"));
-      Assert.assertEquals(FieldType.DOUBLE, fieldTypes.get("score"));
-      Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
-      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
-      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("bro_field"));
-      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("duplicate_name_field"));
-      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("alert"));
-    }
-    // getColumnMetadata with only snort
-    {
-      Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Collections.singletonList("snort"));
-      Assert.assertEquals(14, fieldTypes.size());
-      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("snort_field"));
-      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("duplicate_name_field"));
-      Assert.assertEquals(FieldType.KEYWORD, fieldTypes.get("guid"));
-      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source:type"));
-      Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
-      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
-      Assert.assertEquals(FieldType.LONG, fieldTypes.get("long_field"));
-      Assert.assertEquals(FieldType.DATE, fieldTypes.get("timestamp"));
-      Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("latitude"));
-      Assert.assertEquals(FieldType.DOUBLE, fieldTypes.get("score"));
-      Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
-      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
-      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("duplicate_name_field"));
-      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("alert"));
-    }
+  public void column_metadata_for_missing_index() throws Exception {
     // getColumnMetadata with an index that doesn't exist
     {
-      Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Collections.singletonList("someindex"));
+      Map<String, FieldType> fieldTypes = getIndexDao().getColumnMetadata(Collections.singletonList("someindex"));
       Assert.assertEquals(0, fieldTypes.size());
     }
   }
 
+  @Test
+  public void no_results_returned_when_query_does_not_match() throws Exception {
+    SearchRequest request = JSONUtils.INSTANCE.load(noResultsFieldsQuery, SearchRequest.class);
+    SearchResponse response = getIndexDao().search(request);
+    Assert.assertEquals(0, response.getTotal());
+  }
 
   @Test
   public void group_by_ip_query() throws Exception {
     GroupRequest request = JSONUtils.INSTANCE.load(groupByIpQuery, GroupRequest.class);
-    GroupResponse response = dao.group(request);
+    GroupResponse response = getIndexDao().group(request);
 
     // expect only 1 group for 'ip_src_addr'
     Assert.assertEquals("ip_src_addr", response.getGroupedBy());
@@ -757,17 +765,10 @@
   }
 
   @Test
-  public void no_results_returned_when_query_does_not_match() throws Exception {
-    SearchRequest request = JSONUtils.INSTANCE.load(noResultsFieldsQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
-    Assert.assertEquals(0, response.getTotal());
-  }
-
-  @Test
   public void group_by_returns_results_in_groups() throws Exception {
     // Group by test case, default order is count descending
     GroupRequest request = JSONUtils.INSTANCE.load(groupByQuery, GroupRequest.class);
-    GroupResponse response = dao.group(request);
+    GroupResponse response = getIndexDao().group(request);
     Assert.assertEquals("is_alert", response.getGroupedBy());
     List<GroupResult> isAlertGroups = response.getGroupResults();
     Assert.assertEquals(2, isAlertGroups.size());
@@ -819,7 +820,7 @@
   public void group_by_returns_results_in_sorted_groups() throws Exception {
     // Group by with sorting test case where is_alert is sorted by count ascending and ip_src_addr is sorted by term descending
     GroupRequest request = JSONUtils.INSTANCE.load(sortedGroupByQuery, GroupRequest.class);
-    GroupResponse response = dao.group(request);
+    GroupResponse response = getIndexDao().group(request);
     Assert.assertEquals("is_alert", response.getGroupedBy());
     List<GroupResult> isAlertGroups = response.getGroupResults();
     Assert.assertEquals(2, isAlertGroups.size());
@@ -896,18 +897,9 @@
   }
 
   @Test
-  public void throws_exception_on_aggregation_queries_on_non_string_non_numeric_fields()
-          throws Exception {
-    thrown.expect(InvalidSearchException.class);
-    thrown.expectMessage("Failed to execute search");
-    GroupRequest request = JSONUtils.INSTANCE.load(badGroupQuery, GroupRequest.class);
-    dao.group(request);
-  }
-
-  @Test
   public void queries_fields() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(fieldsQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(10, response.getTotal());
     List<SearchResult> results = response.getResults();
     for (int i = 0; i < 5; ++i) {
@@ -925,7 +917,7 @@
   @Test
   public void sort_by_guid() throws Exception {
     SearchRequest request = JSONUtils.INSTANCE.load(sortByGuidQuery, SearchRequest.class);
-    SearchResponse response = dao.search(request);
+    SearchResponse response = getIndexDao().search(request);
     Assert.assertEquals(5, response.getTotal());
     List<SearchResult> results = response.getResults();
     for (int i = 0; i < 5; ++i) {
@@ -936,11 +928,20 @@
   }
 
   @AfterClass
-  public static void stop() throws Exception {
+  public static void stop() {
     indexComponent.stop();
   }
 
-  protected abstract IndexDao createDao() throws Exception;
-  protected abstract InMemoryComponent startIndex() throws Exception;
-  protected abstract void loadTestData() throws Exception;
+  @Test
+  public abstract void returns_column_data_for_multiple_indices() throws Exception;
+  @Test
+  public abstract void returns_column_metadata_for_specified_indices() throws Exception;
+  @Test
+  public abstract void different_type_filter_query() throws Exception;
+
+  protected abstract IndexDao getIndexDao();
+
+  protected abstract String getSourceTypeField();
+
+  protected abstract String getIndexName(String sensorType);
 }
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/UpdateIntegrationTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/UpdateIntegrationTest.java
new file mode 100644
index 0000000..1e35523
--- /dev/null
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/UpdateIntegrationTest.java
@@ -0,0 +1,306 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements.  See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership.  The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with the License.  You may obtain
+ * a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.metron.indexing.dao;
+
+import static org.apache.metron.indexing.dao.IndexDao.COMMENTS_FIELD;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.commons.collections.MapUtils;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.hbase.mock.MockHTable;
+import org.apache.metron.indexing.dao.search.AlertComment;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.apache.metron.indexing.dao.update.ReplaceRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+public abstract class UpdateIntegrationTest {
+
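+  // The @Multiline annotation (org.adrianwalker.multilinestring) copies the preceding
+  // Javadoc block into the annotated String field at compile time, letting the JSON
+  // fixtures below be written without escape sequences.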
+  /**
+   * {
+   *   "comment":"New Comment",
+   *   "username":"test_user",
+   *   "timestamp":1526401584951
+   *   }
+   */
+  @Multiline
+  protected String commentOne;
+
+  /**
+   * {
+   *   "comment":"New Comment 2",
+   *   "username":"test_user_2",
+   *   "timestamp":1526401584952
+   *   }
+   */
+  @Multiline
+  protected String commentTwo;
+
+  private static final int MAX_RETRIES = 10;
+  private static final int SLEEP_MS = 500;
+  protected static final String SENSOR_NAME = "test";
+  private static final String CF = "p";
+
+  private MultiIndexDao dao;
+
+  @Test
+  public void test() throws Exception {
+    List<Map<String, Object>> inputData = new ArrayList<>();
+    for (int i = 0; i < 10; ++i) {
+      final String name = "message" + i;
+      inputData.add(
+          new HashMap<String, Object>() {{
+            put("source.type", SENSOR_NAME);
+            put("name" , name);
+            put("timestamp", System.currentTimeMillis());
+            put(Constants.GUID, name);
+          }}
+      );
+    }
+    addTestData(getIndexName(), SENSOR_NAME, inputData);
+    List<Map<String,Object>> docs = null;
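+    // Writes are indexed asynchronously, so poll (up to MAX_RETRIES * SLEEP_MS ms) for
+    // the documents to become visible instead of asserting immediately.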
+    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
+      docs = getIndexedTestData(getIndexName(), SENSOR_NAME);
+      if (docs.size() >= 10) {
+        break;
+      }
+    }
+    Assert.assertEquals(10, docs.size());
+    //modify the first message and add a new field
+    {
+      Map<String, Object> message0 = new HashMap<String, Object>(inputData.get(0)) {{
+        put("new-field", "metron");
+      }};
+      String guid = "" + message0.get(Constants.GUID);
+      getDao().replace(new ReplaceRequest(){{
+        setReplacement(message0);
+        setGuid(guid);
+        setSensorType(SENSOR_NAME);
+        setIndex(getIndexName());
+      }}, Optional.empty());
+
+      Assert.assertEquals(1, getMockHTable().size());
+      findUpdatedDoc(message0, guid, SENSOR_NAME);
+      {
+        //ensure hbase is up to date
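+        // The HBase row key is the (guid, sensorType) pair serialized via HBaseDao.Key.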
+        Get g = new Get(HBaseDao.Key.toBytes(new HBaseDao.Key(guid, SENSOR_NAME)));
+        Result r = getMockHTable().get(g);
+        NavigableMap<byte[], byte[]> columns = r.getFamilyMap(CF.getBytes());
+        Assert.assertEquals(1, columns.size());
+        Assert.assertEquals(message0
+            , JSONUtils.INSTANCE.load(new String(columns.lastEntry().getValue())
+                , JSONUtils.MAP_SUPPLIER)
+        );
+      }
+      {
+        //ensure ES is up-to-date
+        long cnt = 0;
+        for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
+          docs = getIndexedTestData(getIndexName(), SENSOR_NAME);
+          cnt = docs
+              .stream()
+              .filter(d -> message0.get("new-field").equals(d.get("new-field")))
+              .count();
+        }
+        Assert.assertNotEquals("Data store is not updated!", cnt, 0);
+      }
+    }
+    //modify the same message and modify the new field
+    {
+      Map<String, Object> message0 = new HashMap<String, Object>(inputData.get(0)) {{
+        put("new-field", "metron2");
+      }};
+      String guid = "" + message0.get(Constants.GUID);
+      getDao().replace(new ReplaceRequest(){{
+        setReplacement(message0);
+        setGuid(guid);
+        setSensorType(SENSOR_NAME);
+        setIndex(getIndexName());
+      }}, Optional.empty());
+      Assert.assertEquals(1, getMockHTable().size());
+      Document doc = getDao().getLatest(guid, SENSOR_NAME);
+      Assert.assertEquals(message0, doc.getDocument());
+      findUpdatedDoc(message0, guid, SENSOR_NAME);
+      {
+        //ensure hbase is up to date
+        Get g = new Get(HBaseDao.Key.toBytes(new HBaseDao.Key(guid, SENSOR_NAME)));
+        Result r = getMockHTable().get(g);
+        NavigableMap<byte[], byte[]> columns = r.getFamilyMap(CF.getBytes());
+        Assert.assertEquals(2, columns.size());
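+        // Each replace writes a new timestamped cell in the column family, so lastEntry()
+        // holds the latest version and firstEntry() the original replacement.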
+        Assert.assertEquals(message0, JSONUtils.INSTANCE.load(new String(columns.lastEntry().getValue())
+            , JSONUtils.MAP_SUPPLIER)
+        );
+        Assert.assertNotEquals(message0, JSONUtils.INSTANCE.load(new String(columns.firstEntry().getValue())
+            , JSONUtils.MAP_SUPPLIER)
+        );
+      }
+      {
+        //ensure ES is up-to-date
+        long cnt = 0;
+        for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
+          docs = getIndexedTestData(getIndexName(), SENSOR_NAME);
+          cnt = docs
+              .stream()
+              .filter(d -> message0.get("new-field").equals(d.get("new-field")))
+              .count();
+        }
+
+        Assert.assertNotEquals("Data store is not updated!", 0, cnt);
+      }
+    }
+  }
+
+  @Test
+  public void testAddCommentAndPatch() throws Exception {
+    Map<String, Object> fields = new HashMap<>();
+    fields.put("guid", "add_comment");
+    fields.put("source.type", SENSOR_NAME);
+
+    Document document = new Document(fields, "add_comment", SENSOR_NAME, 1526306463050L);
+    getDao().update(document, Optional.of(SENSOR_NAME));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    addAlertComment("add_comment", "New Comment", "test_user", 1526306463050L);
+    // Ensure we have the first comment
+    ArrayList<AlertComment> comments = new ArrayList<>();
+    comments.add(new AlertComment("New Comment", "test_user", 1526306463050L));
+    document.getDocument().put(COMMENTS_FIELD, comments.stream().map(AlertComment::asMap).collect(
+        Collectors.toList()));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
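+    // Build a JSON Patch (RFC 6902) "add" operation; once applied, the stored document
+    // should contain "project": "metron".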
+    List<Map<String, Object>> patchList = new ArrayList<>();
+    Map<String, Object> patch = new HashMap<>();
+    patch.put("op", "add");
+    patch.put("path", "/project");
+    patch.put("value", "metron");
+    patchList.add(patch);
+
+    PatchRequest pr = new PatchRequest();
+    pr.setGuid("add_comment");
+    pr.setIndex(SENSOR_NAME);
+    pr.setSensorType(SENSOR_NAME);
+    pr.setPatch(patchList);
+    getDao().patch(getDao(), pr, Optional.of(new Date().getTime()));
+
+    document.getDocument().put("project", "metron");
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testRemoveComments() throws Exception {
+    Map<String, Object> fields = new HashMap<>();
+    fields.put("guid", "add_comment");
+    fields.put("source.type", SENSOR_NAME);
+
+    Document document = new Document(fields, "add_comment", SENSOR_NAME, 1526401584951L);
+    getDao().update(document, Optional.of(SENSOR_NAME));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    addAlertComment("add_comment", "New Comment", "test_user", 1526401584951L);
+    // Ensure we have the first comment
+    ArrayList<AlertComment> comments = new ArrayList<>();
+    comments.add(new AlertComment("New Comment", "test_user", 1526401584951L));
+    document.getDocument().put(COMMENTS_FIELD, comments.stream().map(AlertComment::asMap).collect(
+        Collectors.toList()));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    addAlertComment("add_comment", "New Comment 2", "test_user_2", 1526401584952L);
+    // Ensure we have the second comment
+    comments.add(new AlertComment("New Comment 2", "test_user_2", 1526401584952L));
+    document.getDocument().put(COMMENTS_FIELD, comments.stream().map(AlertComment::asMap).collect(
+        Collectors.toList()));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    removeAlertComment("add_comment", "New Comment 2", "test_user_2", 1526401584952L);
+    // Ensure we only have the first comment
+    comments = new ArrayList<>();
+    comments.add(new AlertComment(commentOne));
+    document.getDocument().put(COMMENTS_FIELD, comments.stream().map(AlertComment::asMap).collect(
+        Collectors.toList()));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    removeAlertComment("add_comment", "New Comment", "test_user", 1526401584951L);
+    // Ensure we have no comments
+    document.getDocument().remove(COMMENTS_FIELD);
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+  }
+
+  protected void addAlertComment(String guid, String comment, String username, long timestamp)
+      throws IOException {
+    CommentAddRemoveRequest request = buildAlertRequest(guid, comment, username, timestamp);
+    getDao().addCommentToAlert(request);
+  }
+
+  protected void removeAlertComment(String guid, String comment, String username, long timestamp)
+      throws IOException {
+    CommentAddRemoveRequest request = buildAlertRequest(guid, comment, username, timestamp);
+    getDao().removeCommentFromAlert(request);
+  }
+
+  private CommentAddRemoveRequest buildAlertRequest(String guid, String comment, String username,
+      long timestamp) {
+    CommentAddRemoveRequest request = new CommentAddRemoveRequest();
+    request.setGuid(guid);
+    request.setComment(comment);
+    request.setUsername(username);
+    request.setTimestamp(timestamp);
+    request.setSensorType(SENSOR_NAME);
+    return request;
+  }
+
+  protected void findUpdatedDoc(Map<String, Object> message0, String guid, String sensorType)
+      throws InterruptedException, IOException, OriginalNotFoundException {
+    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
+      Document doc = getDao().getLatest(guid, sensorType);
+      if (doc != null && message0.equals(doc.getDocument())) {
+        return;
+      }
+      if (t == MAX_RETRIES - 1) {
+        MapUtils.debugPrint(System.out, "Expected", message0);
+        MapUtils.debugPrint(System.out, "Actual", doc == null ? null : doc.getDocument());
+      }
+    }
+    throw new OriginalNotFoundException("Count not find " + guid + " after " + MAX_RETRIES + " tries");
+  }
+
+  protected IndexDao getDao() {
+    return dao;
+  }
+
+  protected void setDao(MultiIndexDao dao) {
+    this.dao = dao;
+  }
+
+  protected abstract String getIndexName();
+  protected abstract MockHTable getMockHTable();
+  protected abstract void addTestData(String indexName, String sensorType, List<Map<String,Object>> docs) throws Exception;
+  protected abstract List<Map<String,Object>> getIndexedTestData(String indexName, String sensorType) throws Exception;
+}
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/MetaAlertIntegrationTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/MetaAlertIntegrationTest.java
new file mode 100644
index 0000000..6f96fb5
--- /dev/null
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/MetaAlertIntegrationTest.java
@@ -0,0 +1,1012 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.ALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_TYPE;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.STATUS_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.THREAT_FIELD_DEFAULT;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.Group;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.GroupResult;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.search.SortField;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+public abstract class MetaAlertIntegrationTest {
+
+  private static final String META_INDEX_FLAG = "%META_INDEX%";
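+  // Placeholder substituted with the implementation's meta alert index before the patch
+  // request fixtures below are parsed.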
+  // Maximum polling attempts; non-final so implementations can tune it.
+  protected static int MAX_RETRIES = 10;
+  protected static final int SLEEP_MS = 500;
+  protected static final String SENSOR_NAME = "test";
+
+  protected static final String NEW_FIELD = "new-field";
+  protected static final String NAME_FIELD = "name";
+  protected static final String DATE_FORMAT = "yyyy.MM.dd.HH";
+
+  // Separate the raw indices from the query indices. ES, for example, modifies the indices
+  // to have a separator.
+  protected ArrayList<String> allIndices = new ArrayList<String>() {
+    {
+      add(getTestIndexName());
+      add(getMetaAlertIndex());
+    }
+  };
+
+  protected ArrayList<String> queryIndices = allIndices;
+
+  protected static MetaAlertDao metaDao;
+
+  /**
+   {
+   "guid": "meta_alert",
+   "index": "%META_INDEX%",
+   "patch": [
+   {
+   "op": "add",
+   "path": "/name",
+   "value": "New Meta Alert"
+   }
+   ],
+   "sensorType": "metaalert"
+   }
+   */
+  @Multiline
+  public static String namePatchRequest;
+
+  /**
+   {
+   "guid": "meta_alert",
+   "index": "%META_INDEX%",
+   "patch": [
+   {
+   "op": "add",
+   "path": "/name",
+   "value": "New Meta Alert"
+   },
+   {
+   "op": "add",
+   "path": "/metron_alert",
+   "value": []
+   }
+   ],
+   "sensorType": "metaalert"
+   }
+   */
+  @Multiline
+  public static String alertPatchRequest;
+
+  /**
+   {
+   "guid": "meta_alert",
+   "index": "%META_INDEX%",
+   "patch": [
+   {
+   "op": "add",
+   "path": "/status",
+   "value": "inactive"
+   },
+   {
+   "op": "add",
+   "path": "/name",
+   "value": "New Meta Alert"
+   }
+   ],
+   "sensorType": "metaalert"
+   }
+   */
+  @Multiline
+  public static String statusPatchRequest;
+
+
+  @Test
+  public void shouldGetAllMetaAlertsForAlert() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(3);
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Load metaAlerts
+    List<Map<String, Object>> metaAlerts = buildMetaAlerts(12, MetaAlertStatus.ACTIVE,
+        Optional.of(Collections.singletonList(alerts.get(0))));
+    metaAlerts.add(buildMetaAlert("meta_active_12", MetaAlertStatus.ACTIVE,
+        Optional.of(Arrays.asList(alerts.get(0), alerts.get(2)))));
+    metaAlerts.add(buildMetaAlert("meta_inactive", MetaAlertStatus.INACTIVE,
+        Optional.of(Arrays.asList(alerts.get(0), alerts.get(2)))));
+    // We pass METAALERT_TYPE because the "_doc" gets appended automatically.
+    addRecords(metaAlerts, getMetaAlertIndex(), METAALERT_TYPE);
+
+    // Verify load was successful
+    List<GetRequest> createdDocs = metaAlerts.stream().map(metaAlert ->
+        new GetRequest((String) metaAlert.get(Constants.GUID), METAALERT_TYPE))
+        .collect(Collectors.toList());
+    createdDocs.addAll(alerts.stream().map(alert ->
+        new GetRequest((String) alert.get(Constants.GUID), SENSOR_NAME))
+        .collect(Collectors.toList()));
+    findCreatedDocs(createdDocs);
+
+    {
+      // Verify searches successfully return more than 10 results
+      SearchResponse searchResponse0 = metaDao.getAllMetaAlertsForAlert("message_0");
+      List<SearchResult> searchResults0 = searchResponse0.getResults();
+      Assert.assertEquals(13, searchResults0.size());
+      Set<Map<String, Object>> resultSet = new HashSet<>();
+      Iterables.addAll(resultSet, Iterables.transform(searchResults0, r -> r.getSource()));
+      StringBuilder reason = new StringBuilder("Unable to find " + metaAlerts.get(0) + "\n");
+      reason.append(Joiner.on("\n").join(resultSet));
+      Assert.assertTrue(reason.toString(), resultSet.contains(metaAlerts.get(0)));
+
+      // Verify no meta alerts are returned because message_1 was not added to any
+      SearchResponse searchResponse1 = metaDao.getAllMetaAlertsForAlert("message_1");
+      List<SearchResult> searchResults1 = searchResponse1.getResults();
+      Assert.assertEquals(0, searchResults1.size());
+
+      // Verify only the meta alert message_2 was added to is returned
+      SearchResponse searchResponse2 = metaDao.getAllMetaAlertsForAlert("message_2");
+      List<SearchResult> searchResults2 = searchResponse2.getResults();
+      Assert.assertEquals(1, searchResults2.size());
+      Assert.assertEquals(metaAlerts.get(12), searchResults2.get(0).getSource());
+    }
+  }
+
+  @Test
+  public void getAllMetaAlertsForAlertShouldThrowExceptionForEmptyGuid() throws Exception {
+    try {
+      metaDao.getAllMetaAlertsForAlert("");
+      Assert.fail("An exception should be thrown for empty guid");
+    } catch (InvalidSearchException ise) {
+      Assert.assertEquals("Guid cannot be empty", ise.getMessage());
+    }
+  }
+
+  @Test
+  public void shouldCreateMetaAlert() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(3);
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME),
+        new GetRequest("message_2", SENSOR_NAME)));
+
+    {
+      MetaAlertCreateRequest metaAlertCreateRequest = new MetaAlertCreateRequest() {{
+        setAlerts(new ArrayList<GetRequest>() {{
+          add(new GetRequest("message_1", SENSOR_NAME));
+          add(new GetRequest("message_2", SENSOR_NAME, getTestIndexFullName()));
+        }});
+        setGroups(Collections.singletonList("group"));
+      }};
+      MetaAlertCreateResponse metaAlertCreateResponse = metaDao
+          .createMetaAlert(metaAlertCreateRequest);
+      {
+        // Verify metaAlert was created
+        findCreatedDoc(metaAlertCreateResponse.getGuid(), METAALERT_TYPE);
+      }
+      {
+        // Verify alert 0 was not updated with metaalert field
+        Document alert = metaDao.getLatest("message_0", SENSOR_NAME);
+        Assert.assertEquals(4, alert.getDocument().size());
+        Assert.assertNull(alert.getDocument().get(METAALERT_FIELD));
+      }
+      {
+        // Verify alert 1 was properly updated with metaalert field
+        Map<String, Object> expectedAlert = new HashMap<>(alerts.get(1));
+        expectedAlert
+            .put(METAALERT_FIELD, Collections.singletonList(metaAlertCreateResponse.getGuid()));
+        findUpdatedDoc(expectedAlert, "message_1", SENSOR_NAME);
+      }
+      {
+        // Verify alert 2 was properly updated with metaalert field
+        Map<String, Object> expectedAlert = new HashMap<>(alerts.get(2));
+        expectedAlert
+            .put(METAALERT_FIELD, Collections.singletonList(metaAlertCreateResponse.getGuid()));
+        findUpdatedDoc(expectedAlert, "message_2", SENSOR_NAME);
+      }
+    }
+  }
+
+  @Test
+  public void shouldAddAlertsToMetaAlert() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(4);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Load metaAlert
+    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
+        Optional.of(Collections.singletonList(alerts.get(0))));
+    addRecords(Collections.singletonList(metaAlert), getMetaAlertIndex(), METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME),
+        new GetRequest("message_2", SENSOR_NAME),
+        new GetRequest("message_3", SENSOR_NAME),
+        new GetRequest("meta_alert", METAALERT_TYPE)
+    ));
+
+    // Build expected metaAlert after alerts are added
+    Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
+
+    // Verify the proper alerts were added
+    @SuppressWarnings("unchecked")
+    List<Map<String, Object>> metaAlertAlerts = new ArrayList<>(
+        (List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
+    // Alert 0 is already in the metaalert. Add alerts 1 and 2.
+    Map<String, Object> expectedAlert1 = alerts.get(1);
+    expectedAlert1.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    metaAlertAlerts.add(expectedAlert1);
+    Map<String, Object> expectedAlert2 = alerts.get(2);
+    expectedAlert2.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    metaAlertAlerts.add(expectedAlert2);
+    expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
+
+    // Verify the counts were properly updated
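+    // Child threat scores are now {0.0, 1.0, 2.0}: sum 3.0, average 1.0, median 1.0, max 2.0.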
+    expectedMetaAlert.put("average", 1.0d);
+    expectedMetaAlert.put("min", 0.0d);
+    expectedMetaAlert.put("median", 1.0d);
+    expectedMetaAlert.put("max", 2.0d);
+    expectedMetaAlert.put("count", 3);
+    expectedMetaAlert.put("sum", 3.0d);
+    expectedMetaAlert.put(getThreatTriageField(), 3.0d);
+
+    {
+      // Verify alerts were successfully added to the meta alert
+      Assert.assertTrue(metaDao.addAlertsToMetaAlert("meta_alert", Arrays
+          .asList(new GetRequest("message_1", SENSOR_NAME),
+              new GetRequest("message_2", SENSOR_NAME))));
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+
+    {
+      // Verify False when alerts are already in a meta alert and no new alerts are added
+      Assert.assertFalse(metaDao.addAlertsToMetaAlert("meta_alert", Arrays
+          .asList(new GetRequest("message_0", SENSOR_NAME),
+              new GetRequest("message_1", SENSOR_NAME))));
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+
+    {
+      // Verify only 1 alert is added when a list of alerts only contains 1 alert that is not in the meta alert
+      metaAlertAlerts = (List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD);
+      Map<String, Object> expectedAlert3 = alerts.get(3);
+      expectedAlert3.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+      metaAlertAlerts.add(expectedAlert3);
+      expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
+
+      expectedMetaAlert.put("average", 1.5d);
+      expectedMetaAlert.put("min", 0.0d);
+      expectedMetaAlert.put("median", 1.5d);
+      expectedMetaAlert.put("max", 3.0d);
+      expectedMetaAlert.put("count", 4);
+      expectedMetaAlert.put("sum", 6.0d);
+      expectedMetaAlert.put(getThreatTriageField(), 6.0d);
+
+      Assert.assertTrue(metaDao.addAlertsToMetaAlert("meta_alert", Arrays
+          .asList(new GetRequest("message_2", SENSOR_NAME),
+              new GetRequest("message_3", SENSOR_NAME))));
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void shouldRemoveAlertsFromMetaAlert() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(4);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    alerts.get(1).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    alerts.get(2).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    alerts.get(3).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Load metaAlert
+    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
+        Optional.of(Arrays.asList(alerts.get(0), alerts.get(1), alerts.get(2), alerts.get(3))));
+    addRecords(Collections.singletonList(metaAlert), getMetaAlertIndex(), METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME),
+        new GetRequest("message_2", SENSOR_NAME),
+        new GetRequest("message_3", SENSOR_NAME),
+        new GetRequest("meta_alert", METAALERT_TYPE)));
+
+    // Build expected metaAlert after alerts are added
+    Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
+
+    // Verify the proper alerts were added
+    List<Map<String, Object>> metaAlertAlerts = new ArrayList<>(
+        (List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
+    metaAlertAlerts.remove(0);
+    metaAlertAlerts.remove(0);
+    expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
+
+    // Verify the counts were properly updated
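+    // Remaining child scores are {2.0, 3.0}: sum 5.0, average and median 2.5, max 3.0.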
+    expectedMetaAlert.put("average", 2.5d);
+    expectedMetaAlert.put("min", 2.0d);
+    expectedMetaAlert.put("median", 2.5d);
+    expectedMetaAlert.put("max", 3.0d);
+    expectedMetaAlert.put("count", 2);
+    expectedMetaAlert.put("sum", 5.0d);
+    expectedMetaAlert.put(getThreatTriageField(), 5.0d);
+
+    {
+      // Verify a list of alerts are removed from a meta alert
+      Assert.assertTrue(metaDao.removeAlertsFromMetaAlert("meta_alert", Arrays
+          .asList(new GetRequest("message_0", SENSOR_NAME),
+              new GetRequest("message_1", SENSOR_NAME))));
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+
+    {
+      // Verify False when alerts are not present in a meta alert and no alerts are removed
+      Assert.assertFalse(metaDao.removeAlertsFromMetaAlert("meta_alert", Arrays
+          .asList(new GetRequest("message_0", SENSOR_NAME),
+              new GetRequest("message_1", SENSOR_NAME))));
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+
+    {
+      // Verify only 1 alert is removed when a list of alerts only contains 1 alert that is in the meta alert
+      metaAlertAlerts = new ArrayList<>(
+          (List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
+      metaAlertAlerts.remove(0);
+      expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
+
+      expectedMetaAlert.put("average", 3.0d);
+      expectedMetaAlert.put("min", 3.0d);
+      expectedMetaAlert.put("median", 3.0d);
+      expectedMetaAlert.put("max", 3.0d);
+      expectedMetaAlert.put("count", 1);
+      expectedMetaAlert.put("sum", 3.0d);
+      expectedMetaAlert.put(getThreatTriageField(), 3.0d);
+
+      Assert.assertTrue(metaDao.removeAlertsFromMetaAlert("meta_alert", Arrays
+          .asList(new GetRequest("message_0", SENSOR_NAME),
+              new GetRequest("message_2", SENSOR_NAME))));
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+
+    {
+      // Verify all alerts are removed from a metaAlert
+      metaAlertAlerts = new ArrayList<>(
+          (List<Map<String, Object>>) expectedMetaAlert.get(ALERT_FIELD));
+      metaAlertAlerts.remove(0);
+      if (isEmptyMetaAlertList()) {
+        expectedMetaAlert.put(ALERT_FIELD, metaAlertAlerts);
+      } else {
+        expectedMetaAlert.remove(ALERT_FIELD);
+      }
+
+      expectedMetaAlert.put("average", 0.0d);
+      expectedMetaAlert.put("count", 0);
+      expectedMetaAlert.put("sum", 0.0d);
+      expectedMetaAlert.put(getThreatTriageField(), 0.0d);
+
+      // Handle the cases with non-finite Double values on a per store basis
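+      // With no child alerts the aggregates collapse to their identity values: the min of
+      // an empty set is +Infinity, the max is -Infinity, and the median is NaN.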
+      if (isFiniteDoubleOnly()) {
+        expectedMetaAlert.put("min", String.valueOf(Double.POSITIVE_INFINITY));
+        expectedMetaAlert.put("median", String.valueOf(Double.NaN));
+        expectedMetaAlert.put("max", String.valueOf(Double.NEGATIVE_INFINITY));
+      } else {
+        expectedMetaAlert.put("min", Double.POSITIVE_INFINITY);
+        expectedMetaAlert.put("median", Double.NaN);
+        expectedMetaAlert.put("max", Double.NEGATIVE_INFINITY);
+      }
+
+      // Verify removing alerts cannot result in an empty meta alert
+      try {
+        metaDao.removeAlertsFromMetaAlert("meta_alert",
+                Collections.singletonList(new GetRequest("message_3", SENSOR_NAME)));
+        Assert.fail("Removing these alerts will result in an empty meta alert.  Empty meta alerts are not allowed.");
+      } catch (IllegalStateException ise) {
+        Assert.assertEquals("Removing these alerts will result in an empty meta alert.  Empty meta alerts are not allowed.",
+                ise.getMessage());
+      }
+    }
+  }
+
+  @Test
+  public void addRemoveAlertsShouldThrowExceptionForInactiveMetaAlert() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(2);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Load metaAlert
+    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.INACTIVE,
+        Optional.of(Collections.singletonList(alerts.get(0))));
+    addRecords(Collections.singletonList(metaAlert), getMetaAlertIndex(), METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME),
+        new GetRequest("meta_alert", METAALERT_TYPE)));
+
+    {
+      // Verify alerts cannot be added to an INACTIVE meta alert
+      try {
+        metaDao.addAlertsToMetaAlert("meta_alert",
+            Collections.singletonList(new GetRequest("message_1", SENSOR_NAME)));
+        Assert.fail("Adding alerts to an inactive meta alert should throw an exception");
+      } catch (IllegalStateException ise) {
+        Assert.assertEquals("Adding alerts to an INACTIVE meta alert is not allowed",
+            ise.getMessage());
+      }
+    }
+
+    {
+      // Verify alerts cannot be removed from an INACTIVE meta alert
+      try {
+        metaDao.removeAlertsFromMetaAlert("meta_alert",
+            Collections.singletonList(new GetRequest("message_0", SENSOR_NAME)));
+        Assert.fail("Removing alerts from an inactive meta alert should throw an exception");
+      } catch (IllegalStateException ise) {
+        Assert.assertEquals("Removing alerts from an INACTIVE meta alert is not allowed",
+            ise.getMessage());
+      }
+    }
+  }
+
+  @Test
+  public void shouldUpdateMetaAlertStatus() throws Exception {
+    int numChildAlerts = 25;
+    int numUnrelatedAlerts = 25;
+    int totalAlerts = numChildAlerts + numUnrelatedAlerts;
+
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(totalAlerts);
+    List<Map<String, Object>> childAlerts = alerts.subList(0, numChildAlerts);
+    List<Map<String, Object>> unrelatedAlerts = alerts.subList(numChildAlerts, totalAlerts);
+    for (Map<String, Object> alert : childAlerts) {
+      alert.put(METAALERT_FIELD, Collections.singletonList("meta_alert"));
+    }
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Load metaAlerts
+    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
+        Optional.of(childAlerts));
+    // We pass METAALERT_TYPE because the "_doc" gets appended automatically.
+    addRecords(Collections.singletonList(metaAlert), getMetaAlertIndex(),
+        METAALERT_TYPE);
+
+    List<GetRequest> requests = new ArrayList<>();
+    for (int i = 0; i < numChildAlerts; ++i) {
+      requests.add(new GetRequest("message_" + i, SENSOR_NAME));
+    }
+    requests.add(new GetRequest("meta_alert", METAALERT_TYPE));
+
+    // Verify load was successful
+    findCreatedDocs(requests);
+
+    {
+      // Verify status changed to inactive and child alerts are updated
+      Assert.assertTrue(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.INACTIVE));
+
+      Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
+      expectedMetaAlert.put(STATUS_FIELD, MetaAlertStatus.INACTIVE.getStatusString());
+
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+
+      for (int i = 0; i < numChildAlerts; ++i) {
+        Map<String, Object> expectedAlert = new HashMap<>(childAlerts.get(i));
+        setEmptiedMetaAlertField(expectedAlert);
+        findUpdatedDoc(expectedAlert, "message_" + i, SENSOR_NAME);
+      }
+
+      // Ensure unrelated alerts are unaffected
+      for (int i = 0; i < numUnrelatedAlerts; ++i) {
+        Map<String, Object> expectedAlert = new HashMap<>(unrelatedAlerts.get(i));
+        // Make sure to handle the guid offset from creation
+        findUpdatedDoc(expectedAlert, "message_" + (i + numChildAlerts), SENSOR_NAME);
+      }
+    }
+
+    {
+      // Verify status changed to active and child alerts are updated
+      Assert.assertTrue(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.ACTIVE));
+
+      Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
+      expectedMetaAlert.put(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
+
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+
+      for (int i = 0; i < numChildAlerts; ++i) {
+        Map<String, Object> expectedAlert = new HashMap<>(alerts.get(i));
+        expectedAlert.put("metaalerts", Collections.singletonList("meta_alert"));
+        findUpdatedDoc(expectedAlert, "message_" + i, SENSOR_NAME);
+      }
+
+      // Ensure unrelated alerts are unaffected
+      for (int i = 0; i < numUnrelatedAlerts; ++i) {
+        Map<String, Object> expectedAlert = new HashMap<>(unrelatedAlerts.get(i));
+        // Make sure to handle the guid offset from creation
+        findUpdatedDoc(expectedAlert, "message_" + (i + numChildAlerts), SENSOR_NAME);
+      }
+
+      {
+        // Verify status changed to current status has no effect
+        Assert.assertFalse(metaDao.updateMetaAlertStatus("meta_alert", MetaAlertStatus.ACTIVE));
+
+        findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+
+        for (int i = 0; i < numChildAlerts; ++i) {
+          Map<String, Object> expectedAlert = new HashMap<>(alerts.get(i));
+          expectedAlert.put("metaalerts", Collections.singletonList("meta_alert"));
+          findUpdatedDoc(expectedAlert, "message_" + i, SENSOR_NAME);
+        }
+
+        // Ensure unrelated alerts are unaffected
+        for (int i = 0; i < numUnrelatedAlerts; ++i) {
+          Map<String, Object> expectedAlert = new HashMap<>(unrelatedAlerts.get(i));
+          // Make sure to handle the guid offset from creation
+          findUpdatedDoc(expectedAlert, "message_" + (i + numChildAlerts), SENSOR_NAME);
+        }
+      }
+    }
+  }
+
+  @Test
+  public void shouldSearchByStatus() throws Exception {
+    // Load alert
+    List<Map<String, Object>> alerts = buildAlerts(1);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
+    alerts.get(0).put("ip_src_addr", "192.168.1.1");
+    alerts.get(0).put("ip_src_port", 8010);
+
+    // Load metaAlerts
+    Map<String, Object> activeMetaAlert = buildMetaAlert("meta_active", MetaAlertStatus.ACTIVE,
+        Optional.of(Collections.singletonList(alerts.get(0))));
+    Map<String, Object> inactiveMetaAlert = buildMetaAlert("meta_inactive",
+        MetaAlertStatus.INACTIVE,
+        Optional.empty());
+
+    // We pass METAALERT_TYPE because the "_doc" gets appended automatically.
+    addRecords(Arrays.asList(activeMetaAlert, inactiveMetaAlert), getMetaAlertIndex(),
+        METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("meta_active", METAALERT_TYPE),
+        new GetRequest("meta_inactive", METAALERT_TYPE)));
+
+    SearchResponse searchResponse = metaDao.search(new SearchRequest() {
+      {
+        setQuery("*:*");
+        setIndices(Collections.singletonList(METAALERT_TYPE));
+        setFrom(0);
+        setSize(5);
+        setSort(Collections.singletonList(new SortField() {{
+          setField(Constants.GUID);
+        }}));
+      }
+    });
+
+    // Verify only active meta alerts are returned
+    Assert.assertEquals(1, searchResponse.getTotal());
+    Assert.assertEquals(MetaAlertStatus.ACTIVE.getStatusString(),
+        searchResponse.getResults().get(0).getSource().get(STATUS_FIELD));
+  }
+
+
+  @Test
+  public void shouldHideAlertsOnGroup() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(2);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
+    alerts.get(0).put("ip_src_addr", "192.168.1.1");
+    alerts.get(0).put("score", 1);
+    alerts.get(1).put("ip_src_addr", "192.168.1.1");
+    alerts.get(1).put("score", 10);
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Put the nested type into the test index, so that it'll match appropriately
+    setupTypings();
+
+    // Don't need any meta alerts to actually exist, since we've populated the field on the alerts.
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME)));
+
+    // Build our group request
+    Group searchGroup = new Group();
+    searchGroup.setField("ip_src_addr");
+    List<Group> groupList = new ArrayList<>();
+    groupList.add(searchGroup);
+    GroupResponse groupResponse = metaDao.group(new GroupRequest() {
+      {
+        setQuery("ip_src_addr:192.168.1.1");
+        setIndices(queryIndices);
+        setScoreField("score");
+        setGroups(groupList);
+      }
+    });
+
+    // Should only return the standalone alert in the group
+    GroupResult result = groupResponse.getGroupResults().get(0);
+    Assert.assertEquals(1, result.getTotal());
+    Assert.assertEquals("192.168.1.1", result.getKey());
+    // A delta of 0.0 is safe: only one alert's score contributes, so no arithmetic occurs
+    Assert.assertEquals(10.0d, result.getScore(), 0.0d);
+  }
+
+  // This test is important enough that everyone should implement it, but is pretty specific to
+  // implementation
+  @Test
+  public abstract void shouldSearchByNestedAlert() throws Exception;
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void shouldUpdateMetaAlertOnAlertUpdate() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(2);
+    alerts.get(0).put(METAALERT_FIELD, Arrays.asList("meta_active", "meta_inactive"));
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Load metaAlerts
+    Map<String, Object> activeMetaAlert = buildMetaAlert("meta_active", MetaAlertStatus.ACTIVE,
+        Optional.of(Collections.singletonList(alerts.get(0))));
+    Map<String, Object> inactiveMetaAlert = buildMetaAlert("meta_inactive",
+        MetaAlertStatus.INACTIVE,
+        Optional.of(Collections.singletonList(alerts.get(0))));
+    // We pass METAALERT_TYPE because the "_doc" gets appended automatically.
+    addRecords(Arrays.asList(activeMetaAlert, inactiveMetaAlert), getMetaAlertIndex(),
+        METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME),
+        new GetRequest("meta_active", METAALERT_TYPE),
+        new GetRequest("meta_inactive", METAALERT_TYPE)));
+
+    {
+      // Modify the first message and add a new field
+      Map<String, Object> message0 = new HashMap<String, Object>(alerts.get(0)) {
+        {
+          put(NEW_FIELD, "metron");
+          put(THREAT_FIELD_DEFAULT, 10.0d);
+        }
+      };
+      String guid = "" + message0.get(Constants.GUID);
+      metaDao.update(new Document(message0, guid, SENSOR_NAME, null),
+          Optional.of(getTestIndexFullName()));
+
+      {
+        // Verify alerts are up-to-date
+        findUpdatedDoc(message0, guid, SENSOR_NAME);
+        long cnt = getMatchingAlertCount(NEW_FIELD, message0.get(NEW_FIELD));
+        if (cnt == 0) {
+          Assert.fail("Alert not updated!");
+        }
+      }
+
+      {
+        // Verify meta alerts are up-to-date
+        long cnt = getMatchingMetaAlertCount(NEW_FIELD, "metron");
+        if (cnt == 0) {
+          Assert.fail("Active metaalert was not updated!");
+        }
+        if (cnt != 1) {
+          Assert.fail("Metaalerts not updated correctly!");
+        }
+      }
+    }
+    //modify the same message and modify the new field
+    {
+      Map<String, Object> message0 = new HashMap<String, Object>(alerts.get(0)) {
+        {
+          put(NEW_FIELD, "metron2");
+        }
+      };
+      String guid = "" + message0.get(Constants.GUID);
+      metaDao.update(new Document(message0, guid, SENSOR_NAME, null), Optional.empty());
+
+      {
+        // Verify index is up-to-date
+        findUpdatedDoc(message0, guid, SENSOR_NAME);
+        long cnt = getMatchingAlertCount(NEW_FIELD, message0.get(NEW_FIELD));
+        if (cnt == 0) {
+          Assert.fail("Alert not updated!");
+        }
+      }
+      {
+        // Verify meta alerts are up-to-date
+        long cnt = getMatchingMetaAlertCount(NEW_FIELD, "metron2");
+        if (cnt == 0) {
+          Assert.fail("Active metaalert was not updated!");
+        }
+        if (cnt != 1) {
+          Assert.fail("Metaalerts not updated correctly!");
+        }
+      }
+    }
+  }
+
+  @Test
+  public void shouldThrowExceptionOnMetaAlertUpdate() throws Exception {
+    Document metaAlert = new Document(new HashMap<>(), "meta_alert", METAALERT_TYPE, 0L);
+    try {
+      // Verify a meta alert cannot be updated in the meta alert dao
+      metaDao.update(metaAlert, Optional.empty());
+      Assert.fail("Direct meta alert update should throw an exception");
+    } catch (UnsupportedOperationException uoe) {
+      Assert.assertEquals("Meta alerts cannot be directly updated", uoe.getMessage());
+    }
+  }
+
+  @Test
+  public void shouldPatchAllowedMetaAlerts() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(2);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
+    alerts.get(1).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
+    addRecords(alerts, getTestIndexFullName(), SENSOR_NAME);
+
+    // Put the nested type into the test index, so that it'll match appropriately
+    setupTypings();
+
+    // Load metaAlerts
+    Map<String, Object> metaAlert = buildMetaAlert("meta_alert", MetaAlertStatus.ACTIVE,
+        Optional.of(Arrays.asList(alerts.get(0), alerts.get(1))));
+    // We pass MetaAlertDao.METAALERT_TYPE, because the "_doc" gets appended automatically.
+    addRecords(Collections.singletonList(metaAlert), getMetaAlertIndex(), METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME),
+        new GetRequest("meta_alert", METAALERT_TYPE)));
+
+    Map<String, Object> expectedMetaAlert = new HashMap<>(metaAlert);
+    expectedMetaAlert.put(NAME_FIELD, "New Meta Alert");
+    {
+      // Verify a patch to a field other than "status" or "alert" can be patched
+      String namePatch = namePatchRequest.replace(META_INDEX_FLAG, getMetaAlertIndex());
+      PatchRequest patchRequest = JSONUtils.INSTANCE.load(namePatch, PatchRequest.class);
+      metaDao.patch(metaDao, patchRequest, Optional.of(System.currentTimeMillis()));
+
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+
+    {
+      // Verify a patch to an alert field should throw an exception
+      try {
+        String alertPatch = alertPatchRequest.replace(META_INDEX_FLAG, getMetaAlertIndex());
+        PatchRequest patchRequest = JSONUtils.INSTANCE.load(alertPatch, PatchRequest.class);
+        metaDao.patch(metaDao, patchRequest, Optional.of(System.currentTimeMillis()));
+
+        Assert.fail("A patch on the alert field should throw an exception");
+      } catch (IllegalArgumentException iae) {
+        Assert.assertEquals("Meta alert patches are not allowed for /alert or /status paths.  "
+                + "Please use the add/remove alert or update status functions instead.",
+            iae.getMessage());
+      }
+
+      // Verify the metaAlert was not updated
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+
+    {
+      // Verify a patch to a status field should throw an exception
+      try {
+        String statusPatch = statusPatchRequest
+            .replace(META_INDEX_FLAG, getMetaAlertIndex());
+        PatchRequest patchRequest = JSONUtils.INSTANCE.load(statusPatch, PatchRequest.class);
+        metaDao.patch(metaDao, patchRequest, Optional.of(System.currentTimeMillis()));
+
+        Assert.fail("A patch on the status field should throw an exception");
+      } catch (IllegalArgumentException iae) {
+        Assert.assertEquals("Meta alert patches are not allowed for /alert or /status paths.  "
+                + "Please use the add/remove alert or update status functions instead.",
+            iae.getMessage());
+      }
+
+      // Verify the metaAlert was not updated
+      findUpdatedDoc(expectedMetaAlert, "meta_alert", METAALERT_TYPE);
+    }
+  }
+
+  protected void findUpdatedDoc(Map<String, Object> message0, String guid, String sensorType)
+      throws InterruptedException, IOException, OriginalNotFoundException {
+    commit();
+    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
+      Document doc = metaDao.getLatest(guid, sensorType);
+      if (doc == null || doc.getDocument() == null) {
+        continue;
+      }
+      // Change the underlying document alerts lists to sets to avoid ordering issues.
+      convertAlertsFieldToSet(doc.getDocument());
+      convertAlertsFieldToSet(message0);
+
+      if (message0.equals(doc.getDocument())) {
+        convertAlertsFieldToList(doc.getDocument());
+        convertAlertsFieldToList(message0);
+        return;
+      }
+    }
+
+    throw new OriginalNotFoundException(
+        "Could not find " + guid + " after " + MAX_RETRIES + " tries");
+  }
+
+  protected void convertAlertsFieldToSet(Map<String, Object> document) {
+    if (document.get(ALERT_FIELD) instanceof List) {
+      @SuppressWarnings("unchecked")
+      List<Map<String, Object>> message0AlertField = (List<Map<String, Object>>) document
+          .get(ALERT_FIELD);
+      Set<Map<String, Object>> message0AlertSet = new HashSet<>(message0AlertField);
+      document.put(ALERT_FIELD, message0AlertSet);
+    }
+  }
+
+  protected void convertAlertsFieldToList(Map<String, Object> document) {
+    if (document.get(ALERT_FIELD) instanceof Set) {
+      @SuppressWarnings("unchecked")
+      Set<Map<String, Object>> message0AlertField = (Set<Map<String, Object>>) document
+          .get(ALERT_FIELD);
+      List<Map<String, Object>> message0AlertList = new ArrayList<>(message0AlertField);
+      message0AlertList.sort(Comparator.comparing(o -> ((String) o.get(Constants.GUID))));
+      document.put(ALERT_FIELD, message0AlertList);
+    }
+  }
+
+  protected boolean findCreatedDoc(String guid, String sensorType)
+      throws InterruptedException, IOException, OriginalNotFoundException {
+    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
+      Document doc = metaDao.getLatest(guid, sensorType);
+      if (doc != null) {
+        return true;
+      }
+    }
+    throw new OriginalNotFoundException(
+        "Could not find " + guid + " after " + MAX_RETRIES + " tries");
+  }
+
+  protected boolean findCreatedDocs(List<GetRequest> getRequests)
+      throws InterruptedException, IOException, OriginalNotFoundException {
+    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
+      Iterable<Document> docs = metaDao.getAllLatest(getRequests);
+      if (docs != null) {
+        int docCount = Iterables.size(docs);
+        if (getRequests.size() == docCount) {
+          return true;
+        }
+      }
+    }
+    throw new OriginalNotFoundException("Count not find guids after " + MAX_RETRIES + "tries");
+  }
+
+  protected List<Map<String, Object>> buildAlerts(int count) {
+    List<Map<String, Object>> inputData = new ArrayList<>();
+    for (int i = 0; i < count; ++i) {
+      final String guid = "message_" + i;
+      Map<String, Object> alerts = new HashMap<>();
+      alerts.put(Constants.GUID, guid);
+      alerts.put(getSourceTypeField(), SENSOR_NAME);
+      alerts.put(THREAT_FIELD_DEFAULT, (double) i);
+      alerts.put("timestamp", System.currentTimeMillis());
+      inputData.add(alerts);
+    }
+    return inputData;
+  }
+
+  protected List<Map<String, Object>> buildMetaAlerts(int count, MetaAlertStatus status,
+      Optional<List<Map<String, Object>>> alerts) {
+    List<Map<String, Object>> inputData = new ArrayList<>();
+    for (int i = 0; i < count; ++i) {
+      final String guid = "meta_" + status.getStatusString() + "_" + i;
+      inputData.add(buildMetaAlert(guid, status, alerts));
+    }
+    return inputData;
+  }
+
+  protected Map<String, Object> buildMetaAlert(String guid, MetaAlertStatus status,
+      Optional<List<Map<String, Object>>> alerts) {
+    Map<String, Object> metaAlert = new HashMap<>();
+    metaAlert.put(Constants.GUID, guid);
+    metaAlert.put(getSourceTypeField(), METAALERT_TYPE);
+    metaAlert.put(STATUS_FIELD, status.getStatusString());
+    if (alerts.isPresent()) {
+      List<Map<String, Object>> alertsList = alerts.get();
+      metaAlert.put(ALERT_FIELD, alertsList);
+    }
+    return metaAlert;
+  }
+
+  protected abstract long getMatchingAlertCount(String fieldName, Object fieldValue)
+      throws IOException, InterruptedException;
+
+  protected abstract void addRecords(List<Map<String, Object>> inputData, String index,
+      String docType) throws IOException;
+
+  protected abstract long getMatchingMetaAlertCount(String fieldName, String fieldValue)
+      throws IOException, InterruptedException;
+
+  protected abstract void setupTypings();
+
+  // Get the base index name without any adjustments (e.g. without ES's "_index")
+  protected abstract String getTestIndexName();
+
+  // Get the full name of the test index.  E.g. Elasticsearch appends "_index"
+  protected String getTestIndexFullName() {
+    return getTestIndexName();
+  }
+
+  protected abstract String getMetaAlertIndex();
+
+  protected abstract String getSourceTypeField();
+
+  protected String getThreatTriageField() {
+    return THREAT_FIELD_DEFAULT;
+  }
+
+  // Allow for impls to do any commit they need to do.
+  protected void commit() throws IOException {
+  }
+
+  // Different stores can have different representations of empty metaalerts field.
+  // E.g. Solr expects the field to not be present, ES expects it to be empty.
+  protected abstract void setEmptiedMetaAlertField(Map<String, Object> docMap);
+
+  // Different stores may choose to store non finite double values as Strings.
+  // E.g. NaN may be a string, not a double value.
+  protected abstract boolean isFiniteDoubleOnly();
+
+  // Different stores may choose to return empty alerts lists differently.
+  // E.g. It may be missing completely, or may be an empty list
+  protected abstract boolean isEmptyMetaAlertList();
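+
+  // A concrete store might implement these hooks roughly as follows (illustrative only,
+  // not part of this change):
+  //   @Override protected String getTestIndexName() { return "test"; }
+  //   @Override protected String getMetaAlertIndex() { return "metaalert"; }
+  //   // true if the store serializes NaN/Infinity as strings:
+  //   @Override protected boolean isFiniteDoubleOnly() { return true; }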
+}
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/MetaScoresTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/MetaScoresTest.java
new file mode 100644
index 0000000..6ebfad8
--- /dev/null
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/MetaScoresTest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert;
+
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.ALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_TYPE;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.THREAT_FIELD_DEFAULT;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.THREAT_SORT_DEFAULT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.update.Document;
+import org.junit.Test;
+
+public class MetaScoresTest {
+  @Test
+  public void testCalculateMetaScoresList() {
+    final double delta = 0.001;
+    List<Map<String, Object>> alertList = new ArrayList<>();
+
+    // add an alert with a threat score
+    alertList.add(Collections.singletonMap(THREAT_FIELD_DEFAULT, 10.0f));
+
+    // add a second alert with a threat score
+    alertList.add(Collections.singletonMap(THREAT_FIELD_DEFAULT, 20.0f));
+
+    // add a third alert with NO threat score
+    alertList.add(Collections.singletonMap("alert3", "has no threat score"));
+
+    // create the metaalert
+    Map<String, Object> docMap = new HashMap<>();
+    docMap.put(ALERT_FIELD, alertList);
+    Document metaalert = new Document(docMap, "guid", METAALERT_TYPE, 0L);
+
+    // calculate the threat score for the metaalert
+    MetaScores.calculateMetaScores(metaalert, THREAT_FIELD_DEFAULT, THREAT_SORT_DEFAULT);
+
+    // the metaalert must contain a summary of all child threat scores
+    assertEquals(20D, (Double) metaalert.getDocument().get("max"), delta);
+    assertEquals(10D, (Double) metaalert.getDocument().get("min"), delta);
+    assertEquals(15D, (Double) metaalert.getDocument().get("average"), delta);
+    assertEquals(2L, metaalert.getDocument().get("count"));
+    assertEquals(30D, (Double) metaalert.getDocument().get("sum"), delta);
+    assertEquals(15D, (Double) metaalert.getDocument().get("median"), delta);
+
+    // it must contain an overall threat score; a float to match the type of the threat score of
+    // the other sensor indices
+    Object threatScore = metaalert.getDocument().get(THREAT_FIELD_DEFAULT);
+    assertTrue(threatScore instanceof Float);
+
+    // by default, the overall threat score is the sum of all child threat scores
+    assertEquals(30.0F, threatScore);
+  }
+
+  @Test
+  public void testCalculateMetaScoresWithDifferentFieldName() {
+    List<Map<String, Object>> alertList = new ArrayList<>();
+
+    // add an alert with a threat score
+    alertList.add(Collections.singletonMap(MetaAlertConstants.THREAT_FIELD_DEFAULT, 10.0f));
+
+    // create the metaalert
+    Map<String, Object> docMap = new HashMap<>();
+    docMap.put(MetaAlertConstants.ALERT_FIELD, alertList);
+    Document metaalert = new Document(docMap, "guid", MetaAlertConstants.METAALERT_TYPE, 0L);
+
+    // Configure a different threat triage score field name
+    AccessConfig accessConfig = new AccessConfig();
+    accessConfig.setGlobalConfigSupplier(() -> new HashMap<String, Object>() {{
+      put(Constants.THREAT_SCORE_FIELD_PROPERTY, MetaAlertConstants.THREAT_FIELD_DEFAULT);
+    }});
+
+    MetaScores.calculateMetaScores(metaalert, MetaAlertConstants.THREAT_FIELD_DEFAULT, MetaAlertConstants.THREAT_SORT_DEFAULT);
+    assertNotNull(metaalert.getDocument().get(MetaAlertConstants.THREAT_FIELD_DEFAULT));
+  }
+}
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/lucene/AbstractLuceneMetaAlertUpdateDaoTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/lucene/AbstractLuceneMetaAlertUpdateDaoTest.java
new file mode 100644
index 0000000..5a70636
--- /dev/null
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/dao/metaalert/lucene/AbstractLuceneMetaAlertUpdateDaoTest.java
@@ -0,0 +1,885 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.dao.metaalert.lucene;
+
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.ALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.GROUPS_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_TYPE;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.STATUS_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.THREAT_FIELD_DEFAULT;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.THREAT_SORT_DEFAULT;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertStatus.ACTIVE;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertStatus.INACTIVE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.UUID;
+
+import com.google.common.collect.ImmutableMap;
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.commons.math.util.MathUtils;
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.Constants.Fields;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertRetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.metaalert.MetaScores;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class AbstractLuceneMetaAlertUpdateDaoTest {
+
+  @Mock
+  IndexDao indexDao;
+
+  @Before
+  public void setup() {
+    dao = new TestLuceneMetaAlertUpdateDao();
+  }
+
+  private static final double EPS = 0.00001;
+  private static final String METAALERT_INDEX = "metaalert_index";
+  private static final String METAALERT_GUID = "meta_0";
+  private static final String DEFAULT_PREFIX = "child_";
+  private static final MetaAlertConfig TEST_CONFIG =
+          new MetaAlertConfig(METAALERT_INDEX
+                             , THREAT_SORT_DEFAULT
+                             , () -> ImmutableMap.of(Constants.SENSOR_TYPE_FIELD_PROPERTY, Constants.SENSOR_TYPE
+                                                    , Constants.THREAT_SCORE_FIELD_PROPERTY, THREAT_FIELD_DEFAULT
+                                                    )
+          ) {
+
+            @Override
+            protected String getDefaultThreatTriageField() {
+              return THREAT_FIELD_DEFAULT.replace(':', '.');
+            }
+
+            @Override
+            protected String getDefaultSourceTypeField() {
+              return Constants.SENSOR_TYPE;
+            }
+          };
+
+  private static Map<String, Document> documents = new HashMap<>();
+
+  static {
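+    // Seed the lookup table with one active and one inactive metaalert document, keyed by
+    // status string, so TestMetaAlertRetrieveLatestDao can return them from getLatest().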
+    Document active = new Document(
+        new HashMap<>(),
+        ACTIVE.getStatusString(),
+        METAALERT_TYPE,
+        0L
+    );
+    documents.put(ACTIVE.getStatusString(), active);
+
+    Document inactive = new Document(
+        new HashMap<>(),
+        INACTIVE.getStatusString(),
+        METAALERT_TYPE,
+        0L
+    );
+    inactive.getDocument().put(
+        STATUS_FIELD,
+        INACTIVE.getStatusString()
+    );
+    documents.put(INACTIVE.getStatusString(), inactive);
+  }
+
+  TestMetaAlertRetrieveLatestDao retrieveLatestDao = new TestMetaAlertRetrieveLatestDao();
+
+  private class TestMetaAlertRetrieveLatestDao implements MetaAlertRetrieveLatestDao {
+
+    @Override
+    public Document getLatest(String guid, String sensorType) {
+      return documents.get(guid);
+    }
+
+    @Override
+    public Iterable<Document> getAllLatest(List<GetRequest> getRequests) {
+      return null;
+    }
+  }
+
+  TestLuceneMetaAlertUpdateDao dao; // created in setup() so that the @Mock indexDao is injected first
+
+  private class TestLuceneMetaAlertUpdateDao extends AbstractLuceneMetaAlertUpdateDao {
+
+    TestLuceneMetaAlertUpdateDao() {
+      super(indexDao, retrieveLatestDao, TEST_CONFIG);
+    }
+
+    @Override
+    public void update(Document update, Optional<String> index) {
+    }
+
+    @Override
+    public void addCommentToAlert(CommentAddRemoveRequest request) {
+    }
+
+    @Override
+    public void removeCommentFromAlert(CommentAddRemoveRequest request) {
+    }
+
+    @Override
+    public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) {
+    }
+
+    @Override
+    public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) {
+    }
+
+    @Override
+    public void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+        Optional<Long> timestamp) {
+    }
+
+    @Override
+    public MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request) {
+      return null;
+    }
+
+    @Override
+    public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests) {
+      return false;
+    }
+
+    @Override
+    public boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status) {
+      return false;
+    }
+  }
+
+  /**
+   {
+   "guid": "meta_alert",
+   "index": "metaalert_index",
+   "patch": [
+   {
+   "op": "add",
+   "path": "/metron_alert",
+   "value": []
+   }
+   ],
+   "sensorType": "metaalert"
+   }
+   */
+  @Multiline
+  public static String alertPatchRequest;
+
+  /**
+   {
+   "guid": "meta_alert",
+   "index": "metaalert_index",
+   "patch": [
+   {
+   "op": "add",
+   "path": "/status",
+   "value": []
+   }
+   ],
+   "sensorType": "metaalert"
+   }
+   */
+  @Multiline
+  public static String statusPatchRequest;
+
+  /**
+   {
+   "guid": "meta_alert",
+   "index": "metaalert_index",
+   "patch": [
+   {
+   "op": "add",
+   "path": "/name",
+   "value": []
+   }
+   ],
+   "sensorType": "metaalert"
+   }
+   */
+  @Multiline
+  public static String namePatchRequest;
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test(expected = UnsupportedOperationException.class)
+  public void testBatchUpdateThrowsException() {
+    dao.batchUpdate(null);
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testPatchNotAllowedAlert() throws ParseException {
+    PatchRequest pr = new PatchRequest();
+    Map<String, Object> patch = (JSONObject) new JSONParser().parse(alertPatchRequest);
+    pr.setPatch(Collections.singletonList((JSONObject) ((JSONArray) patch.get("patch")).get(0)));
+    assertFalse(dao.isPatchAllowed(pr));
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testPatchNotAllowedStatus() throws ParseException {
+    PatchRequest pr = new PatchRequest();
+    Map<String, Object> patch = (JSONObject) new JSONParser().parse(statusPatchRequest);
+    pr.setPatch(Collections.singletonList((JSONObject) ((JSONArray) patch.get("patch")).get(0)));
+    assertFalse(dao.isPatchAllowed(pr));
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testPatchAllowedName() throws ParseException {
+    PatchRequest pr = new PatchRequest();
+    Map<String, Object> patch = (JSONObject) new JSONParser().parse(namePatchRequest);
+    pr.setPatch(Collections.singletonList((JSONObject) ((JSONArray) patch.get("patch")).get(0)));
+    assertTrue(dao.isPatchAllowed(pr));
+  }
+
+  @Test
+  public void testUpdateSingle() throws IOException {
+    Map<Document, Optional<String>> updates = new HashMap<>();
+    Document document = new Document(new HashMap<>(), "guid", "sensor", 0L);
+    updates.put(document, Optional.empty());
+    dao.update(updates);
+    verify(indexDao, times(1)).update(document, Optional.empty());
+  }
+
+  @Test
+  public void testUpdateMultiple() throws IOException {
+    Map<Document, Optional<String>> updates = new HashMap<>();
+    Document documentOne = new Document(new HashMap<>(), "guid", "sensor", 0L);
+    updates.put(documentOne, Optional.empty());
+    Document documentTwo = new Document(new HashMap<>(), "guid2", "sensor", 0L);
+    updates.put(documentTwo, Optional.empty());
+    dao.update(updates);
+    verify(indexDao, times(1)).batchUpdate(updates);
+  }
+
+  @Test
+  public void testBuildAddAlertToMetaAlertUpdatesEmpty() {
+    Document metaDoc = new Document(
+        new HashMap<>(),
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+    metaDoc.getDocument().put(
+        ALERT_FIELD,
+        getRawMaps(buildChildAlerts(1, METAALERT_GUID, null))
+    );
+    Map<Document, Optional<String>> actual = dao
+        .buildAddAlertToMetaAlertUpdates(metaDoc, new ArrayList<>());
+    assertEquals(0, actual.size());
+  }
+
+  @Test
+  public void testBuildAddAlertToMetaAlertUpdates() {
+    List<Document> alerts = buildChildAlerts(1, METAALERT_GUID, null);
+
+    Document metaDoc = buildMetaAlert(alerts);
+
+    List<Document> newAlerts = buildChildAlerts(2, null, "new_");
+    Map<Document, Optional<String>> actual = dao
+        .buildAddAlertToMetaAlertUpdates(metaDoc, newAlerts);
+    assertEquals(3, actual.size());
+
+    HashMap<String, Object> expectedExistingAlert = new HashMap<>();
+    expectedExistingAlert.put(Constants.GUID, "child_0");
+    expectedExistingAlert.put(METAALERT_FIELD, Collections.singletonList(METAALERT_GUID));
+    expectedExistingAlert.put(THREAT_FIELD_DEFAULT, 0.0f);
+
+    List<Map<String, Object>> expectedAlerts = new ArrayList<>();
+    expectedAlerts.add(expectedExistingAlert);
+    expectedAlerts.addAll(getRawMaps(newAlerts));
+
+    List<Double> scores = new ArrayList<>();
+    scores.add(0.0d);
+    scores.add(0.0d);
+    scores.add(0.0d);
+
+    Map<String, Object> expectedMetaAlertMap = new HashMap<>();
+    expectedMetaAlertMap.put(Constants.GUID, METAALERT_GUID);
+    expectedMetaAlertMap.put(ALERT_FIELD, expectedAlerts);
+    expectedMetaAlertMap.put(THREAT_FIELD_DEFAULT, 0.0f);
+
+    expectedMetaAlertMap.putAll(new MetaScores(scores).getMetaScores());
+    Document expectedMetaAlertDoc = new Document(expectedMetaAlertMap, METAALERT_GUID,
+        METAALERT_TYPE,
+        0L);
+
+    Map<Document, Optional<String>> expected = new HashMap<>();
+    expected.put(expectedMetaAlertDoc, Optional.of(METAALERT_INDEX));
+    expected.put(newAlerts.get(0), Optional.empty());
+    expected.put(newAlerts.get(1), Optional.empty());
+
+    assertTrue(updatesMapEquals(expected, actual));
+  }
+
+  @Test
+  public void testRemoveAlertsFromMetaAlert() throws IOException {
+    List<Document> alerts = buildChildAlerts(3, METAALERT_GUID, null);
+
+    Document metaDoc = buildMetaAlert(alerts);
+
+    List<Document> deletedAlerts = new ArrayList<>();
+    deletedAlerts.add(alerts.get(0));
+    deletedAlerts.add(alerts.get(2));
+
+    Map<Document, Optional<String>> actual = dao
+        .buildRemoveAlertsFromMetaAlert(metaDoc, deletedAlerts);
+    assertEquals(3, actual.size());
+
+    Map<String, Object> expectedDeletedAlert = new HashMap<>();
+    expectedDeletedAlert.put(Constants.GUID, "child_0");
+    expectedDeletedAlert.put(THREAT_FIELD_DEFAULT, 0.0f);
+    expectedDeletedAlert
+        .put(MetaAlertConstants.METAALERT_FIELD, new ArrayList<>());
+    Document expectedDeletedDocument = new Document(expectedDeletedAlert, "child_0", "test", 0L);
+
+    Map<String, Object> expectedDeletedAlert3 = new HashMap<>();
+    expectedDeletedAlert3.put(Constants.GUID, "child_2");
+    expectedDeletedAlert3.put(THREAT_FIELD_DEFAULT, 0.0f);
+    expectedDeletedAlert3
+        .put(MetaAlertConstants.METAALERT_FIELD, new ArrayList<>());
+    Document expectedDeletedDocument2 = new Document(expectedDeletedAlert3, "child_2", "test", 0L);
+
+    List<Map<String, Object>> expectedAlerts = new ArrayList<>();
+    expectedAlerts.add(alerts.get(1).getDocument());
+
+    Map<String, Object> expectedMetaAlertMap = new HashMap<>();
+    expectedMetaAlertMap.put(Constants.GUID, METAALERT_GUID);
+    expectedMetaAlertMap.put(ALERT_FIELD, expectedAlerts);
+    expectedMetaAlertMap.put(THREAT_FIELD_DEFAULT, 0.0f);
+    expectedMetaAlertMap.putAll(new MetaScores(Collections.singletonList(0.0d)).getMetaScores());
+    Document expectedMetaAlertDoc = new Document(expectedMetaAlertMap, METAALERT_GUID,
+        METAALERT_TYPE,
+        0L);
+
+    Map<Document, Optional<String>> expected = new HashMap<>();
+    expected.put(expectedDeletedDocument, Optional.empty());
+    expected.put(expectedDeletedDocument2, Optional.empty());
+    expected.put(expectedMetaAlertDoc, Optional.of(METAALERT_INDEX));
+
+    assertTrue(updatesMapEquals(expected, actual));
+  }
+
+  @Test
+  public void testBuildRemoveAlertsFromMetaAlertThrowsException() throws Exception {
+    thrown.expect(IllegalStateException.class);
+    thrown.expectMessage("Removing these alerts will result in an empty meta alert.  Empty meta alerts are not allowed.");
+
+    List<Document> alerts = buildChildAlerts(1, METAALERT_GUID, null);
+    Document metaDoc = buildMetaAlert(alerts);
+
+    dao.buildRemoveAlertsFromMetaAlert(metaDoc, alerts);
+  }
+
+  @Test
+  public void testRemoveAlertsFromMetaAlertNoChildAlerts() {
+    Document empty = new Document(new HashMap<>(), "empty", METAALERT_TYPE, 0L);
+    boolean actual = dao.removeAlertsFromMetaAlert(empty, Collections.singletonList("child"));
+    assertFalse(actual);
+  }
+
+  @Test
+  public void testRemoveAlertsFromMetaAlertEmptyRemoveList() {
+    Document metaDoc = new Document(
+        new HashMap<>(),
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+    metaDoc.getDocument().put(
+        STATUS_FIELD,
+        ACTIVE.getStatusString()
+    );
+    metaDoc.getDocument().put(
+        ALERT_FIELD,
+        new HashMap<String, Object>() {{
+          put(Constants.GUID, "child_0");
+        }}
+    );
+    boolean actual = dao.removeAlertsFromMetaAlert(metaDoc, new ArrayList<>());
+    assertFalse(actual);
+  }
+
+  @Test
+  public void testRemoveAlertsFromMetaAlertEmptyRemoveSingle() {
+    Document metaDoc = new Document(
+        new HashMap<>(),
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+    metaDoc.getDocument().put(
+        STATUS_FIELD,
+        ACTIVE.getStatusString()
+    );
+    List<Map<String, Object>> alerts = new ArrayList<>();
+    alerts.add(new HashMap<String, Object>() {{
+      put(Constants.GUID, "child_0");
+    }});
+    metaDoc.getDocument().put(
+        ALERT_FIELD,
+        alerts
+    );
+    boolean actual = dao.removeAlertsFromMetaAlert(metaDoc, Collections.singletonList("child_0"));
+
+    Document expected = new Document(
+        new HashMap<>(),
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+    expected.getDocument().put(
+        STATUS_FIELD,
+        ACTIVE.getStatusString()
+    );
+    expected.getDocument().put(ALERT_FIELD, new ArrayList<>());
+    assertTrue(actual);
+    assertEquals(expected, metaDoc);
+  }
+
+  @Test
+  public void testBuildStatusChangeUpdatesToInactive() {
+    List<Document> alerts = buildChildAlerts(2, METAALERT_GUID, null);
+
+    Map<String, Object> metaAlertMap = new HashMap<>();
+    metaAlertMap.put(ALERT_FIELD, getRawMaps(alerts));
+    metaAlertMap.put(Constants.GUID, METAALERT_GUID);
+    metaAlertMap.put(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
+    Document metaDoc = new Document(
+        metaAlertMap,
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+
+    Map<Document, Optional<String>> actual = dao
+        .buildStatusChangeUpdates(metaDoc, alerts, MetaAlertStatus.INACTIVE);
+    assertEquals(3, actual.size());
+
+    List<Document> expectedDeletedAlerts = buildChildAlerts(2, null, null);
+    List<Map<String, Object>> expectedAlerts = new ArrayList<>();
+    expectedAlerts.add(alerts.get(0).getDocument());
+    expectedAlerts.add(alerts.get(1).getDocument());
+
+    Map<String, Object> expectedMetaAlertMap = new HashMap<>();
+    expectedMetaAlertMap.put(Constants.GUID, METAALERT_GUID);
+    expectedMetaAlertMap.put(ALERT_FIELD, expectedAlerts);
+    expectedMetaAlertMap.put(STATUS_FIELD, MetaAlertStatus.INACTIVE.getStatusString());
+    Document expectedMetaAlertDoc = new Document(expectedMetaAlertMap, METAALERT_GUID,
+        METAALERT_TYPE,
+        0L);
+
+    Map<Document, Optional<String>> expected = new HashMap<>();
+    expected.put(expectedMetaAlertDoc, Optional.of(METAALERT_INDEX));
+    expected.put(expectedDeletedAlerts.get(0), Optional.empty());
+    expected.put(expectedDeletedAlerts.get(1), Optional.empty());
+
+    assertTrue(updatesMapEquals(expected, actual));
+  }
+
+  @Test
+  public void testBuildStatusChangeUpdatesToActive() {
+    List<Document> alerts = buildChildAlerts(2, METAALERT_GUID, null);
+
+    Map<String, Object> metaAlertMap = new HashMap<>();
+    metaAlertMap.put(ALERT_FIELD, getRawMaps(alerts));
+    metaAlertMap.put(Constants.GUID, METAALERT_GUID);
+    metaAlertMap.put(STATUS_FIELD, MetaAlertStatus.INACTIVE.getStatusString());
+    Document metaDoc = new Document(
+        metaAlertMap,
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+
+    Map<Document, Optional<String>> actual = dao.buildStatusChangeUpdates(
+        metaDoc,
+        alerts,
+        MetaAlertStatus.ACTIVE
+    );
+
+    List<Map<String, Object>> expectedAlerts = new ArrayList<>();
+    expectedAlerts.add(alerts.get(0).getDocument());
+    expectedAlerts.add(alerts.get(1).getDocument());
+
+    Map<String, Object> expectedMetaAlertMap = new HashMap<>();
+    expectedMetaAlertMap.put(ALERT_FIELD, expectedAlerts);
+    expectedMetaAlertMap.put(Constants.GUID, METAALERT_GUID);
+    expectedMetaAlertMap.put(STATUS_FIELD, MetaAlertStatus.ACTIVE.getStatusString());
+    Document expectedMetaAlertDoc = new Document(
+        expectedMetaAlertMap,
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+
+    Map<Document, Optional<String>> expected = new HashMap<>();
+    expected.put(expectedMetaAlertDoc, Optional.of(METAALERT_INDEX));
+
+    assertTrue(updatesMapEquals(expected, actual));
+  }
+
+  @Test
+  public void testRemoveAlertsFromMetaAlertEmptyRemoveMultiple() {
+    Document metDoc = new Document(new HashMap<>(), METAALERT_GUID, METAALERT_TYPE, 0L);
+    metDoc.getDocument().put(STATUS_FIELD, ACTIVE.getStatusString());
+    List<Document> alerts = buildChildAlerts(3, null, null);
+    metDoc.getDocument().put(ALERT_FIELD, getRawMaps(alerts));
+    List<String> removeGuids = new ArrayList<>();
+    removeGuids.add("child_0");
+    removeGuids.add("child_2");
+    removeGuids.add("child_doesn't_exist");
+
+    boolean actual = dao.removeAlertsFromMetaAlert(metDoc, removeGuids);
+
+    // Build the expected metaalert
+    Document expected = new Document(new HashMap<>(), METAALERT_GUID, METAALERT_TYPE, 0L);
+    expected.getDocument().put(STATUS_FIELD, ACTIVE.getStatusString());
+    List<Map<String, Object>> alertsExpected = new ArrayList<>();
+    alertsExpected.add(new HashMap<String, Object>() {{
+                         put(METAALERT_FIELD, new ArrayList<>());
+                         put(Constants.GUID, "child_1");
+                         put(THREAT_FIELD_DEFAULT, 0.0f);
+                       }}
+    );
+
+    expected.getDocument().put(ALERT_FIELD, alertsExpected);
+    assertEquals(expected, metDoc);
+    assertTrue(actual);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void testRemoveAlertsFromMetaAlertInactive() throws IOException {
+    dao.removeAlertsFromMetaAlert(INACTIVE.getStatusString(), null);
+  }
+
+  @Test
+  public void testRemoveMetaAlertFromAlertSuccess() {
+    List<String> metaAlertGuids = new ArrayList<>();
+    metaAlertGuids.add("metaalert1");
+    metaAlertGuids.add("metaalert2");
+    Map<String, Object> alertFields = new HashMap<>();
+    alertFields.put(METAALERT_FIELD, metaAlertGuids);
+    Document alert = new Document(alertFields, "alert", "test", 0L);
+
+    Document expected = new Document(new HashMap<>(), "alert", "test", 0L);
+    List<String> expectedMetaAlertGuids = new ArrayList<>();
+    expectedMetaAlertGuids.add("metaalert2");
+    expected.getDocument().put(METAALERT_FIELD, expectedMetaAlertGuids);
+
+    boolean actual = dao.removeMetaAlertFromAlert("metaalert1", alert);
+    assertTrue(actual);
+    assertEquals(expected, alert);
+  }
+
+  @Test
+  public void testRemoveMetaAlertFromAlertMissing() {
+    List<String> metaAlertGuids = new ArrayList<>();
+    metaAlertGuids.add("metaalert1");
+    metaAlertGuids.add("metaalert2");
+    Map<String, Object> alertFields = new HashMap<>();
+    alertFields.put(METAALERT_FIELD, metaAlertGuids);
+    Document alert = new Document(alertFields, "alert", "test", 0L);
+
+    boolean actual = dao.removeMetaAlertFromAlert("metaalert3", alert);
+    assertFalse(actual);
+  }
+
+  @Test
+  public void testAddMetaAlertToAlertEmpty() {
+    Map<String, Object> alertFields = new HashMap<>();
+    alertFields.put(METAALERT_FIELD, new ArrayList<>());
+    Document alert = new Document(alertFields, "alert", "test", 0L);
+
+    Document expected = new Document(new HashMap<>(), "alert", "test", 0L);
+    List<String> expectedMetaAlertGuids = new ArrayList<>();
+    expectedMetaAlertGuids.add("metaalert1");
+    expected.getDocument().put(METAALERT_FIELD, expectedMetaAlertGuids);
+
+    boolean actual = dao.addMetaAlertToAlert("metaalert1", alert);
+    assertTrue(actual);
+    assertEquals(expected, alert);
+  }
+
+  @Test
+  public void testAddMetaAlertToAlertNonEmpty() {
+    List<String> metaAlertGuids = new ArrayList<>();
+    metaAlertGuids.add("metaalert1");
+    Map<String, Object> alertFields = new HashMap<>();
+    alertFields.put(METAALERT_FIELD, metaAlertGuids);
+    Document alert = new Document(alertFields, "alert", "test", 0L);
+
+    Document expected = new Document(new HashMap<>(), "alert", "test", 0L);
+    List<String> expectedMetaAlertGuids = new ArrayList<>();
+    expectedMetaAlertGuids.add("metaalert1");
+    expectedMetaAlertGuids.add("metaalert2");
+    expected.getDocument().put(METAALERT_FIELD, expectedMetaAlertGuids);
+
+    boolean actual = dao.addMetaAlertToAlert("metaalert2", alert);
+    assertTrue(actual);
+    assertEquals(expected, alert);
+  }
+
+  @Test
+  public void testAddMetaAlertToAlertDuplicate() {
+    List<String> metaAlertGuids = new ArrayList<>();
+    metaAlertGuids.add("metaalert1");
+    Map<String, Object> alertFields = new HashMap<>();
+    alertFields.put(METAALERT_FIELD, metaAlertGuids);
+    Document alert = new Document(alertFields, "alert", "test", 0L);
+
+    boolean actual = dao.addMetaAlertToAlert("metaalert1", alert);
+    assertFalse(actual);
+  }
+
+  @Test
+  public void testBuildCreateDocumentSingleAlert() {
+    List<String> groups = new ArrayList<>();
+    groups.add("group_one");
+    groups.add("group_two");
+
+    // Build the first response from the multiget
+    Map<String, Object> alertOne = new HashMap<>();
+    alertOne.put(Constants.GUID, "alert_one");
+    alertOne.put(THREAT_FIELD_DEFAULT, 10.0d);
+    List<Document> alerts = new ArrayList<Document>() {{
+      add(new Document(alertOne, "", "", 0L));
+    }};
+
+    // Actually build the doc
+    Document actual = dao.buildCreateDocument(alerts, groups, ALERT_FIELD);
+
+    ArrayList<Map<String, Object>> alertList = new ArrayList<>();
+    alertList.add(alertOne);
+
+    Map<String, Object> actualDocument = actual.getDocument();
+    assertEquals(
+        MetaAlertStatus.ACTIVE.getStatusString(),
+        actualDocument.get(STATUS_FIELD)
+    );
+    assertEquals(
+        alertList,
+        actualDocument.get(ALERT_FIELD)
+    );
+    assertEquals(
+        groups,
+        actualDocument.get(GROUPS_FIELD)
+    );
+
+    // Don't care about the result, just that it's a UUID. Exception will be thrown if not.
+    UUID.fromString((String) actualDocument.get(Constants.GUID));
+  }
+
+  @Test
+  public void testBuildCreateDocumentMultipleAlerts() {
+    List<String> groups = new ArrayList<>();
+    groups.add("group_one");
+    groups.add("group_two");
+
+    // Build the first response from the multiget
+    Map<String, Object> alertOne = new HashMap<>();
+    alertOne.put(Constants.GUID, "alert_one");
+    alertOne.put(THREAT_FIELD_DEFAULT, 10.0d);
+
+    // Build the second response from the multiget
+    Map<String, Object> alertTwo = new HashMap<>();
+    alertTwo.put(Constants.GUID, "alert_one");
+    alertTwo.put(THREAT_FIELD_DEFAULT, 5.0d);
+    List<Document> alerts = new ArrayList<>();
+    alerts.add(new Document(alertOne, "", "", 0L));
+    alerts.add(new Document(alertTwo, "", "", 0L));
+
+    // Actually build the doc
+    Document actual = dao.buildCreateDocument(alerts, groups, ALERT_FIELD);
+
+    ArrayList<Map<String, Object>> alertList = new ArrayList<>();
+    alertList.add(alertOne);
+    alertList.add(alertTwo);
+
+    Map<String, Object> actualDocument = actual.getDocument();
+    assertNotNull(actualDocument.get(Fields.TIMESTAMP.getName()));
+    assertEquals(
+        alertList,
+        actualDocument.get(ALERT_FIELD)
+    );
+    assertEquals(
+        groups,
+        actualDocument.get(GROUPS_FIELD)
+    );
+
+    // Don't care about the result, just that it's a UUID. Exception will be thrown if not.
+    UUID.fromString((String) actualDocument.get(Constants.GUID));
+  }
+
+  // Utility method to manage comparing update maps
+  protected boolean updatesMapEquals(Map<Document, Optional<String>> expected,
+      Map<Document, Optional<String>> actual) {
+    Entry<Document, Optional<String>> expectedMetaEntry;
+    Entry<Document, Optional<String>> actualMetaEntry;
+
+    expectedMetaEntry = findMetaEntry(expected);
+    actualMetaEntry = findMetaEntry(actual);
+
+    // Compare the metaalerts directly: they can mess with comparison because of float scores.
+    if (!metaAlertDocumentEquals(expectedMetaEntry.getKey(), actualMetaEntry.getKey())) {
+      return false;
+    } else {
+      // Remove the potentially problematic metaalert comparison.
+      return removeMetaEntry(expected).equals(removeMetaEntry(actual));
+    }
+  }
+
+  protected Entry<Document, Optional<String>> findMetaEntry(
+      Map<Document, Optional<String>> expected) {
+    for (Entry<Document, Optional<String>> entry : expected.entrySet()) {
+      if (entry.getKey().getSensorType().equals(METAALERT_TYPE)) {
+        return entry;
+      }
+    }
+    return null;
+  }
+
+  // Unfortunately, the floating point comparison problem prevents direct remove call.
+  protected Map<Document, Optional<String>> removeMetaEntry(
+      Map<Document, Optional<String>> updates) {
+    Map<Document, Optional<String>> filteredUpdates = new HashMap<>();
+    for (Entry<Document, Optional<String>> entry : updates.entrySet()) {
+      if (!(entry.getKey().getSensorType().equals(METAALERT_TYPE))) {
+        filteredUpdates.put(entry.getKey(), entry.getValue());
+      }
+    }
+    return filteredUpdates;
+  }
+
+  // Utility method to ensure that the floating point values contained in a metaalert don't get
+  // incorrectly evaluated as not equal.
+  private boolean metaAlertDocumentEquals(Document expected, Document actual) {
+    if (!expected.getGuid().equals(actual.getGuid())) {
+      return false;
+    }
+    if (!expected.getSensorType().equals(actual.getSensorType())) {
+      return false;
+    }
+    if (!expected.getTimestamp().equals(actual.getTimestamp())) {
+      return false;
+    }
+
+    // The underlying documents have to be compared more thoroughly since it has floating point
+    Map<String, Object> expectedDocument = expected.getDocument();
+    Map<String, Object> actualDocument = actual.getDocument();
+
+    if (expectedDocument.size() != actualDocument.size()) {
+      return false;
+    }
+
+    for (Entry<String, Object> entry : expectedDocument.entrySet()) {
+      Object value = entry.getValue();
+      Object actualValue = actual.getDocument().get(entry.getKey());
+      if (value instanceof Float) {
+        if (!MathUtils.equals((Float) value, (Float) actualValue, EPS)) {
+          return false;
+        }
+      } else if (value instanceof Double) {
+        if (!MathUtils.equals((Double) value, (Double) actualValue, EPS)) {
+          return false;
+        }
+      } else {
+        if (!value.equals(actual.getDocument().get(entry.getKey()))) {
+          return false;
+        }
+      }
+    }
+
+    return true;
+  }
+
+  // Generate some child alerts.
+  protected List<Document> buildChildAlerts(int num, String parent, String guidPrefix) {
+    String prefix = guidPrefix != null ? guidPrefix : DEFAULT_PREFIX;
+    List<Document> alerts = new ArrayList<>();
+    for (int i = 0; i < num; i++) {
+      HashMap<String, Object> fields = new HashMap<>();
+      fields.put(Constants.GUID, prefix + i);
+      fields.put(THREAT_FIELD_DEFAULT, 0.0f);
+      if (parent != null) {
+        fields.put(METAALERT_FIELD, Collections.singletonList(parent));
+      } else {
+        fields.put(METAALERT_FIELD, new ArrayList<>());
+      }
+      alerts.add(new Document(fields, prefix + i, "test", 0L));
+    }
+    return alerts;
+  }
+
+  protected List<Map<String, Object>> getRawMaps(List<Document> documents) {
+    List<Map<String, Object>> rawMaps = new ArrayList<>();
+    for (Document document : documents) {
+      rawMaps.add(document.getDocument());
+    }
+    return rawMaps;
+  }
+
+  protected Document buildMetaAlert(List<Document> alerts) {
+    Map<String, Object> metaAlertMap = new HashMap<>();
+    metaAlertMap.put(ALERT_FIELD, getRawMaps(alerts));
+    metaAlertMap.put(Constants.GUID, METAALERT_GUID);
+    return new Document(
+        metaAlertMap,
+        METAALERT_GUID,
+        METAALERT_TYPE,
+        0L
+    );
+  }
+}
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/HBaseDaoIntegrationTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/HBaseDaoIntegrationTest.java
index f57a101..73a9077 100644
--- a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/HBaseDaoIntegrationTest.java
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/HBaseDaoIntegrationTest.java
@@ -20,6 +20,7 @@
 
 import static org.apache.metron.indexing.dao.HBaseDao.HBASE_CF;
 import static org.apache.metron.indexing.dao.HBaseDao.HBASE_TABLE;
+import static org.apache.metron.indexing.dao.IndexDao.COMMENTS_FIELD;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,12 +30,14 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.stream.Collectors;
-
-import org.apache.commons.codec.binary.Hex;
 import org.apache.metron.hbase.mock.MockHBaseTableProvider;
+import org.apache.metron.hbase.mock.MockHTable;
 import org.apache.metron.indexing.dao.AccessConfig;
 import org.apache.metron.indexing.dao.HBaseDao;
 import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.MultiIndexDao;
+import org.apache.metron.indexing.dao.UpdateIntegrationTest;
+import org.apache.metron.indexing.dao.search.AlertComment;
 import org.apache.metron.indexing.dao.search.GetRequest;
 import org.apache.metron.indexing.dao.update.Document;
 import org.junit.After;
@@ -42,7 +45,7 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-public class HBaseDaoIntegrationTest {
+public class HBaseDaoIntegrationTest extends UpdateIntegrationTest  {
 
   private static final String TABLE_NAME = "metron_update";
   private static final String COLUMN_FAMILY = "cf";
@@ -167,6 +170,13 @@
     Assert.assertFalse("Result size should be 12 but was greater", results.hasNext());
   }
 
+  @Override
+  public void test() {
+    // The parent class's main test exercises a variety of features that HBaseDao does not
+    // implement, so this override intentionally does nothing.
+  }
+
   protected List<Document> buildAlerts(int count) throws IOException {
     List<Document> alerts = new ArrayList<>();
     for (int i = 0; i < count; ++i) {
@@ -178,4 +188,67 @@
     return alerts;
   }
 
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testRemoveComments() throws Exception {
+    Map<String, Object> fields = new HashMap<>();
+    fields.put("guid", "add_comment");
+    fields.put("source.type", SENSOR_NAME);
+
+    Document document = new Document(fields, "add_comment", SENSOR_NAME, 1526401584951L);
+    hbaseDao.update(document, Optional.of(SENSOR_NAME));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    addAlertComment("add_comment", "New Comment", "test_user", 1526401584951L);
+    // Ensure we have the first comment
+    ArrayList<AlertComment> comments = new ArrayList<>();
+    comments.add(new AlertComment("New Comment", "test_user", 1526401584951L));
+    document.getDocument().put(COMMENTS_FIELD, comments.stream().map(AlertComment::asMap).collect(
+        Collectors.toList()));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    addAlertComment("add_comment", "New Comment 2", "test_user_2", 1526401584952L);
+    // Ensure we have the second comment
+    comments.add(new AlertComment("New Comment 2", "test_user_2", 1526401584952L));
+    document.getDocument().put(COMMENTS_FIELD, comments.stream().map(AlertComment::asMap).collect(
+        Collectors.toList()));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    removeAlertComment("add_comment", "New Comment 2", "test_user_2", 1526401584952L);
+    // Ensure we only have the first comments
+    comments = new ArrayList<>();
+    comments.add(new AlertComment("New Comment", "test_user", 1526401584951L));
+    document.getDocument().put(COMMENTS_FIELD, comments.stream().map(AlertComment::asMap).collect(
+        Collectors.toList()));
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+
+    removeAlertComment("add_comment", "New Comment", "test_user", 1526401584951L);
+    // Ensure we have no comments
+    document.getDocument().remove(COMMENTS_FIELD);
+    findUpdatedDoc(document.getDocument(), "add_comment", SENSOR_NAME);
+  }
+
+  @Override
+  protected IndexDao getDao() {
+    return hbaseDao;
+  }
+
+  @Override
+  protected String getIndexName() {
+    return null;
+  }
+
+  @Override
+  protected MockHTable getMockHTable() {
+    return null;
+  }
+
+  @Override
+  protected void addTestData(String indexName, String sensorType, List<Map<String, Object>> docs) {
+  }
+
+  @Override
+  protected List<Map<String, Object>> getIndexedTestData(String indexName, String sensorType) {
+    return null;
+  }
 }
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/IndexingIntegrationTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/IndexingIntegrationTest.java
index 2e703f6..ad39c85 100644
--- a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/IndexingIntegrationTest.java
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/integration/IndexingIntegrationTest.java
@@ -51,12 +51,10 @@
   protected final int NUM_RETRIES = 100;
   protected final long TOTAL_TIME_MS = 150000L;
 
-  protected void preTest() {}
-
+  protected void preTest() { }
 
   @Test
   public void test() throws Exception {
-    preTest();
     final List<byte[]> inputMessages = TestUtils.readSampleData(sampleParsedPath);
     final Properties topologyProperties = new Properties() {{
       setProperty("indexing_kafka_start", "UNCOMMITTED_EARLIEST");
diff --git a/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/util/IndexingCacheUtilTest.java b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/util/IndexingCacheUtilTest.java
new file mode 100644
index 0000000..3d6ee85
--- /dev/null
+++ b/metron-platform/metron-indexing/src/test/java/org/apache/metron/indexing/util/IndexingCacheUtilTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.indexing.util;
+
+import org.apache.metron.common.configuration.IndexingConfigurations;
+import org.apache.metron.common.zookeeper.ConfigurationsCache;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class IndexingCacheUtilTest {
+
+  @Test
+  public void getIndexLookupFunctionShouldReturnConfiguredIndex() {
+    IndexingConfigurations indexingConfigs = mock(IndexingConfigurations.class);
+    ConfigurationsCache cache = mock(ConfigurationsCache.class);
+
+    Map<String, Object> broIndexingConfig = new HashMap<String, Object>() {{
+      put("writer", new HashMap<String, Object>() {{
+        put("index", "bro_index");
+      }});
+    }};
+    when(indexingConfigs.getSensorIndexingConfig("bro")).thenReturn(broIndexingConfig);
+    when(cache.get(IndexingConfigurations.class)).thenReturn(indexingConfigs);
+
+    assertEquals("bro_index", IndexingCacheUtil.getIndexLookupFunction(cache, "writer").apply("bro"));
+  }
+
+  @Test
+  public void getIndexLookupFunctionShouldDefaultToSensorType() {
+    IndexingConfigurations indexingConfigs = mock(IndexingConfigurations.class);
+    ConfigurationsCache cache = mock(ConfigurationsCache.class);
+
+    Map<String, Object> broIndexingConfig = new HashMap<String, Object>() {{
+      put("writer", new HashMap<String, Object>() {{
+        put("index", "bro_index");
+      }});
+    }};
+    when(indexingConfigs.getSensorIndexingConfig("bro")).thenReturn(broIndexingConfig);
+    when(cache.get(IndexingConfigurations.class)).thenReturn(indexingConfigs);
+
+    assertEquals("Should default to sensor type on missing sensor config", "snort", IndexingCacheUtil.getIndexLookupFunction(cache, "writer").apply("snort"));
+    assertEquals("Should default to sensor type on missing writer config", "bro", IndexingCacheUtil.getIndexLookupFunction(cache, "someWriter").apply("bro"));
+  }
+}
diff --git a/metron-platform/metron-parsers/README.md b/metron-platform/metron-parsers/README.md
index 8254baf..d79b9ce 100644
--- a/metron-platform/metron-parsers/README.md
+++ b/metron-platform/metron-parsers/README.md
@@ -561,6 +561,8 @@
 
 Please see a description of the steps necessary to make this change in the metron-elasticsearch [Using Metron with Elasticsearch 2.x](../../metron-platform/metron-elasticsearch#using-metron-with-elasticsearch-2x)
 
+If Solr is selected as the real-time store, additional fields must also be added.  See the [Solr](../metron-indexing#solr) section in metron-indexing for more details.
+
 ## Kafka Queue
 The kafka queue associated with your parser is a collection point for
 all of the data sent to your parser.  As such, make sure that the number of partitions in
diff --git a/metron-platform/metron-pcap-backend/.gitignore b/metron-platform/metron-pcap-backend/.gitignore
new file mode 100644
index 0000000..df1a13b
--- /dev/null
+++ b/metron-platform/metron-pcap-backend/.gitignore
@@ -0,0 +1 @@
+/logs
\ No newline at end of file
diff --git a/metron-platform/metron-solr/README.md b/metron-platform/metron-solr/README.md
new file mode 100644
index 0000000..ca90c73
--- /dev/null
+++ b/metron-platform/metron-solr/README.md
@@ -0,0 +1,170 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+# Solr in Metron
+
+## Table of Contents
+
+* [Introduction](#introduction)
+* [Configuration](#configuration)
+* [Installing](#installing)
+* [Schemas](#schemas)
+* [Collections](#collections)
+
+## Introduction
+
+Metron ships with Solr 6.6.2 support. Solr Cloud can be used as the real-time portion of the datastore resulting from [metron-indexing](../metron-indexing/README.md).
+
+## Configuration
+
+### The Indexing Topology
+
+Solr is a viable option for the `random access topology` and, similar to the Elasticsearch Writer, can be configured
+via the global config.  The following settings are possible as part of the global config:
+* `solr.zookeeper`
+  * The zookeeper quorum associated with the SolrCloud instance.  This is a required field with no default.
+* `solr.commitPerBatch`
+  * This is a boolean which defines whether the writer commits every batch.  The default is `true`.
+  * _WARNING_: If you set this to `false`, then commits will happen based on the SolrClient's internal mechanism and
+    worker failure *may* result in data being acknowledged in Storm but not written to Solr.
+* `solr.commit.soft`
+  * This is a boolean which defines whether the writer makes a soft commit or a durable commit.  See [here](https://lucene.apache.org/solr/guide/6_6/near-real-time-searching.html#NearRealTimeSearching-AutoCommits).  The default is `false`.
+  * _WARNING_: If you set this to `true`, then commits will happen based on the SolrClient's internal mechanism and
+    worker failure *may* result in data being acknowledged in Storm but not written to Solr.
+* `solr.commit.waitSearcher`
+  * This is a boolean which defines whether the writer blocks the commit until the data is available to search.  See [here](https://lucene.apache.org/solr/guide/6_6/near-real-time-searching.html#NearRealTimeSearching-AutoCommits).  The default is `true`.
+  * _WARNING_: If you set this to `false`, then commits will happen based on the SolrClient's internal mechanism and
+    worker failure *may* result in data being acknowledged in Storm but not written to Solr.
+* `solr.commit.waitFlush`
+  * This is a boolean which defines whether the writer blocks the commit until the data is flushed.  See [here](https://lucene.apache.org/solr/guide/6_6/near-real-time-searching.html#NearRealTimeSearching-AutoCommits).  The default is `true`.
+  * _WARNING_: If you set this to `false`, then commits will happen based on the SolrClient's internal mechanism and
+    worker failure *may* result in data being acknowledged in Storm but not written to Solr.
+* `solr.collection`
+  * The default Solr collection (if unspecified, the name is `metron`).  By default, sensors will write to a collection associated with the index name in the
+  indexing config for that sensor.  If that index name is the empty string, then the default collection will be used.
+* `solr.http.config`
+  * This is a map which allows users to configure the Solr client's HTTP client.
+  * Possible fields here are:
+    * `socketTimeout` : Socket timeout in ms; closes a socket if a read takes longer than x ms to complete,
+    throwing `java.net.SocketTimeoutException: Read timed out`
+    * `connTimeout` : Connection timeout in ms; closes a socket if a connection cannot be established within x ms,
+    failing with `java.net.SocketTimeoutException: Connection timed out`
+    * `maxConnectionsPerHost` : Maximum connections allowed per host
+    * `maxConnections` :  Maximum total connections allowed
+    * `retry` : Retry HTTP requests on error
+    * `allowCompression` :  Allow compression (deflate, gzip) if the server supports it
+    * `followRedirects` : Follow redirects
+    * `httpBasicAuthUser` : Basic auth username
+    * `httpBasicAuthPassword` : Basic auth password
+    * `solr.ssl.checkPeerName` : Check peer name
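+
+Putting these settings together, a global config enabling the Solr writer might look like the
+following sketch (the values are illustrative; only `solr.zookeeper` has no default):
+```
+{
+  "solr.zookeeper": "node1:2181",
+  "solr.commitPerBatch": true,
+  "solr.commit.waitSearcher": true,
+  "solr.commit.waitFlush": true,
+  "solr.collection": "metron",
+  "solr.http.config": {
+    "socketTimeout": 10000,
+    "connTimeout": 5000
+  }
+}
+```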
+
+
+## Installing
+
+Solr is installed in the [full dev environment for CentOS](../../metron-deployment/development/centos6) by default but is not started initially.  Navigate to `$METRON_HOME/bin` 
+and start Solr Cloud by running `start_solr.sh`.  
+
+Metron's Ambari MPack installs several scripts in `$METRON_HOME/bin` that can be used to manage Solr.  A script is also provided for installing Solr Cloud outside of full dev.
+The script performs the following tasks:
+
+* Stops ES and Kibana
+* Downloads Solr
+* Installs Solr
+* Starts Solr Cloud
+
+_Note: for details on setting up Solr Cloud in production mode, see https://lucene.apache.org/solr/guide/6_6/taking-solr-to-production.html_
+
+Navigate to `$METRON_HOME/bin` and spin up Solr Cloud by running `install_solr.sh`.  After running this script, 
+Elasticsearch and Kibana will have been stopped and you should now have an instance of Solr Cloud up and running at http://localhost:8983/solr/#/~cloud.  This manner of starting Solr
+will also spin up an embedded Zookeeper instance on port 9983.  More information can be found [here](https://lucene.apache.org/solr/guide/6_6/getting-started-with-solrcloud.html).
+
+Solr can also be installed using [HDP Search 3](https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.4/bk_solr-search-installation/content/ch_hdp_search_30.html).  HDP Search 3 sets the Zookeeper root to 
+`/solr`, so this will need to be added to each URL in the comma-separated list in Ambari UI -> Services -> Metron -> Configs -> Index Settings -> Solr Zookeeper Urls.  For example, in full dev
+this would be `node1:2181/solr`.
+
+## Enabling Solr
+
+Elasticsearch is the real-time store used by default in Metron.  Solr can be enabled following these steps:
+
+1. Stop the Metron Indexing component in Ambari.
+1. Update Ambari UI -> Services -> Metron -> Configs -> Index Settings -> Solr Zookeeper Urls to match the Solr installation described in the previous section.
+1. Change Ambari UI -> Services -> Metron -> Configs -> Indexing -> Index Writer - Random Access -> Random Access Search Engine to `Solr`.
+1. Set the `source.type.field` property to `source.type` in the [Global Configuration](../metron-common#global-configuration).
+1. Set the `threat.triage.score.field` property to `threat.triage.score` in the [Global Configuration](../metron-common#global-configuration).
+1. Start the Metron Indexing component in Ambari.
+1. Restart Metron REST and the Alerts UI in Ambari.
+
+This will automatically create collections for the schemas shipped with Metron:
+
+* bro 
+* snort
+* yaf
+* error (used internally by Metron)
+* metaalert (used internally by Metron)
+
+Any other collections must be created manually before starting the Indexing component.  Alerts should be present in the Alerts UI after enabling Solr.
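+
+Steps 4 and 5 above correspond to entries in the global config similar to the following sketch
+(the property names and values are the ones given in the steps):
+```
+{
+  "source.type.field": "source.type",
+  "threat.triage.score.field": "threat.triage.score"
+}
+```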
+
+## Schemas
+
+As of now, we have mapped out the schemas in `src/main/config/schema`.
+Ambari will eventually install these, but at the moment installation is manual.  You should refer to the Solr documentation [here](https://lucene.apache.org/solr/guide/6_6) in general
+and [here](https://lucene.apache.org/solr/guide/6_6/documents-fields-and-schema-design.html) if you'd like to know more about schemas in Solr.
+
+In Metron's Solr DAO implementation, document updates involve reading a document, applying the update and replacing the original by reindexing the whole document.  
+Indexing LatLonType and PointType field types stores data in internal fields that should not be returned in search results.  For these fields, a dynamic field matching the internal field suffix needs to be added to store the data points.
+Solr 6+ comes with a new LatLonPointSpatialField field type that should be used instead of LatLonType if possible.  Otherwise, a LatLonType field should be defined as:
+```
+<dynamicField name="*.location_point" type="location" multiValued="false" docValues="false"/>
+<dynamicField name="*_coordinate" type="pdouble" indexed="true" stored="false" docValues="false"/>
+<fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+```
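+If LatLonPointSpatialField can be used, a minimal definition might look like the following sketch
+(the field name mirrors the example above; note that LatLonPointSpatialField relies on docValues):
+```
+<dynamicField name="*.location_point" type="location" multiValued="false" docValues="true"/>
+<fieldType name="location" class="solr.LatLonPointSpatialField" docValues="true"/>
+```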
+A PointType field should be defined as:
+```
+<dynamicField name="*.point" type="point" multiValued="false" docValues="false"/>
+<dynamicField name="*_point" type="pdouble" indexed="true" stored="false" docValues="false"/>
+<fieldType name="point" class="solr.PointType" subFieldSuffix="_point"/>
+```
+If any copy fields are defined, `stored` and `docValues` should be set to false.
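+For example, a hypothetical copy field collecting IP addresses could be declared as:
+```
+<field name="all_ips" type="string" indexed="true" stored="false" docValues="false" multiValued="true"/>
+<copyField source="ip_src_addr" dest="all_ips"/>
+<copyField source="ip_dst_addr" dest="all_ips"/>
+```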
+
+## Collections
+
+Convenience scripts are provided with Metron to create and delete collections.  Ambari uses these scripts to automatically create collections.  To use them outside of Ambari, a few environment variables must be set first:
+```
+# Path to the zookeeper node used by Solr
+export ZOOKEEPER=node1:2181/solr
+# Set to true if Kerberos is enabled
+export SECURITY_ENABLED=true 
+```
+The scripts can then be called directly with the collection name as the first argument.  For example, to create the bro collection:
+```
+$METRON_HOME/bin/create_collection.sh bro
+```
+To delete the bro collection:
+```
+$METRON_HOME/bin/delete_collection.sh bro
+```
+The `create_collection.sh` script depends on schemas installed in `$METRON_HOME/config/schema`.  There are several schemas that come with Metron:
+
+* bro
+* snort
+* yaf
+* metaalert
+* error
+
+Additional schemas should be installed in that location if using the `create_collection.sh` script.  Any collection can be deleted with the `delete_collection.sh` script.
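+For example, to add a hypothetical `mysensor` collection, its schema files would first be copied into
+place and the create script invoked (file names and paths are illustrative):
+```
+mkdir -p $METRON_HOME/config/schema/mysensor
+cp schema.xml solrconfig.xml $METRON_HOME/config/schema/mysensor/
+$METRON_HOME/bin/create_collection.sh mysensor
+```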
+These scripts use the [Solr Collection API](http://lucene.apache.org/solr/guide/6_6/collections-api.html).
\ No newline at end of file
diff --git a/metron-platform/metron-solr/pom.xml b/metron-platform/metron-solr/pom.xml
index adfc7b6..2e2dfe2 100644
--- a/metron-platform/metron-solr/pom.xml
+++ b/metron-platform/metron-solr/pom.xml
@@ -31,7 +31,7 @@
         <dependency>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
-            <version>${global_hbase_guava_version}</version>
+            <version>${global_guava_version}</version>
         </dependency>
         <dependency>
             <groupId>org.apache.solr</groupId>
@@ -63,6 +63,14 @@
                     <artifactId>fastutil</artifactId>
                     <groupId>it.unimi.dsi</groupId>
                 </exclusion>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-core</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-databind</artifactId>
+                </exclusion>
             </exclusions>
         </dependency>
         <dependency>
@@ -192,11 +200,15 @@
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-common</artifactId>
                 </exclusion>
+                <exclusion>
+                  <groupId>org.hamcrest</groupId>
+                  <artifactId>hamcrest-core</artifactId>
+                </exclusion>
             </exclusions>
         </dependency>
         <dependency>
             <groupId>org.mockito</groupId>
-            <artifactId>mockito-all</artifactId>
+            <artifactId>mockito-core</artifactId>
             <version>${global_mockito_version}</version>
             <scope>test</scope>
         </dependency>
@@ -231,6 +243,29 @@
                 </exclusion>
             </exclusions>
         </dependency>
+      <dependency>
+        <groupId>org.apache.metron</groupId>
+        <artifactId>metron-hbase</artifactId>
+        <version>${project.parent.version}</version>
+        <scope>test</scope>
+        <type>test-jar</type>
+        <exclusions>
+          <exclusion>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
+        <groupId>org.hamcrest</groupId>
+        <artifactId>hamcrest-core</artifactId>
+        <version>1.3</version>
+        <scope>test</scope>
+      </dependency>
 
     </dependencies>
 
@@ -265,7 +300,7 @@
                             <relocations>
                                 <relocation>
                                     <pattern>com.google.common</pattern>
-                                    <shadedPattern>org.apache.metron.guava</shadedPattern>
+                                    <shadedPattern>org.apache.metron.guava.metron-solr</shadedPattern>
                                 </relocation>
                                 <relocation>
                                     <pattern>com.fasterxml.jackson</pattern>
diff --git a/metron-platform/metron-solr/src/main/config/schema/bro/schema.xml b/metron-platform/metron-solr/src/main/config/schema/bro/schema.xml
new file mode 100644
index 0000000..ea9f6d3
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/bro/schema.xml
@@ -0,0 +1,700 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="bro_doc" version="1.6">
+  <!--
+    WARNING
+
+    Because Metron inserts many distinct bro records into a single Solr collection
+    by default, it encounters field collisions due to field name reuse across bro logs.
+
+    Be careful when modifying this file to not unintentionally affect other logs.
+    For instance, the "version" field exists in the HTTP, SSL, and SSH logs.  If you
+    were to only consider the SSH log, you would set the type to integer, but because
+    in the SSL and HTTP logs version is a string, we must set the type to string.
+   -->
+  <field name="_version_" type="plong" indexed="true" stored="true"/>
+  <field name="_root_" type="string" indexed="true" stored="false" docValues="false" />
+
+  <!--
+         * Metron-specific fields
+  -->
+  <field name="source.type" type="string" indexed="true" stored="true" />
+  <field name="timestamp" type="timestamp" indexed="true" stored="true" />
+  <field name="guid" type="string" indexed="true" stored="true" required="true" multiValued="false" />
+  <uniqueKey>guid</uniqueKey>
+
+  <!--
+         * Widely-used Bro fields (potentially renamed during Metron ingest)
+  -->
+  <field name="uid" type="string" indexed="true" stored="true" />
+  <field name="ip_src_addr" type="ip" indexed="true" stored="true" />
+  <field name="ip_src_port" type="pint" indexed="true" stored="true" />
+  <field name="ip_dst_addr" type="ip" indexed="true" stored="true" />
+  <field name="ip_dst_port" type="pint" indexed="true" stored="true" />
+
+  <!--
+         * HTTP log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/http/main.bro.html#type-HTTP::Info
+         *
+         * Notable Fields
+         *   Field:     method
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     uri
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     password
+         *   Notes:     Field exists in the HTTP and FTP logs
+         *
+         *   Field:     capture_password
+         *   Notes:     Field exists in the HTTP and FTP logs
+         *
+         *   Field:     trans_depth
+         *   Notes:     Field exists in the HTTP, SMTP, and SIP logs
+         *
+         *   Field:     user_agent
+         *   Notes:     Field exists in the HTTP, SMTP, and SIP logs
+         *
+         *   Field:     version
+         *   Notes:     Field exists in the HTTP, SSL, and SSH logs
+         *
+         *   Field:     host
+         *   Notes:     Field exists in the HTTP, KnownCerts, and Software logs
+         *
+         *   Field:     username
+         *   Notes:     Field exists in the HTTP and RADIUS logs
+         *
+         *   Field:     status_code
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     status_msg
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     request_body_len
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     response_body_len
+         *   Notes:     Field exists in the HTTP and SIP logs
+  -->
+  <field name="trans_depth" type="pint" indexed="true" stored="true" />
+  <field name="method" type="string" indexed="true" stored="true" />
+  <field name="host" type="string" indexed="true" stored="true" />
+  <field name="uri" type="string" indexed="true" stored="true" />
+  <field name="referrer" type="string" indexed="true" stored="true" />
+  <field name="version" type="string" indexed="true" stored="true" />
+  <field name="user_agent" type="string" indexed="true" stored="true" />
+  <field name="request_body_len" type="plong" indexed="true" stored="true" />
+  <field name="response_body_len" type="plong" indexed="true" stored="true" />
+  <field name="status_code" type="pint" indexed="true" stored="true" />
+  <field name="status_msg" type="string" indexed="true" stored="true" />
+  <field name="info_code" type="pint" indexed="true" stored="true" />
+  <field name="info_msg" type="string" indexed="true" stored="true" />
+  <field name="tags" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="username" type="string" indexed="true" stored="true" />
+  <field name="password" type="string" indexed="true" stored="true" />
+  <field name="proxied" type="string" indexed="true" stored="true" />
+  <field name="orig_fuids" type="string" indexed="true" stored="true" multiValued="true"/>
+  <field name="orig_filenames" type="string" indexed="true" stored="true" />
+  <field name="orig_mime_types" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="resp_fuids" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="resp_filenames" type="string" indexed="true" stored="true" />
+  <field name="resp_mime_types" type="string" indexed="true" stored="true" multiValued="true" />
+
+  <!--
+         * DNS log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/dns/main.bro.html#type-DNS::Info
+         *
+         * Notable Fields
+         *   Field:     proto
+         *   Notes:     Field exists in the DNS, Conn, DPD, and Notice logs
+         *
+         *   Field:     trans_id
+         *   Notes:     Field exists in the DNS and DHCP logs
+         *
+         *   Field:     rtt
+         *   Notes:     This field uses the "interval" type, which may need to be handled differently.
+         *              https://www.bro.org/sphinx-git/script-reference/types.html#type-interval
+  -->
+  <field name="proto" type="string" indexed="true" stored="true" />
+  <field name="trans_id" type="plong" indexed="true" stored="true" />
+  <field name="rtt" type="string" indexed="true" stored="true" />
+
+  <field name="query" type="string" indexed="true" stored="true" />
+  <field name="qclass" type="pint" indexed="true" stored="true" />
+  <field name="qclass_name" type="string" indexed="true" stored="true" />
+  <field name="qtype" type="pint" indexed="true" stored="true" />
+  <field name="qtype_name" type="string" indexed="true" stored="true" />
+  <field name="rcode" type="pint" indexed="true" stored="true" />
+  <field name="rcode_name" type="string" indexed="true" stored="true" />
+  <field name="AA" type="boolean" indexed="true" stored="true" />
+  <field name="TC" type="boolean" indexed="true" stored="true" />
+  <field name="RD" type="boolean" indexed="true" stored="true" />
+  <field name="RA" type="boolean" indexed="true" stored="true" />
+  <field name="Z" type="pint" indexed="true" stored="true" />
+  <field name="answers" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="TTLs" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="rejected" type="boolean" indexed="true" stored="true" />
+  <!--
+         * Conn log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/conn/main.bro.html#type-Conn::Info
+         *
+         * Notable Fields
+         *   Field:     proto
+         *   Notes:     Field exists in the DNS, Conn, DPD, and Notice logs
+         *
+         *   Field:     duration
+         *   Notes:     Field exists in the Conn and Files logs
+         *
+         *   Field:     local_orig
+         *   Notes:     Field exists in the Conn and Files logs
+  -->
+  <field name="service" type="string" indexed="true" stored="true" />
+  <field name="duration" type="pfloat" indexed="true" stored="true" />
+  <field name="orig_bytes" type="plong" indexed="true" stored="true" />
+  <field name="resp_bytes" type="plong" indexed="true" stored="true" />
+  <field name="conn_state" type="string" indexed="true" stored="true" />
+  <field name="local_orig" type="boolean" indexed="true" stored="true" />
+
+  <field name="local_resp" type="string" indexed="true" stored="true" />
+  <field name="missed_bytes" type="plong" indexed="true" stored="true" />
+  <field name="history" type="string" indexed="true" stored="true" />
+  <field name="orig_pkts" type="plong" indexed="true" stored="true" />
+  <field name="orig_ip_bytes" type="plong" indexed="true" stored="true" />
+  <field name="resp_pkts" type="plong" indexed="true" stored="true" />
+  <field name="resp_ip_bytes" type="plong" indexed="true" stored="true" />
+  <field name="tunnel_parents" type="string" indexed="true" stored="true" multiValued="true" />
+  <!--
+         * DPD log support
+         * https://www.bro.org/sphinx-git/scripts/base/frameworks/dpd/main.bro.html#type-DPD::Info
+         *
+         * Notable Fields
+         *   Field:     proto
+         *   Notes:     Field exists in the DNS, Conn, DPD, and Notice logs
+  -->
+  <field name="analyzer" type="string" indexed="true" stored="true" />
+  <field name="failure_reason" type="string" indexed="true" stored="true" />
+  <!--
+         * FTP log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/ftp/info.bro.html#type-FTP::Info
+         *
+         * Notable Fields
+         *   Field:     password
+         *   Notes:     Field exists in the HTTP and FTP logs
+         *
+         *   Field:     capture_password
+         *   Notes:     Field exists in the HTTP and FTP logs
+         *
+         *   Field:     mime_type
+         *   Notes:     Field exists in the FTP and Files logs
+         *
+         *   Field:     fuid
+         *   Notes:     Field exists in the FTP, Files, and Notice logs
+   -->
+  <field name="user" type="string" indexed="true" stored="true" />
+  <field name="command" type="string" indexed="true" stored="true" />
+  <field name="arg" type="string" indexed="true" stored="true" />
+  <field name="mime_type" type="string" indexed="true" stored="true" />
+  <field name="file_size" type="plong" indexed="true" stored="true" />
+  <field name="reply_code" type="pint" indexed="true" stored="true" />
+  <field name="reply_msg" type="string" indexed="true" stored="true" />
+  <field name="data_channel.passive" type="boolean" indexed="true" stored="true" />
+  <field name="data_channel.orig_h" type="ip" indexed="true" stored="true" />
+  <field name="data_channel.resp_h" type="ip" indexed="true" stored="true" />
+  <field name="data_channel.resp_p" type="pint" indexed="true" stored="true" />
+  <field name="cwd" type="string" indexed="true" stored="true" />
+  <field name="passive" type="boolean" indexed="true" stored="true" />
+  <field name="fuid" type="string" indexed="true" stored="true" />
+  <!--
+         * Files log support
+         * https://www.bro.org/sphinx/scripts/base/frameworks/files/main.bro.html#type-Files::Info
+         *
+         * Notable Fields
+         *   Field:     tx_hosts
+         *   Notes:     Metron rewrites this to "ip_src_addr"
+         *
+         *   Field:     rx_hosts
+         *   Notes:     Metron rewrites this to "ip_dst_addr"
+         *
+         *   Field:     mime_type
+         *   Notes:     Field exists in the FTP and Files logs
+         *
+         *   Field:     duration
+         *   Notes:     Field exists in the Conn and Files logs
+         *
+         *   Field:     local_orig
+         *   Notes:     Field exists in the Conn and Files logs
+         *
+         *   Field:     fuid
+         *   Notes:     Field exists in the FTP, Files, and Notice logs
+  -->
+  <field name="conn_uids" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="source" type="string" indexed="true" stored="true" />
+  <field name="depth" type="pint" indexed="true" stored="true" />
+  <field name="analyzers" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="filename" type="string" indexed="true" stored="true" />
+  <field name="is_orig" type="boolean" indexed="true" stored="true" />
+  <field name="seen_bytes" type="plong" indexed="true" stored="true" />
+  <field name="total_bytes" type="plong" indexed="true" stored="true" />
+  <field name="missing_bytes" type="plong" indexed="true" stored="true" />
+  <field name="overflow_bytes" type="plong" indexed="true" stored="true" />
+  <field name="timedout" type="boolean" indexed="true" stored="true" />
+  <field name="parent_fuid" type="string" indexed="true" stored="true" />
+  <field name="md5" type="string" indexed="true" stored="true" />
+  <field name="sha1" type="string" indexed="true" stored="true" />
+  <field name="sha256" type="string" indexed="true" stored="true" />
+  <field name="extracted" type="string" indexed="true" stored="true" />
+  <field name="extracted_cutoff" type="boolean" indexed="true" stored="true" />
+  <field name="extracted_size" type="plong" indexed="true" stored="true" />
+
+  <!--
+         * Known::CertInfo log support
+         * https://www.bro.org/sphinx/scripts/policy/protocols/ssl/known-certs.bro.html#type-Known::CertsInfo
+         *
+         * Notable Fields
+         *   Field:     host
+         *   Notes:     Field exists in the HTTP, KnownCerts, and Software logs
+         *
+         *   Field:     subject
+         *   Notes:     Field exists in the KnownCerts, SMTP, SIP, and SSL logs
+  -->
+  <field name="port_num" type="pint" indexed="true" stored="true" />
+  <field name="subject" type="string" indexed="true" stored="true" />
+  <field name="issuer_subject" type="string" indexed="true" stored="true" />
+  <field name="serial" type="string" indexed="true" stored="true" />
+
+  <!--
+         * SMTP log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/smtp/main.bro.html#type-SMTP::Info
+         *
+         * Notable Fields
+         *   Field:     trans_depth
+         *   Notes:     Field exists in the HTTP, SMTP, and SIP logs
+         *
+         *   Field:     date
+         *   Notes:     Field exists in the SMTP and SIP logs
+         *
+         *   Field:     subject
+         *   Notes:     Field exists in the KnownCerts, SMTP, SIP, and SSL logs
+         *
+         *   Field:     reply_to
+         *   Notes:     Field exists in the SMTP and SIP logs
+         *
+         *   Field:     user_agent
+         *   Notes:     Field exists in the HTTP, SMTP, and SIP logs
+  -->
+  <field name="helo" type="string" indexed="true" stored="true" />
+  <field name="mailfrom" type="string" indexed="true" stored="true" />
+  <field name="rcptto" type="string" indexed="true" stored="true" />
+  <field name="date" type="string" indexed="true" stored="true" />
+  <field name="from" type="string" indexed="true" stored="true" />
+  <field name="to" type="string" indexed="true" stored="true" />
+  <field name="cc" type="string" indexed="true" stored="true" />
+  <field name="reply_to" type="string" indexed="true" stored="true" />
+  <field name="msg_id" type="string" indexed="true" stored="true" />
+  <field name="in_reply_to" type="string" indexed="true" stored="true" />
+  <field name="x_originating_ip" type="ip" indexed="true" stored="true" />
+  <field name="first_received" type="string" indexed="true" stored="true" />
+  <field name="second_received" type="string" indexed="true" stored="true" />
+  <field name="last_reply" type="string" indexed="true" stored="true" />
+  <field name="path" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="tls" type="boolean" indexed="true" stored="true" />
+  <field name="fuids" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="is_webmail" type="boolean" indexed="true" stored="true" />
+
+  <!--
+         * SSL log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/ssl/main.bro.html#type-SSL::Info
+         *
+         * Notable Fields
+         *   Field:     version
+         *   Notes:     Field exists in the HTTP, SSL, and SSH logs
+         *
+         *   Field:     subject
+         *   Notes:     Field exists in the KnownCerts, SMTP, SIP, and SSL logs
+  -->
+  <field name="cipher" type="string" indexed="true" stored="true" />
+  <field name="curve" type="string" indexed="true" stored="true" />
+  <field name="server_name" type="string" indexed="true" stored="true" />
+  <field name="resumed" type="boolean" indexed="true" stored="true" />
+  <field name="server_appdata" type="string" indexed="true" stored="true" />
+  <field name="client_appdata" type="boolean" indexed="true" stored="true" />
+  <field name="last_alert" type="string" indexed="true" stored="true" />
+  <field name="next_protocol" type="string" indexed="true" stored="true" />
+  <field name="established" type="boolean" indexed="true" stored="true" />
+  <field name="cert_chain_fuids" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="client_cert_chain_fuids" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="issuer" type="string" indexed="true" stored="true" />
+  <field name="client_subject" type="string" indexed="true" stored="true" />
+  <field name="client_issuer" type="string" indexed="true" stored="true" />
+  <field name="validation_status" type="string" indexed="true" stored="true" />
+  <!--
+         * Weird log support
+         * https://www.bro.org/sphinx/scripts/base/frameworks/notice/weird.bro.html#type-Weird::Info
+         *
+         * Notable Fields
+         *   Field:     peer
+         *   Notes:     Field exists in the Weird, CaptureLoss, and Stats logs
+         *
+         *   Field:     name
+         *   Notes:     Field exists in the Weird and LoadedScripts logs
+  -->
+  <field name="name" type="string" indexed="true" stored="true" />
+  <field name="addl" type="string" indexed="true" stored="true" />
+  <field name="notice" type="boolean" indexed="true" stored="true" />
+  <field name="peer" type="string" indexed="true" stored="true" />
+
+  <!--
+         * Notice log support
+         * https://www.bro.org/sphinx/scripts/base/frameworks/notice/main.bro.html#type-Notice::Info
+         *
+         * Notable Fields
+         *   Field:     fuid
+         *   Notes:     Field exists in the FTP, Files, and Notice logs
+         *
+         *   Field:     proto
+         *   Notes:     Field exists in the DNS, Conn, DPD, and Notice logs
+         *
+         *   Field:     remote_location:country_code
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:region
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:city
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:latitude
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:longitude
+         *   Notes:     Field exists in the Notice and SSH logs
+  -->
+  <field name="file_mime_type" type="string" indexed="true" stored="true" />
+  <field name="file_desc" type="string" indexed="true" stored="true" />
+  <field name="note" type="string" indexed="true" stored="true" />
+  <field name="msg" type="string" indexed="true" stored="true" />
+  <field name="sub" type="string" indexed="true" stored="true" />
+  <field name="src" type="ip" indexed="true" stored="true" />
+  <field name="dst" type="ip" indexed="true" stored="true" />
+  <field name="p" type="pint" indexed="true" stored="true" />
+  <field name="n" type="pint" indexed="true" stored="true" />
+  <field name="src_peer" type="ip" indexed="true" stored="true" />
+  <field name="peer_descr" type="string" indexed="true" stored="true" />
+  <field name="actions" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="suppress_for" type="pdouble" indexed="true" stored="true" />
+  <field name="dropped" type="boolean" indexed="true" stored="true" />
+  <field name="remote_location.country_code" type="string" indexed="true" stored="true" />
+  <field name="remote_location.region" type="string" indexed="true" stored="true" />
+  <field name="remote_location.city" type="string" indexed="true" stored="true" />
+  <field name="remote_location.latitude" type="pdouble" indexed="true" stored="true" />
+  <field name="remote_location.longitude" type="pdouble" indexed="true" stored="true" />
+
+  <!--
+         * DHCP log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/dhcp/main.bro.html#type-DHCP::Info
+         *
+         * Notable Fields
+         *   Field:     mac
+         *   Notes:     Field exists in the DHCP, RADIUS, and KnownDevices logs
+         *
+         *   Field:     trans_id
+         *   Notes:     Field exists in the DNS and DHCP logs
+  -->
+  <field name="mac" type="string" indexed="true" stored="true" />
+  <field name="assigned_ip" type="ip" indexed="true" stored="true" />
+  <field name="lease_time" type="pfloat" indexed="true" stored="true" />
+  <!--
+         * SSH log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/ssh/main.bro.html#type-SSH::Info
+         *
+         * Notable Fields
+         *   Field:     version
+         *   Notes:     Field exists in the HTTP, SSL, and SSH logs
+         *
+         *   Field:     remote_location:country_code
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:region
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:city
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:latitude
+         *   Notes:     Field exists in the Notice and SSH logs
+         *
+         *   Field:     remote_location:longitude
+         *   Notes:     Field exists in the Notice and SSH logs
+  -->
+  <field name="auth_success" type="boolean" indexed="true" stored="true" />
+  <field name="auth_attempts" type="pint" indexed="true" stored="true" />
+  <field name="direction" type="string" indexed="true" stored="true" />
+  <field name="client" type="string" indexed="true" stored="true" />
+  <field name="server" type="string" indexed="true" stored="true" />
+  <field name="cipher_alg" type="string" indexed="true" stored="true" />
+  <field name="mac_alg" type="string" indexed="true" stored="true" />
+  <field name="compression_alg" type="string" indexed="true" stored="true" />
+  <field name="kex_alg" type="string" indexed="true" stored="true" />
+  <field name="host_key_alg" type="string" indexed="true" stored="true" />
+  <field name="host_key" type="string" indexed="true" stored="true" />
+
+  <!--
+         * Software log support
+         * https://www.bro.org/sphinx/scripts/base/frameworks/software/main.bro.html#type-Software::Info
+         *
+         * Notable Fields
+         *   Field:     host
+         *   Notes:     Field exists in the HTTP, KnownCerts, and Software logs
+  -->
+  <field name="host_p" type="pint" indexed="true" stored="true" />
+  <field name="software_type" type="string" indexed="true" stored="true" />
+  <field name="version.major" type="string" indexed="true" stored="true" />
+  <field name="version.minor" type="string" indexed="true" stored="true" />
+  <field name="version.minor2" type="string" indexed="true" stored="true" />
+  <field name="version.minor3" type="string" indexed="true" stored="true" />
+  <field name="version.addl" type="string" indexed="true" stored="true" />
+  <field name="unparsed_version" type="string" indexed="true" stored="true" />
+
+  <!--
+         * RADIUS log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/radius/main.bro.html#type-RADIUS::Info
+         *
+         * Notable Fields
+         *   Field:     username
+         *   Notes:     Field exists in the HTTP and RADIUS logs
+         *
+         *   Field:     mac
+         *   Notes:     Field exists in the DHCP, RADIUS, and KnownDevices logs
+         *
+         *   Field:     ttl
+         *   Notes:     This field uses the "interval" type, which may need to be handled differently.
+         *              https://www.bro.org/sphinx-git/script-reference/types.html#type-interval
+  -->
+  <field name="framed_addr" type="ip" indexed="true" stored="true" />
+  <field name="remote_ip" type="ip" indexed="true" stored="true" />
+  <field name="connect_info" type="string" indexed="true" stored="true" />
+  <field name="result" type="string" indexed="true" stored="true" />
+  <field name="ttl" type="string" indexed="true" stored="true" />
+
+  <!--
+         * X509 log support
+         * https://www.bro.org/sphinx/scripts/base/files/x509/main.bro.html#type-X509::Info
+         *
+         * Notable Fields
+         *   Field:     id
+         *   Notes:     In other bro records, the id field is of type conn_id, so it is
+         *              expanded before being logged into 4 fields, all of which are addressed
+         *              under the "Widely-used Bro fields" section of this template.  In X509
+         *              logs, however, id is a keyword to identify the certificate file id.
+  -->
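+  <!--
+         As a hypothetical illustration of that expansion: in a conn record,
+         id.orig_h, id.orig_p, id.resp_h, and id.resp_p are ingested as
+         ip_src_addr, ip_src_port, ip_dst_addr, and ip_dst_port respectively,
+         whereas in an X509 record "id" remains a single string value.
+  -->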
+  <field name="id" type="string" indexed="true" stored="true" />
+  <field name="certificate.version" type="pint" indexed="true" stored="true" />
+  <field name="certificate.serial" type="string" indexed="true" stored="true" />
+  <field name="certificate.subject" type="string" indexed="true" stored="true" />
+  <field name="certificate.issuer" type="string" indexed="true" stored="true" />
+  <field name="certificate.not_valid_before" type="string" indexed="true" stored="true" />
+  <field name="certificate.not_valid_after" type="string" indexed="true" stored="true" />
+  <field name="certificate.key_alg" type="string" indexed="true" stored="true" />
+  <field name="certificate.sig_alg" type="string" indexed="true" stored="true" />
+  <field name="certificate.key_type" type="string" indexed="true" stored="true" />
+  <field name="certificate.key_length" type="pint" indexed="true" stored="true" />
+  <field name="certificate.exponent" type="string" indexed="true" stored="true" />
+  <field name="certificate.curve" type="string" indexed="true" stored="true" />
+  <field name="san.dns" type="string" indexed="true" stored="true" />
+  <field name="san.uri" type="string" indexed="true" stored="true" />
+  <field name="san.email" type="string" indexed="true" stored="true" />
+  <field name="san.ip" type="string" indexed="true" stored="true" />
+  <field name="basic_constraints.ca" type="boolean" indexed="true" stored="true" />
+  <field name="basic_constraints.path_len" type="pint" indexed="true" stored="true" />
+
+  <!--
+         * Known::DevicesInfo log support
+         * https://www.bro.org/sphinx/scripts/policy/misc/known-devices.bro.html#type-Known::DevicesInfo
+         *
+         * Notable Fields
+         *   Field:     mac
+         *   Notes:     Field exists in the DHCP, RADIUS, and KnownDevices logs
+  -->
+  <field name="dhcp_host_name" type="string" indexed="true" stored="true" />
+  <!--
+         * RFB::Info log support
+         * https://www.bro.org/sphinx-git/scripts/base/protocols/rfb/main.bro.html#type-RFB::Info
+  -->
+  <field name="client_major_version" type="string" indexed="true" stored="true" />
+  <field name="client_minor_version" type="string" indexed="true" stored="true" />
+  <field name="server_major_version" type="string" indexed="true" stored="true" />
+  <field name="server_minor_version" type="string" indexed="true" stored="true" />
+  <field name="authentication_method" type="string" indexed="true" stored="true" />
+  <field name="auth" type="boolean" indexed="true" stored="true" />
+  <field name="share_flag" type="boolean" indexed="true" stored="true" />
+  <field name="desktop_name" type="string" indexed="true" stored="true" />
+  <field name="width" type="pint" indexed="true" stored="true" />
+  <field name="height" type="pint" indexed="true" stored="true" />
+
+  <!--
+         * Stats::Info log support
+         * https://www.bro.org/sphinx/scripts/policy/misc/stats.bro.html#type-Stats::Info
+         *
+         * Notable Fields
+         *   Field:     peer
+         *   Notes:     Field exists in the Weird, CaptureLoss, and Stats logs
+         *
+         *   Field:     pkt_lag
+         *   Notes:     This field uses the "interval" type, which may need to be handled differently.
+         *              https://www.bro.org/sphinx-git/script-reference/types.html#type-interval
+  -->
+  <field name="mem" type="pint" indexed="true" stored="true" />
+  <field name="pkts_proc" type="pint" indexed="true" stored="true" />
+  <field name="bytes_recv" type="pint" indexed="true" stored="true" />
+  <field name="pkts_dropped" type="pint" indexed="true" stored="true" />
+  <field name="pkts_link" type="pint" indexed="true" stored="true" />
+  <field name="pkt_lag" type="string" indexed="true" stored="true" />
+  <field name="events_proc" type="pint" indexed="true" stored="true" />
+  <field name="events_queued" type="pint" indexed="true" stored="true" />
+  <field name="active_tcp_conns" type="pint" indexed="true" stored="true" />
+  <field name="active_udp_conns" type="pint" indexed="true" stored="true" />
+  <field name="active_icmp_conns" type="pint" indexed="true" stored="true" />
+  <field name="tcp_conns" type="pint" indexed="true" stored="true" />
+  <field name="udp_conns" type="pint" indexed="true" stored="true" />
+  <field name="icmp_conns" type="pint" indexed="true" stored="true" />
+  <field name="timers" type="pint" indexed="true" stored="true" />
+  <field name="active_timers" type="pint" indexed="true" stored="true" />
+  <field name="files" type="pint" indexed="true" stored="true" />
+  <field name="active_files" type="pint" indexed="true" stored="true" />
+  <field name="dns_requests" type="pint" indexed="true" stored="true" />
+  <field name="active_dns_requests" type="pint" indexed="true" stored="true" />
+  <field name="reassem_tcp_size" type="pint" indexed="true" stored="true" />
+  <field name="reassem_file_size" type="pint" indexed="true" stored="true" />
+  <field name="reassem_frag_size" type="pint" indexed="true" stored="true" />
+  <field name="reassem_unknown_size" type="pint" indexed="true" stored="true" />
+
+  <!--
+         * CaptureLoss::Info log support
+         * https://www.bro.org/sphinx/scripts/policy/misc/capture-loss.bro.html#type-CaptureLoss::Info
+         *
+         * Notable Fields
+         *   Field:     ts_delta
+         *   Notes:     This field uses the "interval" type, which may need to be handled differently.
+         *              https://www.bro.org/sphinx-git/script-reference/types.html#type-interval
+         *
+         *   Field:     peer
+         *   Notes:     Field exists in the Weird, CaptureLoss, and Stats logs
+  -->
+  <field name="ts_delta" type="string" indexed="true" stored="true" />
+  <field name="gaps" type="pint" indexed="true" stored="true" />
+  <field name="acks" type="pint" indexed="true" stored="true" />
+  <field name="percent_lost" type="pdouble" indexed="true" stored="true" />
+
+  <!--
+         * Reporter::Info log support
+         * https://www.bro.org/sphinx/scripts/base/frameworks/reporter/main.bro.html#type-Reporter::Info
+  -->
+  <field name="level" type="string" indexed="true" stored="true" />
+  <field name="message" type="string" indexed="true" stored="true" />
+  <field name="location" type="string" indexed="true" stored="true" />
+
+  <!--
+         * SIP::Info log support
+         * https://www.bro.org/sphinx/scripts/base/protocols/sip/main.bro.html#type-SIP::Info
+         *
+         * Notable Fields
+         *   Field:     trans_depth
+         *   Notes:     Field exists in the HTTP, SMTP, and SIP logs
+         *
+         *   Field:     method
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     uri
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     date
+         *   Notes:     Field exists in the SMTP and SIP logs
+         *
+         *   Field:     reply_to
+         *   Notes:     Field exists in the SMTP and SIP logs
+         *
+         *   Field:     subject
+         *   Notes:     Field exists in the KnownCerts, SMTP, SIP, and SSL logs
+         *
+         *   Field:     user_agent
+         *   Notes:     Field exists in the HTTP, SMTP, and SIP logs
+         *
+         *   Field:     status_code
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     status_msg
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     request_body_len
+         *   Notes:     Field exists in the HTTP and SIP logs
+         *
+         *   Field:     response_body_len
+         *   Notes:     Field exists in the HTTP and SIP logs
+  -->
+  <field name="request_from" type="string" indexed="true" stored="true" />
+  <field name="request_to" type="string" indexed="true" stored="true" />
+  <field name="response_from" type="string" indexed="true" stored="true" />
+  <field name="response_to" type="string" indexed="true" stored="true" />
+  <field name="call_id" type="string" indexed="true" stored="true" />
+  <field name="seq" type="string" indexed="true" stored="true" />
+  <field name="request_path" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="response_path" type="string" indexed="true" stored="true" multiValued="true" />
+  <field name="warning" type="string" indexed="true" stored="true" />
+  <field name="content_type" type="string" indexed="true" stored="true" />
+
+  <!-- Geo Enrichment Fields -->
+  <dynamicField name="*.location_point" type="location" multiValued="false" docValues="false"/>
+  <dynamicField name="*_coordinate" type="pdouble" indexed="true" stored="false" docValues="false"/>
+  <dynamicField name="*.country" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.city" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.locID" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.dmaCode" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.postalCode" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.latitude" type="pfloat" multiValued="false" docValues="true"/>
+  <dynamicField name="*.longitude" type="pfloat" multiValued="false" docValues="true"/>
+
+  <!-- Performance Debugging Fields -->
+  <dynamicField name="*.ts" type="timestamp" multiValued="false" docValues="true"/>
+
+  <!-- Threat Intel Scoring Fields -->
+  <field name="is_alert" type="boolean" indexed="true" stored="true" />
+  <dynamicField name="*score" type="pfloat" multiValued="false" docValues="true"/>
+  <dynamicField name="*.reason" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.name" type="string" multiValued="false" docValues="true"/>
+
+  <!-- Comments field required for the UI -->
+  <field name="comments" type="string" indexed="true" stored="true" multiValued="true"/>
+
+  <!-- Metaalerts Field -->
+  <field name="metaalerts" type="string" multiValued="true" indexed="true" stored="true"/>
+
+  <!-- Catch all: fields not defined above fall through to this dynamic field. -->
+  <dynamicField name="*" type="ignored" multiValued="false" docValues="true"/>
+
+  <!-- Type Definitions -->
+  <fieldType name="string" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="boolean" stored="true" indexed="true" multiValued="false" class="solr.BoolField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="pint" stored="true" indexed="true" multiValued="false" class="solr.TrieIntField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pfloat" stored="true" indexed="true" multiValued="false" class="solr.TrieFloatField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="plong" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pdouble" stored="true" indexed="true" multiValued="false" class="solr.TrieDoubleField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+  <fieldType name="ip" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="timestamp" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="ignored" stored="true" indexed="true" multiValued="true" class="solr.StrField" sortMissingLast="false" docValues="false"/>
+</schema>
diff --git a/metron-platform/metron-solr/src/main/config/schema/bro/solrconfig.xml b/metron-platform/metron-solr/src/main/config/schema/bro/solrconfig.xml
new file mode 100644
index 0000000..fff9d84
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/bro/solrconfig.xml
@@ -0,0 +1,1601 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>7.2.0</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath, this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along
+       with their external dependencies.
+    -->
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based and not persistent.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- Expert: Enabling compound file will use fewer files for the index,
+         using fewer file descriptors at the expense of decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
+         Even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+          <double name="noCFSRatio">0.1</double>
+        </mergePolicyFactory>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is default for Solr3.6 and later, otherwise
+                   'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+      <!-- The number of commit points to be kept -->
+      <!-- <str name="maxCommitsToKeep">1</str> -->
+      <!-- The number of optimized commit points to be kept -->
+      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability, and
+         SolrCloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing; this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit>
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+       <openSearcher>false</openSearcher>
+     </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+     <autoSoftCommit>
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+     </autoSoftCommit>
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+
+    <!-- Maximum number of clauses in each BooleanQuery,  an exception
+         is thrown if exceeded.  It is safe to increase or remove this setting,
+         since it is purely an arbitrary limit to try and catch user errors where
+         large boolean queries may not be the best implementation choice.
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Slow Query Threshold (in millis)
+
+         At high request rates, logging all requests can become a bottleneck
+         and therefore INFO logging is often turned off. However, it is still
+         useful to be able to set a latency threshold above which a request
+         is considered "slow" and log that request at WARN level so we can
+         easily identify slow queries.
+    -->
+    <slowQueryThresholdMillis>-1</slowQueryThresholdMillis>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+
+        Caches results of searches - ordered lists of document ids
+        (DocList) based on a query, a sort, and the range of documents requested.
+        Additional parameter supported by LRUCache:
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy
+     -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Feature Values Cache
+
+         Cache used by the Learning To Rank (LTR) contrib module.
+
+         You will need to set the solr.ltr.enabled system property
+         when starting Solr in order to enable LTR:
+           -Dsolr.ltr.enabled=true
+
+         https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <cache enable="${solr.ltr.enabled:false}" name="QUERY_DOC_FV"
+           class="solr.search.LRUCache"
+           size="4096"
+           initialSize="2048"
+           autowarmCount="4096"
+           regenerator="solr.search.NoOpRegenerator" />
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score"
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        is collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache.
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
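To make the window concrete: with queryResultWindowSize set to 20 above, paging through the same query and sort in steps of 10 lets the second page come straight from the queryResultCache, because the first request already cached the 0..19 window. A sketch under the same hypothetical client assumptions as earlier (the sort field is also illustrative):

```java
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class PagingExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrQuery q = new SolrQuery("*:*");
      q.setSort("timestamp", SolrQuery.ORDER.desc);
      q.setRows(10);

      q.setStart(0);     // collects and caches documents 0..19 (one window)
      client.query(q);

      q.setStart(10);    // same query and sort: can be served from the cache
      client.query(q);
    }
  }
}
```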
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+    -->
+  <requestDispatcher>
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         Before enabling remote streaming, you should make sure your
+         system has authentication enabled.
+
+    <requestParsers enableRemoteStreaming="false"
+                    multipartUploadLimitInKB="-1"
+                    formdataUploadLimitInKB="-1"
+                    addHttpRequestToContext="false"/>
+      -->
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+
+         By default, no Cache-Control header is generated.
+
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModifiedFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler" It delegates to a sequent
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+       <!-- Default search field
+          <str name="df">text</str>
+         -->
+       <!-- Change from JSON to XML format (the default prior to Solr 7.0)
+          <str name="wt">xml</str>
+         -->
+       <!-- Controls the distribution of a query to shards other than itself.
+            Consider making 'preferLocalShards' true when:
+              1) maxShardsPerNode > 1
+              2) Number of shards > 1
+              3) CloudSolrClient or LBHttpSolrServer is used by clients.
+            Without this option, every core broadcasts the distributed query to
+            a replica of each shard where the replicas are chosen randomly.
+            This option directs the cores to prefer cores hosted locally, thus
+            preventing network delays between machines.
+            This behavior also immunizes a bad/slow machine from slowing down all
+            the good machines (if those good machines were querying this bad one).
+
+            Set this option to false for clients connecting through HttpSolrServer.
+       -->
+       <bool name="preferLocalShards">false</bool>
+     </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for, regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+    </requestHandler>
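"defaults" only apply when the request itself does not set the parameter (unlike "appends" and "invariants" above, which the client cannot override). A hypothetical SolrJ request against this handler that overrides two of the defaults:

```java
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class SelectDefaultsExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrQuery q = new SolrQuery("*:*");
      q.setRequestHandler("/select");
      q.setRows(50);               // overrides the rows=10 default above
      q.set("echoParams", "all");  // overrides echoParams=explicit
      System.out.println(client.query(q).getResults().size());
    }
  }
}
```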
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that the same Request Handler class (SearchHandler) can be
+       registered multiple times with different names (and different
+       init parameters).
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+
+       <!-- VelocityResponseWriter settings -->
+       <str name="wt">velocity</str>
+       <str name="v.template">browse</str>
+       <str name="v.layout">layout</str>
+       <str name="title">Solritas</str>
+
+       <!-- Query settings -->
+       <str name="defType">edismax</str>
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mm">100%</str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+
+       <str name="mlt.qf">
+         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
+       <int name="mlt.count">3</int>
+
+       <!-- Faceting defaults -->
+       <str name="facet">on</str>
+       <str name="facet.missing">true</str>
+       <str name="facet.field">cat</str>
+       <str name="facet.field">manu_exact</str>
+       <str name="facet.field">content_type</str>
+       <str name="facet.field">author_s</str>
+       <str name="facet.query">ipod</str>
+       <str name="facet.query">GB</str>
+       <str name="facet.mincount">1</str>
+       <str name="facet.pivot">cat,inStock</str>
+       <str name="facet.range.other">after</str>
+       <str name="facet.range">price</str>
+       <int name="f.price.facet.range.start">0</int>
+       <int name="f.price.facet.range.end">600</int>
+       <int name="f.price.facet.range.gap">50</int>
+       <str name="facet.range">popularity</str>
+       <int name="f.popularity.facet.range.start">0</int>
+       <int name="f.popularity.facet.range.end">10</int>
+       <int name="f.popularity.facet.range.gap">3</int>
+       <str name="facet.range">manufacturedate_dt</str>
+       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+       <str name="f.manufacturedate_dt.facet.range.other">before</str>
+       <str name="f.manufacturedate_dt.facet.range.other">after</str>
+
+       <!-- Highlighting defaults -->
+       <str name="hl">on</str>
+       <str name="hl.fl">content features title name</str>
+       <str name="hl.preserveMulti">true</str>
+       <str name="hl.encoder">html</str>
+       <str name="hl.simple.pre">&lt;b&gt;</str>
+       <str name="hl.simple.post">&lt;/b&gt;</str>
+       <str name="f.title.hl.fragsize">0</str>
+       <str name="f.title.hl.alternateField">title</str>
+       <str name="f.name.hl.fragsize">0</str>
+       <str name="f.name.hl.alternateField">name</str>
+       <str name="f.content.hl.snippets">3</str>
+       <str name="f.content.hl.fragsize">200</str>
+       <str name="f.content.hl.alternateField">content</str>
+       <str name="f.content.hl.maxAlternateFieldLength">750</str>
+
+       <!-- Spell checking defaults -->
+       <str name="spellcheck">on</str>
+       <str name="spellcheck.extendedResults">false</str>
+       <str name="spellcheck.count">5</str>
+       <str name="spellcheck.alternativeTermCount">2</str>
+       <str name="spellcheck.maxResultsForSuggest">5</str>
+       <str name="spellcheck.collate">true</str>
+       <str name="spellcheck.collateExtendedResults">true</str>
+       <str name="spellcheck.maxCollationTries">5</str>
+       <str name="spellcheck.maxCollations">3</str>
+     </lst>
+
+     <!-- append spellchecking to our list of components -->
+     <arr name="last-components">
+       <str>spellcheck</str>
+     </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse,update">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
+
+  <!-- The following are implicitly added
+  <requestHandler name="/update/json" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/json</str>
+       </lst>
+  </requestHandler>
+  <requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/csv</str>
+       </lst>
+  </requestHandler>
+  -->
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+
+     -->
+
+   <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure used; the default is the internal levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents a query term may appear in and still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+        <float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that uses an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      </lst>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
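A SolrJ sketch of querying the /spell handler declared above and reading back the collated suggestion; the misspelled query term and client URL are invented for illustration:

```java
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.SpellCheckResponse;

public class SpellcheckExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrQuery q = new SolrQuery("exceptoin");   // deliberately misspelled
      q.setRequestHandler("/spell");
      SpellCheckResponse spell = client.query(q).getSpellCheckResponse();
      if (spell != null && !spell.isCorrectlySpelled()) {
        // Collations are fully re-written queries combining suggestions
        // from both the 'default' and 'wordbreak' dictionaries.
        System.out.println("Did you mean: " + spell.getCollatedResult());
      }
    }
  }
}
```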
+
+  <!-- The SuggestComponent in Solr provides users with automatic suggestions for query terms.
+       You can use this to implement a powerful auto-suggest feature in your search application.
+       As with the rest of this solrconfig.xml file, the configuration of this component is purely
+       an example that applies specifically to this configset and example documents.
+
+       More information about this component and other configuration options are described in the
+       "Suggester" section of the reference guide available at
+       http://archive.apache.org/dist/lucene/solr/ref-guide
+    -->
+  <searchComponent name="suggest" class="solr.SuggestComponent">
+    <lst name="suggester">
+      <str name="name">mySuggester</str>
+      <str name="lookupImpl">FuzzyLookupFactory</str>
+      <str name="dictionaryImpl">DocumentDictionaryFactory</str>
+      <str name="field">cat</str>
+      <str name="weightField">price</str>
+      <str name="suggestAnalyzerFieldType">string</str>
+      <str name="buildOnStartup">false</str>
+    </lst>
+  </searchComponent>
+
+  <requestHandler name="/suggest" class="solr.SearchHandler"
+                  startup="lazy" >
+    <lst name="defaults">
+      <str name="suggest">true</str>
+      <str name="suggest.count">10</str>
+    </lst>
+    <arr name="components">
+      <str>suggest</str>
+    </arr>
+  </requestHandler>
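Because buildOnStartup is false above, the suggester's dictionary must be built once before it can answer. A SolrJ sketch against the /suggest handler (client URL hypothetical; "mySuggester" matches the declaration above):

```java
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.SuggesterResponse;

public class SuggestExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrQuery q = new SolrQuery();
      q.setRequestHandler("/suggest");
      q.set("suggest.dictionary", "mySuggester");
      q.set("suggest.q", "elec");
      q.set("suggest.build", "true");  // build once; drop this on later requests
      SuggesterResponse suggest = client.query(q).getSuggesterResponse();
      suggest.getSuggestedTerms().forEach((dict, terms) ->
          System.out.println(dict + " -> " + terms));
    }
  }
}
```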
+
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Clustering Component
+
+       You'll need to set the solr.clustering.enabled system property
+       when starting Solr in order to enable clustering:
+       -Dsolr.clustering.enabled=true
+
+       https://lucene.apache.org/solr/guide/result-clustering.html
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <!--
+    Declaration of "engines" (clustering algorithms).
+
+    The open source algorithms from Carrot2.org project:
+      * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+      * org.carrot2.clustering.stc.STCClusteringAlgorithm
+      * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+    See http://project.carrot2.org/algorithms.html for more information.
+
+    Commercial algorithm Lingo3G (needs to be installed separately):
+      * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
+    -->
+
+    <lst name="engine">
+      <str name="name">lingo3g</str>
+      <bool name="optional">true</bool>
+      <str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">lingo</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">kmeans</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component.
+       This is meant as an example.
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <bool name="clustering.results">true</bool>
+      <!-- Field name with the logical "title" of a each document (optional) -->
+      <str name="carrot.title">name</str>
+      <!-- Field name with the logical "URL" of a each document (optional) -->
+      <str name="carrot.url">id</str>
+      <!-- Field name with the logical "content" of a each document (optional) -->
+      <str name="carrot.snippet">features</str>
+      <!-- Apply highlighter to the title/ content and use this for clustering. -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <!-- Configure the remaining request handler parameters. -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">100</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
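A SolrJ sketch against the /terms handler, listing the most frequent terms of one field; the field name is a placeholder:

```java
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.TermsResponse;

public class TermsExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrQuery q = new SolrQuery();
      q.setRequestHandler("/terms");
      q.setTerms(true);
      q.addTermsField("error_type");  // placeholder field name
      q.setTermsLimit(10);
      TermsResponse terms = client.query(q).getTermsResponse();
      terms.getTerms("error_type").forEach(t ->
          System.out.println(t.getTerm() + " (" + t.getFrequency() + ")"));
    }
  }
}
```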
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawgreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing the Locale object, -->
+          <!-- which in turn is used when getting an instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
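A SolrJ sketch that turns highlighting on for a single field; the formatter above wraps matches in <em> tags by default. Field name and client URL are assumptions:

```java
import java.util.List;
import java.util.Map;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class HighlightExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrQuery q = new SolrQuery("message:timeout");  // placeholder field
      q.setHighlight(true);
      q.addHighlightField("message");
      QueryResponse rsp = client.query(q);
      // document id -> (field -> highlighted snippets)
      Map<String, Map<String, List<String>>> hl = rsp.getHighlighting();
      hl.forEach((id, fields) -> System.out.println(id + " -> " + fields));
    }
  }
}
```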
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
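All three chains above ship commented out. If one were enabled, a client selects it per update request with the update.chain parameter; a SolrJ sketch assuming the "langid" chain has been uncommented and the langid contrib is on the classpath:

```java
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class UpdateChainExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("guid", "example-2");
      doc.addField("message", "bonjour tout le monde");
      UpdateRequest req = new UpdateRequest();
      req.add(doc);
      // Route this update through the "langid" chain (assumed enabled).
      req.setParam("update.chain", "langid");
      req.process(client);
      client.commit();
    }
  }
}
```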
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For the purposes of the tutorial, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json" just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+    <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
+      <str name="template.base.dir">${velocity.template.base.dir:}</str>
+    </queryResponseWriter>
+
+
+  <!-- The XSLT response writer transforms the XML output using any XSLT file
+       found in Solr's conf/xslt directory.  The xslt files are checked for
+       changes every xsltCacheLifetimeSeconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
+
+  <!-- Query Parsers
+
+       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+  <!--  LTR query parser
+
+        You will need to set the solr.ltr.enabled system property
+        when starting Solr in order to enable LTR:
+          -Dsolr.ltr.enabled=true
+
+        https://lucene.apache.org/solr/guide/learning-to-rank.html
+
+        Query parser is used to rerank top docs with a provided model
+    -->
+  <queryParser enable="${solr.ltr.enabled:false}" name="ltr" class="org.apache.solr.ltr.search.LTRQParserPlugin"/>
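Once LTR is enabled and a model has been uploaded to the model store, reranking is requested per query through the rq parameter. A sketch; the model name "myModel" is hypothetical, and [features] refers to the transformer configured further down in this file:

```java
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class LtrRerankExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrQuery q = new SolrQuery("message:timeout");
      // Rerank the top 100 documents with a previously uploaded model.
      q.add("rq", "{!ltr model=myModel reRankDocs=100}");
      q.setFields("*", "score", "[features]");
      client.query(q);
    }
  }
}
```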
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+    <!--
+      LTR Transformer will encode the document features in the response. For each document the transformer
+      will add the features as an extra field in the response. The name of the field will be the
+      name of the transformer enclosed between brackets (in this case [features]).
+      In order to get the feature vector you will have to specify that you
+      want the field (e.g., fl="*,[features])
+
+      You will need to set the solr.ltr.enabled system property
+      when starting Solr in order to enable LTR:
+        -Dsolr.ltr.enabled=true
+
+      https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <transformer enable="${solr.ltr.enabled:false}" name="features" class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory">
+      <str name="fvCacheName">QUERY_DOC_FV</str>
+    </transformer>
+
+</config>
diff --git a/metron-platform/metron-solr/src/main/config/schema/error/schema.xml b/metron-platform/metron-solr/src/main/config/schema/error/schema.xml
new file mode 100644
index 0000000..4aa80ef
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/error/schema.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="error_doc" version="1.6">
+  <field name="_version_" type="plong" indexed="true" stored="true"/>
+  <field name="_root_" type="string" indexed="true" stored="false" docValues="false" />
+
+  <!-- The Exception Details-->
+  <field name="exception" type="string" indexed="true" stored="true" />
+  <field name="hostname" type="string" indexed="true" stored="true" />
+  <field name="stack" type="string" indexed="true" stored="true" />
+
+  <!-- The timestamp of when it happened -->
+  <field name="timestamp" type="timestamp" indexed="true" stored="true" />
+
+  <!-- The message in string form (this may be garbage if the message is bytes rather than text) -->
+  <field name="message" type="string" indexed="true" stored="true" />
+  <field name="raw_message_bytes" type="bytes" indexed="false" stored="true" />
+  <field name="error_fields" type="string" indexed="true" stored="true" />
+  <field name="error_hash" type="string" indexed="true" stored="true" />
+  <field name="failed_sensor_type" type="string" indexed="true" stored="true" />
+  <field name="error_type" type="string" indexed="true" stored="true" />
+
+  <!-- The GUID for the error -->
+  <field name="guid" type="string" indexed="true" stored="true" required="true" multiValued="false" />
+  <uniqueKey>guid</uniqueKey>
+
+  <!-- Raw messages can be split into multiple fields -->
+  <dynamicField name="raw_message*" type="text_raw" indexed="false" stored="true"/>
+  <!-- Catch-all: fields we don't know about match here (note that the "ignored" type below is actually indexed and stored). -->
+  <dynamicField name="*" type="ignored" multiValued="false" docValues="true"/>
+
+
+  <fieldType name="string" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="boolean" stored="true" indexed="true" multiValued="false" class="solr.BoolField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="pint" stored="true" indexed="true" multiValued="false" class="solr.TrieIntField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pfloat" stored="true" indexed="true" multiValued="false" class="solr.TrieFloatField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="plong" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pdouble" stored="true" indexed="true" multiValued="false" class="solr.TrieDoubleField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="bytes" stored="true" indexed="true" multiValued="false" class="solr.BinaryField" sortMissingLast="false"/>
+  <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+  <fieldType name="ip" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="timestamp" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="ignored" stored="true" indexed="true" multiValued="true" class="solr.StrField" sortMissingLast="false" docValues="false"/>
+  <fieldType name="text_raw" class="solr.TextField"/>
+</schema>
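A SolrJ sketch of indexing a document that satisfies this schema: guid is the required uniqueKey, and timestamp is an epoch-millisecond long. The collection name "error" is inferred from the config path and may differ in a real deployment:

```java
import java.util.UUID;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;

public class IndexErrorDocExample {
  public static void main(String[] args) throws Exception {
    try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/error").build()) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("guid", UUID.randomUUID().toString());     // required uniqueKey
      doc.addField("timestamp", System.currentTimeMillis());  // epoch millis (long)
      doc.addField("exception", "java.lang.IllegalStateException");
      doc.addField("message", "failed to parse message");
      doc.addField("failed_sensor_type", "bro");
      doc.addField("error_type", "parser_error");
      client.add(doc);
      client.commit();
    }
  }
}
```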
diff --git a/metron-platform/metron-solr/src/main/config/schema/error/solrconfig.xml b/metron-platform/metron-solr/src/main/config/schema/error/solrconfig.xml
new file mode 100644
index 0000000..fff9d84
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/error/solrconfig.xml
@@ -0,0 +1,1601 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>7.2.0</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath; this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along
+       with their external dependencies.
+    -->
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based and not persistent.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
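+  <!-- For example, to favor a smaller index over indexing speed (syntax per
+       the compressionMode note above):
+       <codecFactory class="solr.SchemaCodecFactory">
+         <str name="compressionMode">BEST_COMPRESSION</str>
+       </codecFactory>
+    -->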
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- Expert: Enabling the compound file format will use fewer files for
+         the index, consuming fewer file descriptors at the expense of
+         decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
+         Even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+          <double name="noCFSRatio">0.1</double>
+        </mergePolicyFactory>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is the default for Solr 3.6 and later;
+                   otherwise 'simple' is the default.
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+      <!-- The number of commit points to be kept -->
+      <!-- <str name="maxCommitsToKeep">1</str> -->
+      <!-- The number of optimized commit points to be kept -->
+      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability,
+         and SolrCloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing, this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit>
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+       <openSearcher>false</openSearcher>
+     </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+     <autoSoftCommit>
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+     </autoSoftCommit>
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
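+    <!-- An illustrative sketch: a hypothetical custom listener class
+         (com.mycompany.SnapshotListener, assumed to implement
+         org.apache.solr.core.SolrEventListener) attached to postCommit:
+
+       <listener event="postCommit" class="com.mycompany.SnapshotListener">
+         <str name="snapshotDir">/tmp/snapshots</str>
+       </listener>
+      -->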
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+
+    <!-- Maximum number of clauses in each BooleanQuery; an exception
+         is thrown if exceeded.  It is safe to increase or remove this setting,
+         since it is purely an arbitrary limit to try and catch user errors where
+         large boolean queries may not be the best implementation choice.
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Slow Query Threshold (in millis)
+
+         At high request rates, logging all requests can become a bottleneck
+         and therefore INFO logging is often turned off. However, it is still
+         useful to be able to set a latency threshold above which a request
+         is considered "slow" and log that request at WARN level so we can
+         easily identify slow queries.
+    -->
+    <slowQueryThresholdMillis>-1</slowQueryThresholdMillis>
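+    <!-- For example, to log any request slower than one second at WARN:
+       <slowQueryThresholdMillis>1000</slowQueryThresholdMillis>
+      -->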
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
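+    <!-- Alternatively, the cache can be bounded by memory instead of entry
+         count; when maxRamMB is given, size and initialSize are ignored
+         (per the parameter notes above):
+       <filterCache class="solr.FastLRUCache"
+                    maxRamMB="64"
+                    autowarmCount="0"/>
+      -->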
+
+    <!-- Query Result Cache
+
+        Caches results of searches - ordered lists of document ids
+        (DocList) based on a query, a sort, and the range of documents requested.
+        Additional supported parameter by LRUCache:
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy
+     -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Feature Values Cache
+
+         Cache used by the Learning To Rank (LTR) contrib module.
+
+         You will need to set the solr.ltr.enabled system property
+         when running solr to run with ltr enabled:
+           -Dsolr.ltr.enabled=true
+
+         https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <cache enable="${solr.ltr.enabled:false}" name="QUERY_DOC_FV"
+           class="solr.search.LRUCache"
+           size="4096"
+           initialSize="2048"
+           autowarmCount="4096"
+           regenerator="solr.search.NoOpRegenerator" />
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score"
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache.
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+    -->
+  <requestDispatcher>
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         Before enabling remote streaming, you should make sure your
+         system has authentication enabled.
+
+    <requestParsers enableRemoteStreaming="false"
+                    multipartUploadLimitInKB="-1"
+                    formdataUploadLimitInKB="-1"
+                    addHttpRequestToContext="false"/>
+      -->
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+
+         By default, no Cache-Control header is generated.
+
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModifiedFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModifiedFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModifiedFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler".  It delegates to a sequence
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards.
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+       <!-- Default search field
+          <str name="df">text</str>
+         -->
+       <!-- Change from JSON to XML format (the default prior to Solr 7.0)
+          <str name="wt">xml</str>
+         -->
+       <!-- Controls the distribution of a query to shards other than itself.
+            Consider making 'preferLocalShards' true when:
+              1) maxShardsPerNode > 1
+              2) Number of shards > 1
+              3) CloudSolrClient or LbHttpSolrServer is used by clients.
+            Without this option, every core broadcasts the distributed query to
+            a replica of each shard where the replicas are chosen randomly.
+            This option directs the cores to prefer cores hosted locally, thus
+            preventing network delays between machines.
+            This behavior also keeps a bad/slow machine from slowing down all
+            the good machines (which would otherwise be querying the bad one).
+
+            Specify this option=false for clients connecting through HttpSolrServer
+       -->
+       <bool name="preferLocalShards">false</bool>
+     </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+    </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
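+  <!-- For example, /query?q=foo searches the default field ("text", per the
+       df setting below) and returns indented JSON. -->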
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that multiple instances of the same Request Handler
+       (SearchHandler) can be registered multiple times with different
+       names (and different init parameters)
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+
+       <!-- VelocityResponseWriter settings -->
+       <str name="wt">velocity</str>
+       <str name="v.template">browse</str>
+       <str name="v.layout">layout</str>
+       <str name="title">Solritas</str>
+
+       <!-- Query settings -->
+       <str name="defType">edismax</str>
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mm">100%</str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+
+       <str name="mlt.qf">
+         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
+       <int name="mlt.count">3</int>
+
+       <!-- Faceting defaults -->
+       <str name="facet">on</str>
+       <str name="facet.missing">true</str>
+       <str name="facet.field">cat</str>
+       <str name="facet.field">manu_exact</str>
+       <str name="facet.field">content_type</str>
+       <str name="facet.field">author_s</str>
+       <str name="facet.query">ipod</str>
+       <str name="facet.query">GB</str>
+       <str name="facet.mincount">1</str>
+       <str name="facet.pivot">cat,inStock</str>
+       <str name="facet.range.other">after</str>
+       <str name="facet.range">price</str>
+       <int name="f.price.facet.range.start">0</int>
+       <int name="f.price.facet.range.end">600</int>
+       <int name="f.price.facet.range.gap">50</int>
+       <str name="facet.range">popularity</str>
+       <int name="f.popularity.facet.range.start">0</int>
+       <int name="f.popularity.facet.range.end">10</int>
+       <int name="f.popularity.facet.range.gap">3</int>
+       <str name="facet.range">manufacturedate_dt</str>
+       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+       <str name="f.manufacturedate_dt.facet.range.other">before</str>
+       <str name="f.manufacturedate_dt.facet.range.other">after</str>
+
+       <!-- Highlighting defaults -->
+       <str name="hl">on</str>
+       <str name="hl.fl">content features title name</str>
+       <str name="hl.preserveMulti">true</str>
+       <str name="hl.encoder">html</str>
+       <str name="hl.simple.pre">&lt;b&gt;</str>
+       <str name="hl.simple.post">&lt;/b&gt;</str>
+       <str name="f.title.hl.fragsize">0</str>
+       <str name="f.title.hl.alternateField">title</str>
+       <str name="f.name.hl.fragsize">0</str>
+       <str name="f.name.hl.alternateField">name</str>
+       <str name="f.content.hl.snippets">3</str>
+       <str name="f.content.hl.fragsize">200</str>
+       <str name="f.content.hl.alternateField">content</str>
+       <str name="f.content.hl.maxAlternateFieldLength">750</str>
+
+       <!-- Spell checking defaults -->
+       <str name="spellcheck">on</str>
+       <str name="spellcheck.extendedResults">false</str>
+       <str name="spellcheck.count">5</str>
+       <str name="spellcheck.alternativeTermCount">2</str>
+       <str name="spellcheck.maxResultsForSuggest">5</str>
+       <str name="spellcheck.collate">true</str>
+       <str name="spellcheck.collateExtendedResults">true</str>
+       <str name="spellcheck.maxCollationTries">5</str>
+       <str name="spellcheck.maxCollations">3</str>
+     </lst>
+
+     <!-- append spellchecking to our list of components -->
+     <arr name="last-components">
+       <str>spellcheck</str>
+     </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse,update">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
+
+  <!-- The following are implicitly added
+  <requestHandler name="/update/json" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/json</str>
+       </lst>
+  </requestHandler>
+  <requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/csv</str>
+       </lst>
+  </requestHandler>
+  -->
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
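+  <!-- An illustrative request (collection name, unique-key value, and file
+       name are placeholders):
+
+       curl "http://localhost:8983/solr/mycollection/update/extract?literal.id=doc1&commit=true" \
+            -F "myfile=@example.pdf"
+    -->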
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+
+     -->
+
+   <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure used; the default is the internal levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents a query term can appear in and still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+        <float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that use an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      </lst>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
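+  <!-- An illustrative request (the misspelled query text is a placeholder):
+       /spell?q=delll+ultrashar&spellcheck=true
+    -->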
+
+  <!-- The SuggestComponent in Solr provides users with automatic suggestions for query terms.
+       You can use this to implement a powerful auto-suggest feature in your search application.
+       As with the rest of this solrconfig.xml file, the configuration of this component is purely
+       an example that applies specifically to this configset and example documents.
+
+       More information about this component and other configuration options are described in the
+       "Suggester" section of the reference guide available at
+       http://archive.apache.org/dist/lucene/solr/ref-guide
+    -->
+  <searchComponent name="suggest" class="solr.SuggestComponent">
+    <lst name="suggester">
+      <str name="name">mySuggester</str>
+      <str name="lookupImpl">FuzzyLookupFactory</str>
+      <str name="dictionaryImpl">DocumentDictionaryFactory</str>
+      <str name="field">cat</str>
+      <str name="weightField">price</str>
+      <str name="suggestAnalyzerFieldType">string</str>
+      <str name="buildOnStartup">false</str>
+    </lst>
+  </searchComponent>
+
+  <requestHandler name="/suggest" class="solr.SearchHandler"
+                  startup="lazy" >
+    <lst name="defaults">
+      <str name="suggest">true</str>
+      <str name="suggest.count">10</str>
+    </lst>
+    <arr name="components">
+      <str>suggest</str>
+    </arr>
+  </requestHandler>
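+  <!-- An illustrative request; the suggester must be built once (e.g. with
+       suggest.build=true) before suggestions are returned:
+       /suggest?suggest=true&suggest.dictionary=mySuggester&suggest.q=elec
+    -->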
+
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Clustering Component
+
+       You'll need to set the solr.clustering.enabled system property
+       when running solr to run with clustering enabled:
+       -Dsolr.clustering.enabled=true
+
+       https://lucene.apache.org/solr/guide/result-clustering.html
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <!--
+    Declaration of "engines" (clustering algorithms).
+
+    The open source algorithms from Carrot2.org project:
+      * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+      * org.carrot2.clustering.stc.STCClusteringAlgorithm
+      * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+    See http://project.carrot2.org/algorithms.html for more information.
+
+    Commercial algorithm Lingo3G (needs to be installed separately):
+      * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
+    -->
+
+    <lst name="engine">
+      <str name="name">lingo3g</str>
+      <bool name="optional">true</bool>
+      <str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">lingo</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">kmeans</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component.
+       This is meant as an example.
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <bool name="clustering.results">true</bool>
+      <!-- Field name with the logical "title" of each document (optional) -->
+      <str name="carrot.title">name</str>
+      <!-- Field name with the logical "URL" of each document (optional) -->
+      <str name="carrot.url">id</str>
+      <!-- Field name with the logical "content" of each document (optional) -->
+      <str name="carrot.snippet">features</str>
+      <!-- Apply the highlighter to the title/content and use this for clustering. -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <!-- Configure the remaining request handler parameters. -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">100</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
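+  <!-- An illustrative request listing terms from the "text" field that
+       begin with "a":
+       /terms?terms.fl=text&terms.prefix=a
+    -->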
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
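+  <!-- The handler above assumes an "elevator" searchComponent is registered
+       elsewhere; a minimal sketch of such a registration (assuming an
+       elevate.xml exists in conf/) would be:
+       <searchComponent name="elevator" class="solr.QueryElevationComponent">
+         <str name="queryFieldType">string</str>
+         <str name="config-file">elevate.xml</str>
+       </searchComponent>
+    -->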
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawngreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing Locale object.  -->
+          <!-- And the Locale object will be used when getting instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For the purposes of the tutorial, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json" just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+    <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
+      <str name="template.base.dir">${velocity.template.base.dir:}</str>
+    </queryResponseWriter>
+
+
+  <!-- XSLT response writer transforms the XML output by any xslt file found
+       in Solr's conf/xslt directory.  Changes to xslt files are checked for
+       every xsltCacheLifetimeSeconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
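+
+  <!-- Illustrative only (not part of the stock config): with a stylesheet
+       such as conf/xslt/example.xsl in place, the writer above could be
+       exercised with a request like the following; the file name here is
+       hypothetical.
+
+         /select?q=*:*&wt=xslt&tr=example.xsl
+    -->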
+
+  <!-- Query Parsers
+
+       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+  <!--  LTR query parser
+
+        You will need to set the solr.ltr.enabled system property
+        when running solr to run with ltr enabled:
+          -Dsolr.ltr.enabled=true
+
+        https://lucene.apache.org/solr/guide/learning-to-rank.html
+
+        Query parser is used to rerank top docs with a provided model
+    -->
+  <queryParser enable="${solr.ltr.enabled:false}" name="ltr" class="org.apache.solr.ltr.search.LTRQParserPlugin"/>
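+
+  <!-- Illustrative only: once a feature store and model have been uploaded,
+       the "ltr" parser registered above is typically invoked through the
+       rerank query parameter; the model name "myModel" is hypothetical.
+
+         /select?q=test&rq={!ltr model=myModel reRankDocs=100}&fl=id,score
+    -->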
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+    <!--
+      LTR Transformer will encode the document features in the response. For each document the transformer
+      will add the features as an extra field in the response. The name of the field will be the
+      name of the transformer enclosed in brackets (in this case [features]).
+      In order to get the feature vector you will have to specify that you
+      want the field (e.g., fl="*,[features]").
+
+      You will need to set the solr.ltr.enabled system property
+      when running solr to run with ltr enabled:
+        -Dsolr.ltr.enabled=true
+
+      https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <transformer enable="${solr.ltr.enabled:false}" name="features" class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory">
+      <str name="fvCacheName">QUERY_DOC_FV</str>
+    </transformer>
+
+</config>
diff --git a/metron-platform/metron-solr/src/main/config/schema/metaalert/schema.xml b/metron-platform/metron-solr/src/main/config/schema/metaalert/schema.xml
new file mode 100644
index 0000000..63e729b
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/metaalert/schema.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<schema name="metaalert_doc" version="1.6">
+  <field name="_version_" type="plong" indexed="true" stored="true"/>
+  <field name="_root_" type="string" indexed="true" stored="false" docValues="false"/>
+  <field name="_childDocuments_" type="ignored" stored="true" docValues="true"/>
+
+  <field name="guid" type="string" indexed="true" stored="true" required="true"
+    multiValued="false"/>
+
+  <field name="source.type" type="string" indexed="true" stored="true"/>
+  <field name="timestamp" type="plong" indexed="true" stored="true"/>
+  <field name="score" type="pdouble" indexed="true" stored="true"/>
+  <field name="status" type="string" indexed="true" stored="true"/>
+  <field name="threat:triage:score" type="pdouble" indexed="true" stored="true"/>
+  <field name="average" type="pdouble" indexed="true" stored="true"/>
+  <field name="min" type="pdouble" indexed="true" stored="true"/>
+  <field name="median" type="pdouble" indexed="true" stored="true"/>
+  <field name="max" type="pdouble" indexed="true" stored="true"/>
+  <field name="sum" type="pdouble" indexed="true" stored="true"/>
+  <field name="count" type="pint" indexed="true" stored="true"/>
+  <field name="groups" type="string" indexed="true" stored="true" multiValued="true"/>
+
+  <!-- Ensure that metaalerts child field is multivalued -->
+  <field name="metaalerts" type="string" multiValued="true" indexed="true" stored="true"/>
+
+  <dynamicField name="*" type="ignored" indexed="true" stored="true" multiValued="false" docValues="true"/>
+
+  <uniqueKey>guid</uniqueKey>
+
+  <!-- Type Definitions -->
+  <fieldType name="string" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="boolean" stored="true" indexed="true" multiValued="false" class="solr.BoolField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="pint" stored="true" indexed="true" multiValued="false" class="solr.TrieIntField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pfloat" stored="true" indexed="true" multiValued="false" class="solr.TrieFloatField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="plong" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pdouble" stored="true" indexed="true" multiValued="false" class="solr.TrieDoubleField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+  <fieldType name="ip" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="timestamp" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="ignored" stored="true" indexed="true" multiValued="true" class="solr.StrField" sortMissingLast="false" docValues="false"/>
+</schema>
\ No newline at end of file
diff --git a/metron-platform/metron-solr/src/main/config/schema/metaalert/solrconfig.xml b/metron-platform/metron-solr/src/main/config/schema/metaalert/solrconfig.xml
new file mode 100644
index 0000000..fff9d84
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/metaalert/solrconfig.xml
@@ -0,0 +1,1601 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>7.2.0</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath, this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along
+       with their external dependencies.
+    -->
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based and not persistent.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
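+
+  <!-- Illustrative only: a sketch of selecting the alternative compression
+       mode described above; when the element is omitted, as in this config,
+       the default "BEST_SPEED" applies.
+
+       <codecFactory class="solr.SchemaCodecFactory">
+         <str name="compressionMode">BEST_COMPRESSION</str>
+       </codecFactory>
+    -->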
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- Expert: Enabling compound file will use fewer files for the index,
+         using fewer file descriptors at the expense of decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was LogByteSizeMergePolicy;
+         even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+          <double name="noCFSRatio">0.1</double>
+        </mergePolicyFactory>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is default for Solr3.6 and later, otherwise
+                   'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
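+
+    <!-- Illustrative only: since the lock type is resolved through the
+         solr.lock.type system property above, it can be overridden at
+         startup, e.g.
+
+           -Dsolr.lock.type=simple
+      -->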
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+      <!-- The number of commit points to be kept -->
+      <!-- <str name="maxCommitsToKeep">1</str> -->
+      <!-- The number of optimized commit points to be kept -->
+      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability, and
+         SolrCloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing, this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit>
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+       <openSearcher>false</openSearcher>
+     </autoCommit>
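+
+    <!-- Illustrative only: a sketch of the "commitWithin" alternative
+         mentioned above, attached to an XML update message; the document
+         body is hypothetical.
+
+         <add commitWithin="10000">
+           <doc>...</doc>
+         </add>
+      -->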
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+     <autoSoftCommit>
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+     </autoSoftCommit>
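+
+    <!-- Illustrative only: soft commits are disabled above (maxTime of -1);
+         they could be enabled at startup through the referenced system
+         property, e.g. for a one-second soft commit interval:
+
+           -Dsolr.autoSoftCommit.maxTime=1000
+      -->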
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+
+    <!-- Maximum number of clauses in each BooleanQuery; an exception
+         is thrown if exceeded.  It is safe to increase or remove this setting,
+         since it is purely an arbitrary limit to try and catch user errors where
+         large boolean queries may not be the best implementation choice.
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Slow Query Threshold (in millis)
+
+         At high request rates, logging all requests can become a bottleneck
+         and therefore INFO logging is often turned off. However, it is still
+         useful to be able to set a latency threshold above which a request
+         is considered "slow" and log that request at WARN level so we can
+         easily identify slow queries.
+    -->
+    <slowQueryThresholdMillis>-1</slowQueryThresholdMillis>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+
+        Caches results of searches - ordered lists of document ids
+        (DocList) based on a query, a sort, and the range of documents requested.
+        Additional supported parameter by LRUCache:
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy
+     -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="0"/>
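+
+    <!-- Illustrative only: a sketch of the maxRamMB option mentioned above,
+         which bounds this cache by memory footprint instead of entry count
+         (size and initialSize are ignored when it is set).
+
+         <queryResultCache class="solr.LRUCache"
+                           maxRamMB="64"
+                           autowarmCount="0"/>
+      -->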
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Feature Values Cache
+
+         Cache used by the Learning To Rank (LTR) contrib module.
+
+         You will need to set the solr.ltr.enabled system property
+         when running solr to run with ltr enabled:
+           -Dsolr.ltr.enabled=true
+
+         https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <cache enable="${solr.ltr.enabled:false}" name="QUERY_DOC_FV"
+           class="solr.search.LRUCache"
+           size="4096"
+           initialSize="2048"
+           autowarmCount="4096"
+           regenerator="solr.search.NoOpRegenerator" />
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score"
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache.
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+    -->
+  <requestDispatcher>
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         Before enabling remote streaming, you should make sure your
+         system has authentication enabled.
+
+    <requestParsers enableRemoteStreaming="false"
+                    multipartUploadLimitInKB="-1"
+                    formdataUploadLimitInKB="-1"
+                    addHttpRequestToContext="false"/>
+      -->
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+
+         By default, no Cache-Control header is generated.
+
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler". It delegates to a sequence
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+       <!-- Default search field
+          <str name="df">text</str>
+         -->
+       <!-- Change from JSON to XML format (the default prior to Solr 7.0)
+          <str name="wt">xml</str>
+         -->
+       <!-- Controls the distribution of a query to shards other than itself.
+            Consider making 'preferLocalShards' true when:
+              1) maxShardsPerNode > 1
+              2) Number of shards > 1
+              3) CloudSolrClient or LbHttpSolrServer is used by clients.
+            Without this option, every core broadcasts the distributed query to
+            a replica of each shard where the replicas are chosen randomly.
+            This option directs the cores to prefer cores hosted locally, thus
+            preventing network delays between machines.
+            This behavior also immunizes a bad/slow machine from slowing down all
+            the good machines (if those good machines were querying this bad one).
+
+            Set preferLocalShards=false for clients connecting through HttpSolrServer.
+       -->
+       <bool name="preferLocalShards">false</bool>
+     </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+    </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that the same Request Handler class (SearchHandler) can be
+       registered multiple times with different names (and different
+       init parameters)
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+
+       <!-- VelocityResponseWriter settings -->
+       <str name="wt">velocity</str>
+       <str name="v.template">browse</str>
+       <str name="v.layout">layout</str>
+       <str name="title">Solritas</str>
+
+       <!-- Query settings -->
+       <str name="defType">edismax</str>
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mm">100%</str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+
+       <str name="mlt.qf">
+         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
+       <int name="mlt.count">3</int>
+
+       <!-- Faceting defaults -->
+       <str name="facet">on</str>
+       <str name="facet.missing">true</str>
+       <str name="facet.field">cat</str>
+       <str name="facet.field">manu_exact</str>
+       <str name="facet.field">content_type</str>
+       <str name="facet.field">author_s</str>
+       <str name="facet.query">ipod</str>
+       <str name="facet.query">GB</str>
+       <str name="facet.mincount">1</str>
+       <str name="facet.pivot">cat,inStock</str>
+       <str name="facet.range.other">after</str>
+       <str name="facet.range">price</str>
+       <int name="f.price.facet.range.start">0</int>
+       <int name="f.price.facet.range.end">600</int>
+       <int name="f.price.facet.range.gap">50</int>
+       <str name="facet.range">popularity</str>
+       <int name="f.popularity.facet.range.start">0</int>
+       <int name="f.popularity.facet.range.end">10</int>
+       <int name="f.popularity.facet.range.gap">3</int>
+       <str name="facet.range">manufacturedate_dt</str>
+       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+       <str name="f.manufacturedate_dt.facet.range.other">before</str>
+       <str name="f.manufacturedate_dt.facet.range.other">after</str>
+
+       <!-- Highlighting defaults -->
+       <str name="hl">on</str>
+       <str name="hl.fl">content features title name</str>
+       <str name="hl.preserveMulti">true</str>
+       <str name="hl.encoder">html</str>
+       <str name="hl.simple.pre">&lt;b&gt;</str>
+       <str name="hl.simple.post">&lt;/b&gt;</str>
+       <str name="f.title.hl.fragsize">0</str>
+       <str name="f.title.hl.alternateField">title</str>
+       <str name="f.name.hl.fragsize">0</str>
+       <str name="f.name.hl.alternateField">name</str>
+       <str name="f.content.hl.snippets">3</str>
+       <str name="f.content.hl.fragsize">200</str>
+       <str name="f.content.hl.alternateField">content</str>
+       <str name="f.content.hl.maxAlternateFieldLength">750</str>
+
+       <!-- Spell checking defaults -->
+       <str name="spellcheck">on</str>
+       <str name="spellcheck.extendedResults">false</str>
+       <str name="spellcheck.count">5</str>
+       <str name="spellcheck.alternativeTermCount">2</str>
+       <str name="spellcheck.maxResultsForSuggest">5</str>
+       <str name="spellcheck.collate">true</str>
+       <str name="spellcheck.collateExtendedResults">true</str>
+       <str name="spellcheck.maxCollationTries">5</str>
+       <str name="spellcheck.maxCollations">3</str>
+     </lst>
+
+     <!-- append spellchecking to our list of components -->
+     <arr name="last-components">
+       <str>spellcheck</str>
+     </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse,update">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
+
+  <!-- The following are implicitly added
+  <requestHandler name="/update/json" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/json</str>
+       </lst>
+  </requestHandler>
+  <requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/csv</str>
+       </lst>
+  </requestHandler>
+  -->
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
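+
+  <!-- Illustrative only: a typical way to exercise the extraction handler
+       above; the collection name, document id, and file name are all
+       hypothetical.
+
+         curl 'http://localhost:8983/solr/mycollection/update/extract?literal.id=doc1&commit=true' \
+           -F "myfile=@example.pdf"
+    -->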
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+
+     -->
+
+   <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents a query term may appear in and still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+        <float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that uses an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      </lst>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
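+
+  <!-- Illustrative only: a request like the following would exercise the
+       handler above; the misspelled query term is hypothetical.
+
+         /spell?q=metrn&spellcheck=true
+    -->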
+
+  <!-- The SuggestComponent in Solr provides users with automatic suggestions for query terms.
+       You can use this to implement a powerful auto-suggest feature in your search application.
+       As with the rest of this solrconfig.xml file, the configuration of this component is purely
+       an example that applies specifically to this configset and example documents.
+
+       More information about this component and other configuration options are described in the
+       "Suggester" section of the reference guide available at
+       http://archive.apache.org/dist/lucene/solr/ref-guide
+    -->
+  <searchComponent name="suggest" class="solr.SuggestComponent">
+    <lst name="suggester">
+      <str name="name">mySuggester</str>
+      <str name="lookupImpl">FuzzyLookupFactory</str>
+      <str name="dictionaryImpl">DocumentDictionaryFactory</str>
+      <str name="field">cat</str>
+      <str name="weightField">price</str>
+      <str name="suggestAnalyzerFieldType">string</str>
+      <str name="buildOnStartup">false</str>
+    </lst>
+  </searchComponent>
+
+  <requestHandler name="/suggest" class="solr.SearchHandler"
+                  startup="lazy" >
+    <lst name="defaults">
+      <str name="suggest">true</str>
+      <str name="suggest.count">10</str>
+    </lst>
+    <arr name="components">
+      <str>suggest</str>
+    </arr>
+  </requestHandler>
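+
+  <!-- Illustrative only: since "mySuggester" above sets buildOnStartup to
+       false, the dictionary is built on demand before it is queried; the
+       prefix "el" is hypothetical.
+
+         /suggest?suggest=true&suggest.dictionary=mySuggester&suggest.build=true
+         /suggest?suggest=true&suggest.dictionary=mySuggester&suggest.q=el
+    -->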
+
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
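+
+  <!-- Illustrative only: a request such as the following returns term
+       vector information for matching documents, for fields that store
+       term vectors.
+
+         /tvrh?q=*:*&tv.tf=true&tv.df=true
+    -->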
+
+  <!-- Clustering Component
+
+       You'll need to set the solr.clustering.enabled system property
+       when running solr to run with clustering enabled:
+       -Dsolr.clustering.enabled=true
+
+       https://lucene.apache.org/solr/guide/result-clustering.html
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <!--
+    Declaration of "engines" (clustering algorithms).
+
+    The open source algorithms from Carrot2.org project:
+      * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+      * org.carrot2.clustering.stc.STCClusteringAlgorithm
+      * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+    See http://project.carrot2.org/algorithms.html for more information.
+
+    Commercial algorithm Lingo3G (needs to be installed separately):
+      * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
+    -->
+
+    <lst name="engine">
+      <str name="name">lingo3g</str>
+      <bool name="optional">true</bool>
+      <str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">lingo</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">kmeans</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component.
+       This is meant as an example.
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <bool name="clustering.results">true</bool>
+      <!-- Field name with the logical "title" of each document (optional) -->
+      <str name="carrot.title">name</str>
+      <!-- Field name with the logical "URL" of each document (optional) -->
+      <str name="carrot.url">id</str>
+      <!-- Field name with the logical "content" of each document (optional) -->
+      <str name="carrot.snippet">features</str>
+      <!-- Apply highlighter to the title/content and use this for clustering. -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <!-- Configure the remaining request handler parameters. -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">100</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
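+
+  <!-- Illustrative only: start Solr with -Dsolr.clustering.enabled=true, then
+       a request such as /clustering?q=*:* returns a "clusters" section next to
+       the normal results; a specific engine declared above can be selected
+       with clustering.engine=lingo (or stc, kmeans). -->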
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
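+
+  <!-- Illustrative only: a request such as /terms?terms.fl=cat&terms.limit=10
+       lists up to 10 indexed terms from the "cat" field together with their
+       document frequencies. -->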
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
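+
+  <!-- Illustrative only: this handler assumes a QueryElevationComponent named
+       "elevator" is declared elsewhere in this file; a request such as
+       /elevate?q=ipod&enableElevation=true then returns the documents pinned
+       in elevate.xml ahead of the organic results. -->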
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawgreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing Locale object.  -->
+          <!-- And the Locale object will be used when getting instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
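+
+  <!-- Illustrative only: the named pieces above are selectable per request,
+       e.g. /select?q=features:power&hl=true&hl.fl=features&hl.fragmenter=regex
+       highlights matches in the "features" field using the regex fragmenter
+       with the default html formatter and encoder. -->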
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For the purposes of the tutorial, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json" just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+    <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
+      <str name="template.base.dir">${velocity.template.base.dir:}</str>
+    </queryResponseWriter>
+
+
+  <!-- XSLT response writer transforms the XML output by any xslt file found
+       in Solr's conf/xslt directory.  Changes to xslt files are checked for
+       every xsltCacheLifetimeSeconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
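+
+  <!-- Illustrative only: assuming a stylesheet exists at conf/xslt/example.xsl,
+       a request such as /select?q=*:*&wt=xslt&tr=example.xsl transforms the XML
+       response through it; edits to the file are picked up within
+       xsltCacheLifetimeSeconds. -->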
+
+  <!-- Query Parsers
+
+       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+  <!--  LTR query parser
+
+        You will need to set the solr.ltr.enabled system property
+        when running solr to run with ltr enabled:
+          -Dsolr.ltr.enabled=true
+
+        https://lucene.apache.org/solr/guide/learning-to-rank.html
+
+        Query parser is used to rerank top docs with a provided model
+    -->
+  <queryParser enable="${solr.ltr.enabled:false}" name="ltr" class="org.apache.solr.ltr.search.LTRQParserPlugin"/>
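+
+  <!-- Illustrative only: with -Dsolr.ltr.enabled=true and a model previously
+       uploaded to the LTR model store (the name "myModel" here is just an
+       example), top results can be reranked by adding
+         rq={!ltr model=myModel reRankDocs=100}
+       to a query. -->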
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+    <!--
+      LTR Transformer will encode the document features in the response. For each document the transformer
+      will add the features as an extra field in the response. The name of the field will be the
+      name of the transformer enclosed between brackets (in this case [features]).
+      In order to get the feature vector you will have to specify that you
+      want the field (e.g. fl=*,[features]).
+
+      You will need to set the solr.ltr.enabled system property
+      when running solr to run with ltr enabled:
+        -Dsolr.ltr.enabled=true
+
+      https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <transformer enable="${solr.ltr.enabled:false}" name="features" class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory">
+      <str name="fvCacheName">QUERY_DOC_FV</str>
+    </transformer>
+
+</config>
diff --git a/metron-platform/metron-solr/src/main/config/schema/snort/schema.xml b/metron-platform/metron-solr/src/main/config/schema/snort/schema.xml
new file mode 100644
index 0000000..84855df
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/snort/schema.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="snort_doc" version="1.6">
+  <field name="_version_" type="plong" indexed="true" stored="true"/>
+  <field name="_root_" type="string" indexed="true" stored="false" docValues="false" />
+
+  <!-- Metron specific fields -->
+  <field name="timestamp" type="timestamp" indexed="true" stored="true" />
+  <field name="source.type" type="string" indexed="true" stored="true" />
+  <field name="guid" type="string" indexed="true" stored="true" required="true" multiValued="false" />
+  <uniqueKey>guid</uniqueKey>
+
+  <!-- Source/Destination information -->
+  <field name="ip_dst_addr" type="ip" indexed="true" stored="true" />
+  <field name="ip_dst_port" type="pint" indexed="true" stored="true" />
+  <field name="ip_src_addr" type="ip" indexed="true" stored="true" />
+  <field name="ip_src_port" type="pint" indexed="true" stored="true" />
+
+  <!-- Snort specific fields -->
+  <field name="dgmlen" type="pint" indexed="true" stored="true" />
+  <field name="ethdst" type="string" indexed="true" stored="true" />
+  <field name="ethlen" type="string" indexed="true" stored="true" />
+  <field name="ethsrc" type="string" indexed="true" stored="true" />
+  <field name="id" type="pint" indexed="true" stored="true" />
+  <field name="iplen" type="pint" indexed="true" stored="true" />
+  <field name="msg" type="string" indexed="true" stored="true" />
+  <field name="protocol" type="string" indexed="true" stored="true" />
+  <field name="sig_generator" type="string" indexed="true" stored="true" />
+  <field name="sig_id" type="pint" indexed="true" stored="true" />
+  <field name="sig_rev" type="string" indexed="true" stored="true" />
+  <field name="tcpack" type="string" indexed="true" stored="true" />
+  <field name="tcpflags" type="string" indexed="true" stored="true" />
+  <field name="tcpseq" type="string" indexed="true" stored="true" />
+  <field name="tcpwindow" type="string" indexed="true" stored="true" />
+  <field name="tos" type="pint" indexed="true" stored="true" />
+  <field name="ttl" type="pint" indexed="true" stored="true" />
+
+  <!-- Geo Enrichment Fields -->
+  <dynamicField name="*.location_point" type="location" multiValued="false" docValues="false"/>
+  <dynamicField name="*_coordinate" type="pdouble" indexed="true" stored="false" docValues="false"/>
+  <dynamicField name="*.country" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.city" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.locID" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.dmaCode" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.postalCode" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.latitude" type="pfloat" multiValued="false" docValues="true"/>
+  <dynamicField name="*.longitude" type="pfloat" multiValued="false" docValues="true"/>
+
+  <!-- Performance Debugging Fields -->
+  <dynamicField name="*.ts" type="timestamp" multiValued="false" docValues="true"/>
+
+  <!-- Threat Intel Scoring Fields -->
+  <field name="is_alert" type="boolean" indexed="true" stored="true" />
+  <dynamicField name="*score" type="pfloat" multiValued="false" docValues="true"/>
+  <dynamicField name="*.reason" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.name" type="string" multiValued="false" docValues="true"/>
+
+  <!-- Comments field required for the UI -->
+  <field name="comments" type="string" indexed="true" stored="true" multiValued="true"/>
+
+  <!-- Metaalerts Field -->
+  <field name="metaalerts" type="string" multiValued="true" indexed="true" stored="true"/>
+
+  <!-- Catch all, if we don't know about it, it gets dropped. -->
+  <dynamicField name="*" type="ignored" multiValued="false" docValues="true"/>
+
+  <!-- Type Definitions -->
+  <fieldType name="string" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="boolean" stored="true" indexed="true" multiValued="false" class="solr.BoolField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="pint" stored="true" indexed="true" multiValued="false" class="solr.TrieIntField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pfloat" stored="true" indexed="true" multiValued="false" class="solr.TrieFloatField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="plong" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pdouble" stored="true" indexed="true" multiValued="false" class="solr.TrieDoubleField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+  <fieldType name="ip" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="timestamp" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="ignored" stored="true" indexed="true" multiValued="true" class="solr.StrField" sortMissingLast="false" docValues="false"/>
+</schema>
diff --git a/metron-platform/metron-solr/src/main/config/schema/snort/solrconfig.xml b/metron-platform/metron-solr/src/main/config/schema/snort/solrconfig.xml
new file mode 100644
index 0000000..fff9d84
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/snort/solrconfig.xml
@@ -0,0 +1,1601 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>7.2.0</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath, this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along
+       with their external dependencies.
+    -->
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based and not persistent.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- Expert: Enabling compound file will use fewer files for the index,
+         using fewer file descriptors at the expense of decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
+         Even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+          <double name="noCFSRatio">0.1</double>
+        </mergePolicyFactory>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is default for Solr3.6 and later, otherwise
+                   'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+      <!-- The number of commit points to be kept -->
+      <!-- <str name="maxCommitsToKeep">1</str> -->
+      <!-- The number of optimized commit points to be kept -->
+      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability, and
+         SolrCloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing, this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
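+
+    <!-- Illustrative only: the property syntax above allows per-node overrides
+         at startup, e.g. -Dsolr.ulog.numVersionBuckets=131072 to reduce
+         version-bucket contention during high-volume indexing. -->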
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit>
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+       <openSearcher>false</openSearcher>
+     </autoCommit>
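+
+     <!-- Illustrative only: the property syntax above allows per-node tuning,
+          e.g. -Dsolr.autoCommit.maxTime=60000 hard-commits at most once per
+          minute; with openSearcher=false those commits stay invisible to
+          searches until a soft commit or searcher reopen. -->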
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+     <autoSoftCommit>
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+     </autoSoftCommit>
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+
+    <!-- Maximum number of clauses in each BooleanQuery,  an exception
+         is thrown if exceeded.  It is safe to increase or remove this setting,
+         since it is purely an arbitrary limit to try and catch user errors where
+         large boolean queries may not be the best implementation choice.
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Slow Query Threshold (in millis)
+
+         At high request rates, logging all requests can become a bottleneck
+         and therefore INFO logging is often turned off. However, it is still
+         useful to be able to set a latency threshold above which a request
+         is considered "slow" and log that request at WARN level so we can
+         easily identify slow queries.
+    -->
+    <slowQueryThresholdMillis>-1</slowQueryThresholdMillis>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation to use
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+
+        Caches results of searches - ordered lists of document ids
+        (DocList) based on a query, a sort, and the range of documents requested.
+        Additional supported parameter by LRUCache:
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy
+     -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Feature Values Cache
+
+         Cache used by the Learning To Rank (LTR) contrib module.
+
+         You will need to set the solr.ltr.enabled system property
+         when running solr to run with ltr enabled:
+           -Dsolr.ltr.enabled=true
+
+         https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <cache enable="${solr.ltr.enabled:false}" name="QUERY_DOC_FV"
+           class="solr.search.LRUCache"
+           size="4096"
+           initialSize="2048"
+           autowarmCount="4096"
+           regenerator="solr.search.NoOpRegenerator" />
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score"
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache.
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+    -->
+  <requestDispatcher>
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         Before enabling remote streaming, you should make sure your
+         system has authentication enabled.
+
+    <requestParsers enableRemoteStreaming="false"
+                    multipartUploadLimitInKB="-1"
+                    formdataUploadLimitInKB="-1"
+                    addHttpRequestToContext="false"/>
+      -->
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+
+         By default, no Cache-Control header is generated.
+
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler".  It delegates to a sequence
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+       <!-- Default search field
+          <str name="df">text</str>
+         -->
+       <!-- Change from JSON to XML format (the default prior to Solr 7.0)
+          <str name="wt">xml</str>
+         -->
+       <!-- Controls the distribution of a query to shards other than itself.
+            Consider making 'preferLocalShards' true when:
+              1) maxShardsPerNode > 1
+              2) Number of shards > 1
+              3) CloudSolrClient or LbHttpSolrServer is used by clients.
+            Without this option, every core broadcasts the distributed query to
+            a replica of each shard where the replicas are chosen randomly.
+            This option directs the cores to prefer cores hosted locally, thus
+            preventing network delays between machines.
+            This also keeps a bad/slow machine from slowing down all of
+            the good machines (if those good machines were querying this bad one).
+
+            Set this option to false for clients connecting through HttpSolrServer
+       -->
+       <bool name="preferLocalShards">false</bool>
+     </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+    </requestHandler>
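+
+  <!-- Illustrative only: against the snort schema in this configset, the
+       handler above serves requests such as
+         /select?q=msg:*&fq=source.type:snort&sort=timestamp desc
+       returning 10 rows by default per the setting above. -->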
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that the same Request Handler class
+       (SearchHandler) can be registered multiple times with different
+       names (and different init parameters)
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+
+       <!-- VelocityResponseWriter settings -->
+       <str name="wt">velocity</str>
+       <str name="v.template">browse</str>
+       <str name="v.layout">layout</str>
+       <str name="title">Solritas</str>
+
+       <!-- Query settings -->
+       <str name="defType">edismax</str>
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mm">100%</str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+
+       <str name="mlt.qf">
+         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
+       <int name="mlt.count">3</int>
+
+       <!-- Faceting defaults -->
+       <str name="facet">on</str>
+       <str name="facet.missing">true</str>
+       <str name="facet.field">cat</str>
+       <str name="facet.field">manu_exact</str>
+       <str name="facet.field">content_type</str>
+       <str name="facet.field">author_s</str>
+       <str name="facet.query">ipod</str>
+       <str name="facet.query">GB</str>
+       <str name="facet.mincount">1</str>
+       <str name="facet.pivot">cat,inStock</str>
+       <str name="facet.range.other">after</str>
+       <str name="facet.range">price</str>
+       <int name="f.price.facet.range.start">0</int>
+       <int name="f.price.facet.range.end">600</int>
+       <int name="f.price.facet.range.gap">50</int>
+       <str name="facet.range">popularity</str>
+       <int name="f.popularity.facet.range.start">0</int>
+       <int name="f.popularity.facet.range.end">10</int>
+       <int name="f.popularity.facet.range.gap">3</int>
+       <str name="facet.range">manufacturedate_dt</str>
+       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+       <str name="f.manufacturedate_dt.facet.range.other">before</str>
+       <str name="f.manufacturedate_dt.facet.range.other">after</str>
+
+       <!-- Highlighting defaults -->
+       <str name="hl">on</str>
+       <str name="hl.fl">content features title name</str>
+       <str name="hl.preserveMulti">true</str>
+       <str name="hl.encoder">html</str>
+       <str name="hl.simple.pre">&lt;b&gt;</str>
+       <str name="hl.simple.post">&lt;/b&gt;</str>
+       <str name="f.title.hl.fragsize">0</str>
+       <str name="f.title.hl.alternateField">title</str>
+       <str name="f.name.hl.fragsize">0</str>
+       <str name="f.name.hl.alternateField">name</str>
+       <str name="f.content.hl.snippets">3</str>
+       <str name="f.content.hl.fragsize">200</str>
+       <str name="f.content.hl.alternateField">content</str>
+       <str name="f.content.hl.maxAlternateFieldLength">750</str>
+
+       <!-- Spell checking defaults -->
+       <str name="spellcheck">on</str>
+       <str name="spellcheck.extendedResults">false</str>
+       <str name="spellcheck.count">5</str>
+       <str name="spellcheck.alternativeTermCount">2</str>
+       <str name="spellcheck.maxResultsForSuggest">5</str>
+       <str name="spellcheck.collate">true</str>
+       <str name="spellcheck.collateExtendedResults">true</str>
+       <str name="spellcheck.maxCollationTries">5</str>
+       <str name="spellcheck.maxCollations">3</str>
+     </lst>
+
+     <!-- append spellchecking to our list of components -->
+     <arr name="last-components">
+       <str>spellcheck</str>
+     </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse,update">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
+
+  <!-- The following are implicitly added
+  <requestHandler name="/update/json" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/json</str>
+       </lst>
+  </requestHandler>
+  <requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/csv</str>
+       </lst>
+  </requestHandler>
+  -->
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
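+  <!-- A hypothetical invocation (core name, file name, and field values are
+       examples only; literal.guid supplies the unique key, as in the Metron
+       schemas):
+       curl "http://localhost:8983/solr/mycore/update/extract?literal.guid=doc1&commit=true" \
+            -F "file=@report.pdf"
+       Tika-extracted field names are lowercased per "lowernames"; unknown
+       fields are prefixed with "ignored_" per "uprefix".
+    -->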
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+
+     -->
+
+   <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents a query term may appear in and still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+        <float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that uses an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      </lst>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           Collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
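+  <!-- Example request (core name and query text are illustrative):
+       http://localhost:8983/solr/mycore/spell?q=delll&wt=json
+       returns suggestions from the "default" checker plus word-break
+       alternatives from the "wordbreak" checker, with up to 5 collations
+       per the defaults above.
+    -->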
+
+  <!-- The SuggestComponent in Solr provides users with automatic suggestions for query terms.
+       You can use this to implement a powerful auto-suggest feature in your search application.
+       As with the rest of this solrconfig.xml file, the configuration of this component is purely
+       an example that applies specifically to this configset and example documents.
+
+       More information about this component and other configuration options are described in the
+       "Suggester" section of the reference guide available at
+       http://archive.apache.org/dist/lucene/solr/ref-guide
+    -->
+  <searchComponent name="suggest" class="solr.SuggestComponent">
+    <lst name="suggester">
+      <str name="name">mySuggester</str>
+      <str name="lookupImpl">FuzzyLookupFactory</str>
+      <str name="dictionaryImpl">DocumentDictionaryFactory</str>
+      <str name="field">cat</str>
+      <str name="weightField">price</str>
+      <str name="suggestAnalyzerFieldType">string</str>
+      <str name="buildOnStartup">false</str>
+    </lst>
+  </searchComponent>
+
+  <requestHandler name="/suggest" class="solr.SearchHandler"
+                  startup="lazy" >
+    <lst name="defaults">
+      <str name="suggest">true</str>
+      <str name="suggest.count">10</str>
+    </lst>
+    <arr name="components">
+      <str>suggest</str>
+    </arr>
+  </requestHandler>
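+  <!-- Example usage (illustrative): because buildOnStartup is false above,
+       the dictionary must be built once before querying, e.g.
+         /suggest?suggest.dictionary=mySuggester&suggest.build=true
+       then
+         /suggest?suggest.dictionary=mySuggester&suggest.q=elec
+       returns up to 10 suggestions drawn from the "cat" field, weighted
+       by "price".
+    -->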
+
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
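+  <!-- Example (illustrative): /tvrh?q=*:*&fl=name&tv.tf=true&tv.df=true
+       Note that term vectors are only returned for fields declared with
+       termVectors="true" in the schema.
+    -->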
+
+  <!-- Clustering Component
+
+       You'll need to set the solr.clustering.enabled system property
+       when running solr to run with clustering enabled:
+       -Dsolr.clustering.enabled=true
+
+       https://lucene.apache.org/solr/guide/result-clustering.html
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <!--
+    Declaration of "engines" (clustering algorithms).
+
+    The open source algorithms from Carrot2.org project:
+      * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+      * org.carrot2.clustering.stc.STCClusteringAlgorithm
+      * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+    See http://project.carrot2.org/algorithms.html for more information.
+
+    Commercial algorithm Lingo3G (needs to be installed separately):
+      * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
+    -->
+
+    <lst name="engine">
+      <str name="name">lingo3g</str>
+      <bool name="optional">true</bool>
+      <str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">lingo</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">kmeans</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component.
+       This is meant as an example.
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <bool name="clustering.results">true</bool>
+      <!-- Field name with the logical "title" of each document (optional) -->
+      <str name="carrot.title">name</str>
+      <!-- Field name with the logical "URL" of each document (optional) -->
+      <str name="carrot.url">id</str>
+      <!-- Field name with the logical "content" of each document (optional) -->
+      <str name="carrot.snippet">features</str>
+      <!-- Apply highlighter to the title/content and use this for clustering. -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <!-- Configure the remaining request handler parameters. -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">100</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
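+  <!-- Example (illustrative; requires -Dsolr.clustering.enabled=true at
+       startup): /clustering?q=*:*&rows=100&clustering.engine=lingo
+       groups the top 100 results into labeled clusters using the Lingo engine.
+    -->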
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
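+  <!-- Example (illustrative): /terms?terms.fl=source.type&terms.limit=10
+       lists up to 10 indexed terms from the source.type field with their
+       document frequencies.  distrib=false above keeps the request on one core.
+    -->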
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawgreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD (default), LINE or SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing the Locale object,  -->
+          <!-- which in turn is used when getting an instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For the purposes of the tutorial, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json" just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+    <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
+      <str name="template.base.dir">${velocity.template.base.dir:}</str>
+    </queryResponseWriter>
+
+
+  <!-- XSLT response writer transforms the XML output by any xslt file found
+       in Solr's conf/xslt directory.  Changes to xslt files are re-checked
+       once every xsltCacheLifetimeSeconds seconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
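+  <!-- Example (illustrative; example.xsl is a hypothetical stylesheet that
+       would need to exist in conf/xslt):
+       /select?q=*:*&wt=xslt&tr=example.xsl
+    -->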
+
+  <!-- Query Parsers
+
+       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+  <!--  LTR query parser
+
+        You will need to set the solr.ltr.enabled system property
+        when running solr to run with ltr enabled:
+          -Dsolr.ltr.enabled=true
+
+        https://lucene.apache.org/solr/guide/learning-to-rank.html
+
+        Query parser is used to rerank top docs with a provided model
+    -->
+  <queryParser enable="${solr.ltr.enabled:false}" name="ltr" class="org.apache.solr.ltr.search.LTRQParserPlugin"/>
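+  <!-- Example (illustrative; "myModel" is a hypothetical model that must be
+       uploaded to the LTR model store first):
+       /select?q=*:*&rq={!ltr model=myModel reRankDocs=100}&fl=*,score
+       reranks the top 100 documents using the stored model.
+    -->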
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+    <!--
+      LTR Transformer will encode the document features in the response. For each document the transformer
+      will add the features as an extra field in the response. The name of the field will be the
+      name of the transformer enclosed between brackets (in this case [features]).
+      In order to get the feature vector you will have to specify that you
+      want the field (e.g., fl="*,[features]")
+
+      You will need to set the solr.ltr.enabled system property
+      when running solr to run with ltr enabled:
+        -Dsolr.ltr.enabled=true
+
+      https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <transformer enable="${solr.ltr.enabled:false}" name="features" class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory">
+      <str name="fvCacheName">QUERY_DOC_FV</str>
+    </transformer>
+
+</config>
diff --git a/metron-platform/metron-solr/src/main/config/schema/yaf/schema.xml b/metron-platform/metron-solr/src/main/config/schema/yaf/schema.xml
new file mode 100644
index 0000000..5555a14
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/yaf/schema.xml
@@ -0,0 +1,99 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="yaf_doc" version="1.6">
+  <field name="_version_" type="plong" indexed="true" stored="true"/>
+  <field name="_root_" type="string" indexed="true" stored="false" docValues="false" />
+
+  <!-- Metron specific fields -->
+  <field name="timestamp" type="timestamp" indexed="true" stored="true" />
+  <field name="source.type" type="string" indexed="true" stored="true" />
+  <field name="guid" type="string" indexed="true" stored="true" required="true" multiValued="false" />
+  <uniqueKey>guid</uniqueKey>
+
+  <!-- Source/Destination information -->
+  <field name="ip_dst_addr" type="ip" indexed="true" stored="true" />
+  <field name="ip_dst_port" type="pint" indexed="true" stored="true" />
+  <field name="ip_src_addr" type="ip" indexed="true" stored="true" />
+  <field name="ip_src_port" type="pint" indexed="true" stored="true" />
+
+  <!-- Yaf specific fields -->
+  <field name="start_time" type="timestamp" indexed="true" stored="true" />
+  <field name="end_time" type="timestamp" indexed="true" stored="true" />
+  <field name="duration" type="pdouble" indexed="true" stored="true" />
+  <field name="rtt" type="pdouble" indexed="true" stored="true" />
+  <field name="proto" type="string" indexed="true" stored="true" />
+  <field name="sip" type="string" indexed="true" stored="true" />
+  <field name="sp" type="string" indexed="true" stored="true" />
+  <field name="dip" type="string" indexed="true" stored="true" />
+  <field name="dp" type="string" indexed="true" stored="true" />
+  <field name="iflags" type="string" indexed="true" stored="true" />
+  <field name="uflags" type="string" indexed="true" stored="true" />
+  <field name="riflags" type="string" indexed="true" stored="true" />
+  <field name="ruflags" type="string" indexed="true" stored="true" />
+  <field name="isn" type="string" indexed="true" stored="true" />
+  <field name="risn" type="string" indexed="true" stored="true" />
+  <field name="tag" type="string" indexed="true" stored="true" />
+  <field name="rtag" type="string" indexed="true" stored="true" />
+  <field name="pkt" type="pint" indexed="true" stored="true" />
+  <field name="oct" type="pint" indexed="true" stored="true" />
+  <field name="rpkt" type="pint" indexed="true" stored="true" />
+  <field name="roct" type="pint" indexed="true" stored="true" />
+  <field name="app" type="string" indexed="true" stored="true" />
+  <field name="end-reason" type="string" indexed="true" stored="true" />
+
+  <!-- Geo Enrichment Fields -->
+  <dynamicField name="*.location_point" type="location" multiValued="false" docValues="false"/>
+  <dynamicField name="*_coordinate" type="pdouble" indexed="true" stored="false" docValues="false"/>
+  <dynamicField name="*.country" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.city" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.locID" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.dmaCode" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.postalCode" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.latitude" type="pfloat" multiValued="false" docValues="true"/>
+  <dynamicField name="*.longitude" type="pfloat" multiValued="false" docValues="true"/>
+
+  <!-- Performance Debugging Fields -->
+  <dynamicField name="*.ts" type="timestamp" multiValued="false" docValues="true"/>
+
+  <!-- Threat Intel Scoring Fields -->
+  <field name="is_alert" type="boolean" indexed="true" stored="true" />
+  <dynamicField name="*score" type="pfloat" multiValued="false" docValues="true"/>
+  <dynamicField name="*.reason" type="string" multiValued="false" docValues="true"/>
+  <dynamicField name="*.name" type="string" multiValued="false" docValues="true"/>
+
+  <!-- Comments field required for the UI -->
+  <field name="comments" type="string" indexed="true" stored="true" multiValued="true"/>
+
+  <!-- Metaalerts Field -->
+  <field name="metaalerts" type="string" multiValued="true" indexed="true" stored="true"/>
+
+  <!-- Catch all: if we don't know about it, it gets dropped. -->
+  <dynamicField name="*" type="ignored" multiValued="false" docValues="true"/>
+
+  <!-- Type Definitions -->
+  <fieldType name="string" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="boolean" stored="true" indexed="true" multiValued="false" class="solr.BoolField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="pint" stored="true" indexed="true" multiValued="false" class="solr.TrieIntField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pfloat" stored="true" indexed="true" multiValued="false" class="solr.TrieFloatField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="plong" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="pdouble" stored="true" indexed="true" multiValued="false" class="solr.TrieDoubleField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+  <fieldType name="ip" stored="true" indexed="true" multiValued="false" class="solr.StrField" sortMissingLast="true" docValues="false"/>
+  <fieldType name="timestamp" stored="true" indexed="true" multiValued="false" class="solr.TrieLongField" sortMissingLast="false" docValues="true"/>
+  <fieldType name="ignored" stored="true" indexed="true" multiValued="true" class="solr.StrField" sortMissingLast="false" docValues="false"/>
+</schema>
diff --git a/metron-platform/metron-solr/src/main/config/schema/yaf/solrconfig.xml b/metron-platform/metron-solr/src/main/config/schema/yaf/solrconfig.xml
new file mode 100644
index 0000000..fff9d84
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/config/schema/yaf/solrconfig.xml
@@ -0,0 +1,1601 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>7.2.0</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath; this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along
+       with their external dependencies.
+    -->
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based and not persistent.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- Expert: Enabling the compound file format will use fewer files for the index,
+         using fewer file descriptors at the expense of decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was the LogByteSizeMergePolicy;
+         even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+          <double name="noCFSRatio">0.1</double>
+        </mergePolicyFactory>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is default for Solr3.6 and later, otherwise
+                   'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+      <!-- The number of commit points to be kept -->
+      <!-- <str name="maxCommitsToKeep">1</str> -->
+      <!-- The number of optimized commit points to be kept -->
+      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability,
+         and solr cloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing; this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit>
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+       <openSearcher>false</openSearcher>
+     </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+     <autoSoftCommit>
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+     </autoSoftCommit>
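+    <!-- A sketch of the "commitWithin" alternative mentioned above (core name
+         and document fields are illustrative):
+         curl "http://localhost:8983/solr/mycore/update?commitWithin=10000" \
+              -H "Content-type: application/json" \
+              -d "[{\"guid\":\"doc1\",\"source.type\":\"example\"}]"
+         asks Solr to make the document visible within 10 seconds without
+         forcing an immediate hard commit.
+      -->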
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+
+    <!-- Maximum number of clauses in each BooleanQuery; an exception
+         is thrown if it is exceeded.  It is safe to increase or remove this setting,
+         since it is purely an arbitrary limit meant to catch user errors where
+         large boolean queries may not be the best implementation choice.
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Slow Query Threshold (in millis)
+
+         At high request rates, logging all requests can become a bottleneck
+         and therefore INFO logging is often turned off. However, it is still
+         useful to be able to set a latency threshold above which a request
+         is considered "slow" and log that request at WARN level so we can
+         easily identify slow queries.
+    -->
+    <slowQueryThresholdMillis>-1</slowQueryThresholdMillis>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation to use
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
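+
+    <!-- A sketch of the same cache bounded by memory rather than entry
+         count; per the note above, size and initialSize are ignored when
+         maxRamMB is set (the 64MB figure is illustrative):
+
+         <filterCache class="solr.FastLRUCache"
+                      maxRamMB="64"
+                      autowarmCount="0"/>
+      -->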
+
+    <!-- Query Result Cache
+
+        Caches results of searches - ordered lists of document ids
+        (DocList) based on a query, a sort, and the range of documents requested.
+        Additional parameter supported by LRUCache:
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy
+     -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Feature Values Cache
+
+         Cache used by the Learning To Rank (LTR) contrib module.
+
+         You will need to set the solr.ltr.enabled system property
+         when running solr to run with ltr enabled:
+           -Dsolr.ltr.enabled=true
+
+         https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <cache enable="${solr.ltr.enabled:false}" name="QUERY_DOC_FV"
+           class="solr.search.LRUCache"
+           size="4096"
+           initialSize="2048"
+           autowarmCount="4096"
+           regenerator="solr.search.NoOpRegenerator" />
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score".
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache.
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+    -->
+  <requestDispatcher>
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         Before enabling remote streaming, you should make sure your
+         system has authentication enabled.
+
+    <requestParsers enableRemoteStreaming="false"
+                    multipartUploadLimitInKB="-1"
+                    formdataUploadLimitInKB="-1"
+                    addHttpRequestToContext="false"/>
+      -->
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+
+         By default, no Cache-Control header is generated.
+
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler".  It delegates to a sequence
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards.
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+       <!-- Default search field
+          <str name="df">text</str>
+         -->
+       <!-- Change from JSON to XML format (the default prior to Solr 7.0)
+          <str name="wt">xml</str>
+         -->
+       <!-- Controls whether a query is distributed to shards hosted on
+            nodes other than the one receiving the request.
+            Consider making 'preferLocalShards' true when:
+              1) maxShardsPerNode > 1
+              2) Number of shards > 1
+              3) CloudSolrClient or LbHttpSolrServer is used by clients.
+            Without this option, every core broadcasts the distributed query to
+            a replica of each shard, with the replicas chosen randomly.
+            This option directs the cores to prefer replicas hosted locally,
+            thus avoiding network delays between machines.
+            It also keeps a bad/slow machine from slowing down all the good
+            machines (if those good machines were querying this bad one).
+
+            Set this option to false for clients connecting through HttpSolrServer.
+       -->
+       <bool name="preferLocalShards">false</bool>
+     </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+    </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
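+
+  <!-- e.g. a request such as /query?q=*:* is answered with indented JSON
+       using the defaults above (the query string is illustrative). -->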
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that the same Request Handler class (SearchHandler) can be
+       registered multiple times with different names (and different
+       init parameters)
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+
+       <!-- VelocityResponseWriter settings -->
+       <str name="wt">velocity</str>
+       <str name="v.template">browse</str>
+       <str name="v.layout">layout</str>
+       <str name="title">Solritas</str>
+
+       <!-- Query settings -->
+       <str name="defType">edismax</str>
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mm">100%</str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+
+       <str name="mlt.qf">
+         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
+       <int name="mlt.count">3</int>
+
+       <!-- Faceting defaults -->
+       <str name="facet">on</str>
+       <str name="facet.missing">true</str>
+       <str name="facet.field">cat</str>
+       <str name="facet.field">manu_exact</str>
+       <str name="facet.field">content_type</str>
+       <str name="facet.field">author_s</str>
+       <str name="facet.query">ipod</str>
+       <str name="facet.query">GB</str>
+       <str name="facet.mincount">1</str>
+       <str name="facet.pivot">cat,inStock</str>
+       <str name="facet.range.other">after</str>
+       <str name="facet.range">price</str>
+       <int name="f.price.facet.range.start">0</int>
+       <int name="f.price.facet.range.end">600</int>
+       <int name="f.price.facet.range.gap">50</int>
+       <str name="facet.range">popularity</str>
+       <int name="f.popularity.facet.range.start">0</int>
+       <int name="f.popularity.facet.range.end">10</int>
+       <int name="f.popularity.facet.range.gap">3</int>
+       <str name="facet.range">manufacturedate_dt</str>
+       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+       <str name="f.manufacturedate_dt.facet.range.other">before</str>
+       <str name="f.manufacturedate_dt.facet.range.other">after</str>
+
+       <!-- Highlighting defaults -->
+       <str name="hl">on</str>
+       <str name="hl.fl">content features title name</str>
+       <str name="hl.preserveMulti">true</str>
+       <str name="hl.encoder">html</str>
+       <str name="hl.simple.pre">&lt;b&gt;</str>
+       <str name="hl.simple.post">&lt;/b&gt;</str>
+       <str name="f.title.hl.fragsize">0</str>
+       <str name="f.title.hl.alternateField">title</str>
+       <str name="f.name.hl.fragsize">0</str>
+       <str name="f.name.hl.alternateField">name</str>
+       <str name="f.content.hl.snippets">3</str>
+       <str name="f.content.hl.fragsize">200</str>
+       <str name="f.content.hl.alternateField">content</str>
+       <str name="f.content.hl.maxAlternateFieldLength">750</str>
+
+       <!-- Spell checking defaults -->
+       <str name="spellcheck">on</str>
+       <str name="spellcheck.extendedResults">false</str>
+       <str name="spellcheck.count">5</str>
+       <str name="spellcheck.alternativeTermCount">2</str>
+       <str name="spellcheck.maxResultsForSuggest">5</str>
+       <str name="spellcheck.collate">true</str>
+       <str name="spellcheck.collateExtendedResults">true</str>
+       <str name="spellcheck.maxCollationTries">5</str>
+       <str name="spellcheck.maxCollations">3</str>
+     </lst>
+
+     <!-- append spellchecking to our list of components -->
+     <arr name="last-components">
+       <str>spellcheck</str>
+     </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse,update">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
+
+  <!-- The following are implicitly added
+  <requestHandler name="/update/json" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/json</str>
+       </lst>
+  </requestHandler>
+  <requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/csv</str>
+       </lst>
+  </requestHandler>
+  -->
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+
+     -->
+
+   <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure to use; the default is the internal Levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents in which a query term may appear and still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+        <float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that uses an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      </lst>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
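+
+  <!-- An illustrative request against this handler; the misspelled query
+       term is hypothetical, and the parameters simply echo the defaults
+       declared above:
+
+       /spell?q=delll&spellcheck=true&spellcheck.collateExtendedResults=true
+    -->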
+
+  <!-- The SuggestComponent in Solr provides users with automatic suggestions for query terms.
+       You can use this to implement a powerful auto-suggest feature in your search application.
+       As with the rest of this solrconfig.xml file, the configuration of this component is purely
+       an example that applies specifically to this configset and example documents.
+
+       More information about this component and other configuration options are described in the
+       "Suggester" section of the reference guide available at
+       http://archive.apache.org/dist/lucene/solr/ref-guide
+    -->
+  <searchComponent name="suggest" class="solr.SuggestComponent">
+    <lst name="suggester">
+      <str name="name">mySuggester</str>
+      <str name="lookupImpl">FuzzyLookupFactory</str>
+      <str name="dictionaryImpl">DocumentDictionaryFactory</str>
+      <str name="field">cat</str>
+      <str name="weightField">price</str>
+      <str name="suggestAnalyzerFieldType">string</str>
+      <str name="buildOnStartup">false</str>
+    </lst>
+  </searchComponent>
+
+  <requestHandler name="/suggest" class="solr.SearchHandler"
+                  startup="lazy" >
+    <lst name="defaults">
+      <str name="suggest">true</str>
+      <str name="suggest.count">10</str>
+    </lst>
+    <arr name="components">
+      <str>suggest</str>
+    </arr>
+  </requestHandler>
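+
+  <!-- An illustrative call; the prefix is hypothetical, and on first use
+       the suggester dictionary must be built (e.g. with suggest.build=true)
+       since buildOnStartup is false above:
+
+       /suggest?suggest=true&suggest.dictionary=mySuggester&suggest.q=elec
+    -->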
+
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Clustering Component
+
+       You'll need to set the solr.clustering.enabled system property
+       when running solr to run with clustering enabled:
+       -Dsolr.clustering.enabled=true
+
+       https://lucene.apache.org/solr/guide/result-clustering.html
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <!--
+    Declaration of "engines" (clustering algorithms).
+
+    The open source algorithms from Carrot2.org project:
+      * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+      * org.carrot2.clustering.stc.STCClusteringAlgorithm
+      * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+    See http://project.carrot2.org/algorithms.html for more information.
+
+    Commercial algorithm Lingo3G (needs to be installed separately):
+      * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
+    -->
+
+    <lst name="engine">
+      <str name="name">lingo3g</str>
+      <bool name="optional">true</bool>
+      <str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">lingo</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">kmeans</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component.
+       This is meant as an example.
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <bool name="clustering.results">true</bool>
+      <!-- Field name with the logical "title" of each document (optional) -->
+      <str name="carrot.title">name</str>
+      <!-- Field name with the logical "URL" of each document (optional) -->
+      <str name="carrot.url">id</str>
+      <!-- Field name with the logical "content" of each document (optional) -->
+      <str name="carrot.snippet">features</str>
+      <!-- Apply the highlighter to the title/content and use this for clustering. -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <!-- Configure the remaining request handler parameters. -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">100</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
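+
+  <!-- e.g. /terms?terms.fl=cat&terms.limit=10 would list the most frequent
+       terms in the "cat" field (parameter values are illustrative). -->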
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawngreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD (default), LINE or SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing the Locale object, -->
+          <!-- which in turn is used to get an instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For the purposes of the tutorial, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json", just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+    <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
+      <str name="template.base.dir">${velocity.template.base.dir:}</str>
+    </queryResponseWriter>
+
+
+  <!-- The XSLT response writer transforms the XML output using any xslt file
+       found in Solr's conf/xslt directory.  Changes to xslt files are
+       re-checked every xsltCacheLifetimeSeconds seconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
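+
+  <!-- e.g. adding wt=xslt&tr=example.xsl to a request would apply
+       conf/xslt/example.xsl to the XML response (the file name is
+       hypothetical). -->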
+
+  <!-- Query Parsers
+
+       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+  <!--  LTR query parser
+
+        You will need to set the solr.ltr.enabled system property
+        when running solr to run with ltr enabled:
+          -Dsolr.ltr.enabled=true
+
+        https://lucene.apache.org/solr/guide/learning-to-rank.html
+
+        This query parser is used to rerank the top documents with a provided model.
+    -->
+  <queryParser enable="${solr.ltr.enabled:false}" name="ltr" class="org.apache.solr.ltr.search.LTRQParserPlugin"/>
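+
+  <!-- An illustrative rerank request using this parser; the model name is
+       hypothetical and must have been uploaded to the LTR model store
+       beforehand:
+
+       q=ipod&rq={!ltr model=myModel reRankDocs=100 efi.user_query='ipod'}
+    -->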
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+    <!--
+      LTR Transformer will encode the document features in the response. For each document the transformer
+      will add the features as an extra field in the response. The name of the field will be the
+      name of the transformer enclosed between brackets (in this case [features]).
+      In order to get the feature vector you will have to specify that you
+      want the field (e.g., fl="*,[features]")
+
+      You will need to set the solr.ltr.enabled system property
+      when running solr to run with ltr enabled:
+        -Dsolr.ltr.enabled=true
+
+      https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <transformer enable="${solr.ltr.enabled:false}" name="features" class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory">
+      <str name="fvCacheName">QUERY_DOC_FV</str>
+    </transformer>
+
+</config>
diff --git a/metron-platform/metron-solr/src/main/config/solr.properties.j2 b/metron-platform/metron-solr/src/main/config/solr.properties.j2
index 00ad9dc..170aef3 100644
--- a/metron-platform/metron-solr/src/main/config/solr.properties.j2
+++ b/metron-platform/metron-solr/src/main/config/solr.properties.j2
@@ -35,7 +35,7 @@
 indexing.error.topic={{indexing_error_topic}}
 
 ##### Indexing #####
-indexing.writer.class.name={{ra_indexing_writer_class_name}}
+indexing.writer.class.name=org.apache.metron.solr.writer.SolrWriter
 
 ##### Parallelism #####
 kafka.spout.parallelism={{ra_indexing_kafka_spout_parallelism}}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/SolrConstants.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/SolrConstants.java
index d5dc7a0..56f1413 100644
--- a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/SolrConstants.java
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/SolrConstants.java
@@ -26,4 +26,6 @@
   public static final String REQUEST_COLLECTION_CONFIG_NAME = "collection.configName";
   public static final String REQUEST_COLLECTIONS_PATH = "/admin/collections";
   public static final String RESPONSE_COLLECTIONS = "collections";
+  public static final String SOLR_WRITER_NAME = "solr";
+  public static final String SOLR_ZOOKEEPER = "solr.zookeeper";
 }
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrColumnMetadataDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrColumnMetadataDao.java
new file mode 100644
index 0000000..22c6efa
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrColumnMetadataDao.java
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import com.google.common.collect.Sets;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.apache.metron.indexing.dao.ColumnMetadataDao;
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.request.LukeRequest;
+import org.apache.solr.client.solrj.request.schema.SchemaRequest;
+import org.apache.solr.client.solrj.response.LukeResponse;
+import org.apache.solr.client.solrj.response.schema.SchemaRepresentation;
+import org.apache.solr.common.SolrException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SolrColumnMetadataDao implements ColumnMetadataDao {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static Map<String, FieldType> solrTypeMap;
+
+  static {
+    Map<String, FieldType> fieldTypeMap = new HashMap<>();
+    fieldTypeMap.put("string", FieldType.TEXT);
+    fieldTypeMap.put("pint", FieldType.INTEGER);
+    fieldTypeMap.put("plong", FieldType.LONG);
+    fieldTypeMap.put("pfloat", FieldType.FLOAT);
+    fieldTypeMap.put("pdouble", FieldType.DOUBLE);
+    fieldTypeMap.put("boolean", FieldType.BOOLEAN);
+    fieldTypeMap.put("ip", FieldType.IP);
+    solrTypeMap = Collections.unmodifiableMap(fieldTypeMap);
+  }
+
+  private transient SolrClient client;
+
+  public SolrColumnMetadataDao(SolrClient client) {
+    this.client = client;
+  }
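+
+  /* A minimal usage sketch (collection names and the ZooKeeper address are
+     hypothetical; assumes a reachable SolrCloud cluster):
+
+       SolrClient client = new CloudSolrClient.Builder(
+           Collections.singletonList("node1:2181"), Optional.empty()).build();
+       Map<String, FieldType> metadata =
+           new SolrColumnMetadataDao(client).getColumnMetadata(
+               Arrays.asList("bro", "snort"));
+  */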
+
+  @Override
+  public Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException {
+    Map<String, FieldType> indexColumnMetadata = new HashMap<>();
+    Map<String, String> previousIndices = new HashMap<>();
+    Set<String> fieldBlackList = Sets.newHashSet(SolrDao.ROOT_FIELD, SolrDao.VERSION_FIELD);
+
+    for (String index : indices) {
+      try {
+        getIndexFields(index).forEach(field -> {
+          String name = (String) field.get("name");
+          if (!fieldBlackList.contains(name)) {
+            FieldType type = toFieldType((String) field.get("type"));
+            if (!indexColumnMetadata.containsKey(name)) {
+              indexColumnMetadata.put(name, type);
+
+              // record the last index in which a field exists, to be able to print a helpful error message on type mismatch
+              previousIndices.put(name, index);
+            } else {
+              FieldType previousType = indexColumnMetadata.get(name);
+              if (!type.equals(previousType)) {
+                String previousIndexName = previousIndices.get(name);
+                LOG.error(String.format(
+                    "Field type mismatch: %s.%s has type %s while %s.%s has type %s.  Defaulting type to %s.",
+                    index, name, type.getFieldType(),
+                    previousIndexName, name, previousType.getFieldType(),
+                    FieldType.OTHER.getFieldType()));
+                indexColumnMetadata.put(name, FieldType.OTHER);
+
+                // the field is defined in multiple indices with different types; ignore the field as type has been set to OTHER
+                fieldBlackList.add(name);
+              }
+            }
+          }
+        });
+      } catch (SolrServerException e) {
+        throw new IOException(e);
+      } catch (SolrException e) {
+        // 400 means an index is missing so continue
+        if (e.code() != 400) {
+          throw new IOException(e);
+        }
+      }
+    }
+    return indexColumnMetadata;
+  }
+
+  protected List<Map<String, Object>> getIndexFields(String index)
+      throws IOException, SolrServerException {
+    List<Map<String, Object>> indexFields = new ArrayList<>();
+
+    // Get all the fields in use, including dynamic fields
+    LukeRequest lukeRequest = new LukeRequest();
+    LukeResponse lukeResponse = lukeRequest.process(client, index);
+    for (Entry<String, LukeResponse.FieldInfo> field : lukeResponse.getFieldInfo().entrySet()) {
+      Map<String, Object> fieldData = new HashMap<>();
+      fieldData.put("name", field.getValue().getName());
+      fieldData.put("type", field.getValue().getType());
+      indexFields.add(fieldData);
+    }
+
+    // Get all the schema fields
+    SchemaRepresentation schemaRepresentation = new SchemaRequest().process(client, index)
+        .getSchemaRepresentation();
+    indexFields.addAll(schemaRepresentation.getFields());
+
+    return indexFields;
+  }
+
+  /**
+   * Converts a string type to the corresponding FieldType.
+   *
+   * @param type The type to convert.
+   * @return The corresponding FieldType or FieldType.OTHER, if no match.
+   */
+  private FieldType toFieldType(String type) {
+    return solrTypeMap.getOrDefault(type, FieldType.OTHER);
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrDao.java
new file mode 100644
index 0000000..a840bb4
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrDao.java
@@ -0,0 +1,207 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+
+import com.google.common.base.Splitter;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.ColumnMetadataDao;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
+import org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
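+/**
+ * An {@link IndexDao} implementation backed by SolrCloud.
+ *
+ * <p>A minimal usage sketch (illustrative only; the config, sensor name, and GUID are
+ * hypothetical):
+ * <pre>{@code
+ * SolrDao dao = new SolrDao();
+ * dao.init(accessConfig); // builds a CloudSolrClient from the ZK hosts in the global config
+ * Document latest = dao.getLatest("some-guid", "bro");
+ * }</pre>
+ */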
+public class SolrDao implements IndexDao {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  public static final String ROOT_FIELD = "_root_";
+  public static final String VERSION_FIELD = "_version_";
+
+  private transient SolrClient client;
+  private SolrSearchDao solrSearchDao;
+  private SolrUpdateDao solrUpdateDao;
+  private SolrRetrieveLatestDao solrRetrieveLatestDao;
+  private ColumnMetadataDao solrColumnMetadataDao;
+
+  private AccessConfig accessConfig;
+
+  protected SolrDao(SolrClient client,
+      AccessConfig config,
+      SolrSearchDao solrSearchDao,
+      SolrUpdateDao solrUpdateDao,
+      SolrRetrieveLatestDao retrieveLatestDao,
+      SolrColumnMetadataDao solrColumnMetadataDao) {
+    this.client = client;
+    this.accessConfig = config;
+    this.solrSearchDao = solrSearchDao;
+    this.solrUpdateDao = solrUpdateDao;
+    this.solrRetrieveLatestDao = retrieveLatestDao;
+    this.solrColumnMetadataDao = solrColumnMetadataDao;
+  }
+
+  public SolrDao() {
+    //uninitialized.
+  }
+
+  @Override
+  public void init(AccessConfig config) {
+    if (config.getKerberosEnabled()) {
+      enableKerberos();
+    }
+    if (this.client == null) {
+      this.accessConfig = config;
+      this.client = getSolrClient(getZkHosts());
+      this.solrSearchDao = new SolrSearchDao(this.client, this.accessConfig);
+      this.solrRetrieveLatestDao = new SolrRetrieveLatestDao(this.client, this.accessConfig);
+      this.solrUpdateDao = new SolrUpdateDao(this.client, this.solrRetrieveLatestDao, this.accessConfig);
+      this.solrColumnMetadataDao = new SolrColumnMetadataDao(this.client);
+    }
+  }
+
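+  // Illustrative (hypothetical names): getIndex("bro", Optional.empty()) consults the configured
+  // index supplier, while getIndex("bro", Optional.of("bro_custom")) returns the explicit index.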
+  public Optional<String> getIndex(String sensorName, Optional<String> index) {
+    if (index.isPresent()) {
+      return index;
+    } else {
+      String realIndex = accessConfig.getIndexSupplier().apply(sensorName);
+      return Optional.ofNullable(realIndex);
+    }
+  }
+
+  @Override
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    return this.solrSearchDao.search(searchRequest);
+  }
+
+  @Override
+  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+    return this.solrSearchDao.group(groupRequest);
+  }
+
+  @Override
+  public Document getLatest(String guid, String sensorType) throws IOException {
+    return this.solrRetrieveLatestDao.getLatest(guid, sensorType);
+  }
+
+  @Override
+  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
+    return this.solrRetrieveLatestDao.getAllLatest(getRequests);
+  }
+
+  @Override
+  public void update(Document update, Optional<String> index) throws IOException {
+    this.solrUpdateDao.update(update, index);
+  }
+
+  @Override
+  public void batchUpdate(Map<Document, Optional<String>> updates) throws IOException {
+    this.solrUpdateDao.batchUpdate(updates);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    this.solrUpdateDao.addCommentToAlert(request);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    this.solrUpdateDao.removeCommentFromAlert(request);
+  }
+
+  @Override
+  public void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+      Optional<Long> timestamp)
+      throws OriginalNotFoundException, IOException {
+    solrUpdateDao.patch(retrieveLatestDao, request, timestamp);
+  }
+
+  @Override
+  public Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException {
+    return this.solrColumnMetadataDao.getColumnMetadata(indices);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    this.solrUpdateDao.addCommentToAlert(request, latest);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    this.solrUpdateDao.removeCommentFromAlert(request, latest);
+  }
+
+  /**
+   * Builds a Solr client using the ZK hosts from the global config.
+   * @return SolrClient
+   */
+  public SolrClient getSolrClient() {
+    return new CloudSolrClient.Builder().withZkHost(getZkHosts()).build();
+  }
+
+  /**
+   * Builds a Solr client using the ZK hosts specified.
+   * @param zkHosts The ZooKeeper hosts used to locate the SolrCloud cluster.
+   * @return SolrClient
+   */
+  public SolrClient getSolrClient(List<String> zkHosts) {
+    return new CloudSolrClient.Builder().withZkHost(zkHosts).build();
+  }
+
+  /**
+   * Gets the ZK hosts from the global config, splitting the comma-separated host string.
+   * @return The list of ZK hosts.
+   */
+  public List<String> getZkHosts() {
+    Map<String, Object> globalConfig = accessConfig.getGlobalConfigSupplier().get();
+    return Splitter.on(',').trimResults()
+        .splitToList((String) globalConfig.getOrDefault(SOLR_ZOOKEEPER, ""));
+  }
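+
+  // Illustrative (hypothetical hosts): a global config value under SOLR_ZOOKEEPER of
+  // "zk1:2181, zk2:2181" yields ["zk1:2181", "zk2:2181"] from getZkHosts() above, after
+  // splitting and trimming.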
+
+  void enableKerberos() {
+    HttpClientUtil.addConfigurer(new Krb5HttpClientConfigurer());
+  }
+
+  public SolrSearchDao getSolrSearchDao() {
+    return solrSearchDao;
+  }
+
+  public SolrUpdateDao getSolrUpdateDao() {
+    return solrUpdateDao;
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertDao.java
new file mode 100644
index 0000000..4748315
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertDao.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Supplier;
+
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.MultiIndexDao;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.InvalidCreateException;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.apache.solr.client.solrj.SolrClient;
+
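+/**
+ * A {@link MetaAlertDao} for Solr. Wraps an {@link IndexDao} that is, or contains, a SolrDao and
+ * delegates to meta alert aware search, update, and retrieval DAOs.
+ */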
+public class SolrMetaAlertDao implements MetaAlertDao {
+
+  public static final String METAALERTS_COLLECTION = "metaalert";
+
+  private IndexDao indexDao;
+  private SolrDao solrDao;
+  private SolrMetaAlertSearchDao metaAlertSearchDao;
+  private SolrMetaAlertUpdateDao metaAlertUpdateDao;
+  private SolrMetaAlertRetrieveLatestDao metaAlertRetrieveLatestDao;
+  protected String metaAlertsCollection = METAALERTS_COLLECTION;
+  protected String threatSort = MetaAlertConstants.THREAT_SORT_DEFAULT;
+
+  /**
+   * Wraps an {@link org.apache.metron.indexing.dao.IndexDao} to handle meta alerts.
+   * @param indexDao The Dao to wrap
+   * @param metaAlertSearchDao The meta alert aware search DAO
+   * @param metaAlertUpdateDao The meta alert aware update DAO
+   * @param metaAlertRetrieveLatestDao The meta alert aware retrieval DAO
+   */
+  public SolrMetaAlertDao(IndexDao indexDao, SolrMetaAlertSearchDao metaAlertSearchDao,
+      SolrMetaAlertUpdateDao metaAlertUpdateDao,
+      SolrMetaAlertRetrieveLatestDao metaAlertRetrieveLatestDao) {
+    this(indexDao, metaAlertSearchDao, metaAlertUpdateDao, metaAlertRetrieveLatestDao,
+        METAALERTS_COLLECTION,
+        MetaAlertConstants.THREAT_SORT_DEFAULT);
+  }
+
+  /**
+   * Wraps an {@link org.apache.metron.indexing.dao.IndexDao} to handle meta alerts.
+   * @param indexDao The Dao to wrap
+   * @param metaAlertSearchDao The meta alert aware search DAO
+   * @param metaAlertUpdateDao The meta alert aware update DAO
+   * @param metaAlertRetrieveLatestDao The meta alert aware retrieval DAO
+   * @param metaAlertsCollection The Solr collection holding the meta alerts
+   * @param threatSort The summary aggregation of all child threat triage scores used
+   *                   as the overall threat triage score for the metaalert. This
+   *                   can be one of max, min, average, count, median, or sum.
+   */
+  public SolrMetaAlertDao(IndexDao indexDao, SolrMetaAlertSearchDao metaAlertSearchDao,
+      SolrMetaAlertUpdateDao metaAlertUpdateDao,
+      SolrMetaAlertRetrieveLatestDao metaAlertRetrieveLatestDao,
+      String metaAlertsCollection,
+      String threatSort) {
+    init(indexDao, Optional.of(threatSort));
+    this.metaAlertSearchDao = metaAlertSearchDao;
+    this.metaAlertUpdateDao = metaAlertUpdateDao;
+    this.metaAlertRetrieveLatestDao = metaAlertRetrieveLatestDao;
+    this.metaAlertsCollection = metaAlertsCollection;
+    this.threatSort = threatSort;
+  }
+
+  public SolrMetaAlertDao() {
+    //uninitialized.
+  }
+
+  /**
+   * Initializes this implementation by setting the supplied IndexDao and also setting a separate SolrDao.
+   * This is needed for some specific Solr functions (looking up an index from a GUID for example).
+   * @param indexDao The DAO to wrap for our queries
+   * @param threatSort The summary aggregation of the child threat triage scores used
+   *                   as the overall threat triage score for the metaalert. This
+   *                   can be one of max, min, average, count, median, or sum.
+   */
+  @Override
+  public void init(IndexDao indexDao, Optional<String> threatSort) {
+    if (indexDao instanceof MultiIndexDao) {
+      this.indexDao = indexDao;
+      MultiIndexDao multiIndexDao = (MultiIndexDao) indexDao;
+      for (IndexDao childDao : multiIndexDao.getIndices()) {
+        if (childDao instanceof SolrDao) {
+          this.solrDao = (SolrDao) childDao;
+        }
+      }
+    } else if (indexDao instanceof SolrDao) {
+      this.indexDao = indexDao;
+      this.solrDao = (SolrDao) indexDao;
+    } else {
+      throw new IllegalArgumentException(
+          "Need a SolrDao when using SolrMetaAlertDao"
+      );
+    }
+    // Resolve the requested threat sort first, so the config below captures the final value.
+    if (threatSort.isPresent()) {
+      this.threatSort = threatSort.get();
+    }
+
+    Supplier<Map<String, Object>> globalConfigSupplier = () -> new HashMap<>();
+    if (metaAlertSearchDao != null && metaAlertSearchDao.solrSearchDao != null
+        && metaAlertSearchDao.solrSearchDao.getAccessConfig() != null) {
+      globalConfigSupplier =
+          metaAlertSearchDao.solrSearchDao.getAccessConfig().getGlobalConfigSupplier();
+    }
+
+    MetaAlertConfig config = new MetaAlertConfig(
+        metaAlertsCollection,
+        this.threatSort,
+        globalConfigSupplier
+    ) {
+      @Override
+      protected String getDefaultThreatTriageField() {
+        return MetaAlertConstants.THREAT_FIELD_DEFAULT.replace(':', '.');
+      }
+
+      @Override
+      protected String getDefaultSourceTypeField() {
+        return Constants.SENSOR_TYPE;
+      }
+    };
+
+    SolrClient solrClient = solrDao.getSolrClient(solrDao.getZkHosts());
+    this.metaAlertSearchDao =
+        new SolrMetaAlertSearchDao(solrClient, solrDao.getSolrSearchDao(), config);
+    this.metaAlertRetrieveLatestDao = new SolrMetaAlertRetrieveLatestDao(solrDao);
+    this.metaAlertUpdateDao = new SolrMetaAlertUpdateDao(
+        solrDao,
+        metaAlertSearchDao,
+        metaAlertRetrieveLatestDao,
+        config);
+  }
+
+  @Override
+  public void init(AccessConfig config) {
+    // Do nothing. We're just wrapping a child dao
+  }
+
+  @Override
+  public Map<String, FieldType> getColumnMetadata(List<String> indices) throws IOException {
+    return indexDao.getColumnMetadata(indices);
+  }
+
+  @Override
+  public Document getLatest(String guid, String sensorType) throws IOException {
+    return metaAlertRetrieveLatestDao.getLatest(guid, sensorType);
+  }
+
+  @Override
+  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
+    return metaAlertRetrieveLatestDao.getAllLatest(getRequests);
+  }
+
+  @Override
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    return metaAlertSearchDao.search(searchRequest);
+  }
+
+  @Override
+  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+    return metaAlertSearchDao.group(groupRequest);
+  }
+
+  @Override
+  public void update(Document update, Optional<String> index) throws IOException {
+    metaAlertUpdateDao.update(update, index);
+  }
+
+  @Override
+  public void batchUpdate(Map<Document, Optional<String>> updates) {
+    metaAlertUpdateDao.batchUpdate(updates);
+  }
+
+  @Override
+  public void patch(RetrieveLatestDao retrieveLatestDao, PatchRequest request,
+      Optional<Long> timestamp)
+      throws OriginalNotFoundException, IOException {
+    metaAlertUpdateDao.patch(retrieveLatestDao, request, timestamp);
+  }
+
+  @Override
+  public SearchResponse getAllMetaAlertsForAlert(String guid) throws InvalidSearchException {
+    return metaAlertSearchDao.getAllMetaAlertsForAlert(guid);
+  }
+
+  @Override
+  public MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
+      throws InvalidCreateException, IOException {
+    return metaAlertUpdateDao.createMetaAlert(request);
+  }
+
+  @Override
+  public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException {
+    return metaAlertUpdateDao.addAlertsToMetaAlert(metaAlertGuid, alertRequests);
+  }
+
+  @Override
+  public boolean removeAlertsFromMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException {
+    return metaAlertUpdateDao.removeAlertsFromMetaAlert(metaAlertGuid, alertRequests);
+  }
+
+  @Override
+  public boolean updateMetaAlertStatus(String metaAlertGuid, MetaAlertStatus status)
+      throws IOException {
+    return metaAlertUpdateDao.updateMetaAlertStatus(metaAlertGuid, status);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    solrDao.addCommentToAlert(request);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    solrDao.removeCommentFromAlert(request);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    solrDao.addCommentToAlert(request, latest);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    solrDao.removeCommentFromAlert(request, latest);
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertRetrieveLatestDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertRetrieveLatestDao.java
new file mode 100644
index 0000000..2797df2
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertRetrieveLatestDao.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.solr.dao.SolrMetaAlertDao.METAALERTS_COLLECTION;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertRetrieveLatestDao;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrDocument;
+
+public class SolrMetaAlertRetrieveLatestDao implements
+    MetaAlertRetrieveLatestDao {
+
+  private SolrDao solrDao;
+
+  public SolrMetaAlertRetrieveLatestDao(SolrDao solrDao) {
+    this.solrDao = solrDao;
+  }
+
+  @Override
+  public Document getLatest(String guid, String sensorType) throws IOException {
+    if (MetaAlertConstants.METAALERT_TYPE.equals(sensorType)) {
+      // Unfortunately, we can't just defer to the wrapped SolrDao here: the child alerts are
+      // stored as nested documents in Solr and have to be pulled out explicitly.
+      String guidClause = Constants.GUID + ":" + guid;
+      SolrQuery query = new SolrQuery();
+      query.setQuery(guidClause)
+          .setFields("*", "[child parentFilter=" + guidClause + " limit=999]");
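+      // Illustrative (hypothetical GUID): for guid "abc" this issues roughly
+      //   q=guid:abc&fl=*,[child parentFilter=guid:abc limit=999]
+      // returning the metaalert document together with its nested child alerts.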
+
+      try {
+        QueryResponse response = solrDao.getSolrClient(solrDao.getZkHosts())
+            .query(METAALERTS_COLLECTION, query);
+        // GUID is unique, so it's definitely the first result
+        if (response.getResults().size() == 1) {
+          SolrDocument result = response.getResults().get(0);
+
+          return SolrUtilities.toDocument(result);
+        } else {
+          return null;
+        }
+      } catch (SolrServerException e) {
+        throw new IOException("Unable to retrieve metaalert", e);
+      }
+    } else {
+      return solrDao.getLatest(guid, sensorType);
+    }
+  }
+
+  @Override
+  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
+    return solrDao.getAllLatest(getRequests);
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertSearchDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertSearchDao.java
new file mode 100644
index 0000000..c1e3af6
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertSearchDao.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.solr.dao.SolrMetaAlertDao.METAALERTS_COLLECTION;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertSearchDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.util.ClientUtils;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.params.CursorMarkParams;
+import org.apache.solr.common.params.MapSolrParams;
+import org.apache.solr.common.params.SolrParams;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SolrMetaAlertSearchDao implements MetaAlertSearchDao {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  transient SolrSearchDao solrSearchDao;
+  transient SolrClient solrClient;
+  private MetaAlertConfig config;
+
+  public SolrMetaAlertSearchDao(SolrClient solrClient, SolrSearchDao solrSearchDao, MetaAlertConfig config) {
+    this.solrClient = solrClient;
+    this.solrSearchDao = solrSearchDao;
+    this.config = config;
+  }
+
+  @Override
+  public SearchResponse getAllMetaAlertsForAlert(String guid) throws InvalidSearchException {
+    if (guid == null || guid.trim().isEmpty()) {
+      throw new InvalidSearchException("Guid cannot be empty");
+    }
+
+    // Searches for all active metaalerts that contain the given alert GUID as a child document.
+    // The query has to match the parentFilter to avoid errors, and the GUID must be explicitly
+    // included.
+    String activeClause =
+        MetaAlertConstants.STATUS_FIELD + ":" + MetaAlertStatus.ACTIVE.getStatusString();
+    String guidClause = Constants.GUID + ":" + guid;
+    String fullClause = "{!parent which=" + activeClause + "}" + guidClause;
+    String metaalertTypeClause = config.getSourceTypeField() + ":" + MetaAlertConstants.METAALERT_TYPE;
+    SolrQuery solrQuery = new SolrQuery()
+        .setQuery(fullClause)
+        .setFields("*", "[child parentFilter=" + metaalertTypeClause + " limit=999]")
+        .addSort(Constants.GUID,
+            SolrQuery.ORDER.asc); // Just do basic sorting to track where we are
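+    // Note (Solr behavior): cursor paging requires the sort to include the uniqueKey field;
+    // the GUID sort above satisfies that, assuming guid is the schema's uniqueKey.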
+
+    // Use Solr's Cursors to handle the paging, rather than doing it manually.
+    List<SearchResult> allResults = new ArrayList<>();
+    try {
+      String cursorMark = CursorMarkParams.CURSOR_MARK_START;
+      boolean done = false;
+      while (!done) {
+        solrQuery.set(CursorMarkParams.CURSOR_MARK_PARAM, cursorMark);
+        QueryResponse rsp = solrClient.query(METAALERTS_COLLECTION, solrQuery);
+        String nextCursorMark = rsp.getNextCursorMark();
+        rsp.getResults().stream()
+            .map(solrDocument -> SolrUtilities.getSearchResult(solrDocument, null,
+                    solrSearchDao.getAccessConfig().getIndexSupplier()))
+            .forEachOrdered(allResults::add);
+        if (cursorMark.equals(nextCursorMark)) {
+          done = true;
+        }
+        cursorMark = nextCursorMark;
+      }
+    } catch (IOException | SolrServerException e) {
+      throw new InvalidSearchException("Unable to complete search", e);
+    }
+
+    SearchResponse searchResponse = new SearchResponse();
+    searchResponse.setResults(allResults);
+    searchResponse.setTotal(allResults.size());
+    return searchResponse;
+  }
+
+  @Override
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    // Wrap the query so that two things hold:
+    // 1. The provided query matches, OR a nested query on the child alert field matches.
+    // 2. The metaalert is active, OR the document is not a metaalert at all.
+
+    String activeStatusClause =
+        MetaAlertConstants.STATUS_FIELD + ":" + MetaAlertStatus.ACTIVE.getStatusString();
+
+    String metaalertTypeClause = config.getSourceTypeField() + ":" + MetaAlertConstants.METAALERT_TYPE;
+    // Use the 'v=' form in order to ensure complex clauses are properly handled.
+    // Per the docs, the 'which=' clause should be used to identify all metaalert parents, not to
+    //   filter.
+    // Status is a filter on parents and must be applied outside the '!parent' construct.
+    String parentChildQuery =
+        "(+" + activeStatusClause + " +" + "{!parent which=" + metaalertTypeClause + " v='"
+            + searchRequest.getQuery() + "'})";
+
+    // Put everything together to get our full query
+    // The '-metaalert:[* TO *]' construct ensures the metaalert field is absent or empty on
+    //   plain alerts.
+    // Also make sure that it's not a metaalert
+    String fullQuery =
+        "(" + searchRequest.getQuery() + " AND -" + MetaAlertConstants.METAALERT_FIELD + ":[* TO *]"
+            + " AND " + "-" + metaalertTypeClause + ")" + " OR " + parentChildQuery;
+
+    LOG.debug("MetaAlert search query {}", fullQuery);
+
+    searchRequest.setQuery(fullQuery);
+
+    // Build the custom field list
+    List<String> fields = searchRequest.getFields();
+    String fieldList = "*";
+    if (fields != null) {
+      fieldList = StringUtils.join(fields, ",");
+    }
+
+    LOG.debug("MetaAlert Search Field list {}", fullQuery);
+
+    SearchResponse results = solrSearchDao.search(searchRequest, fieldList);
+    LOG.debug("MetaAlert Search Number of results {}", results.getResults().size());
+
+    // Unfortunately, we can't get the full metaalert results at the same time
+    // Get them in a second query.
+    // However, we can only retrieve them if we have the source type field (either explicit or
+    // wildcard).
+    if (fieldList.contains("*") || fieldList.contains(config.getSourceTypeField())) {
+      List<String> metaalertGuids = new ArrayList<>();
+      for (SearchResult result : results.getResults()) {
+        if (result.getSource().get(config.getSourceTypeField())
+            .equals(MetaAlertConstants.METAALERT_TYPE)) {
+          // Then we need to add it to the list to retrieve child alerts in a second query.
+          metaalertGuids.add(result.getId());
+        }
+      }
+      LOG.debug("MetaAlert Search guids requiring retrieval: {}", metaalertGuids);
+
+      // If we have any metaalerts in our result, attach the full data.
+      if (!metaalertGuids.isEmpty()) {
+        Map<String, String> params = new HashMap<>();
+        params.put("fl", fieldList + ",[child parentFilter=" + metaalertTypeClause + " limit=999]");
+        SolrParams solrParams = new MapSolrParams(params);
+        try {
+          SolrDocumentList solrDocumentList = solrClient
+              .getById(METAALERTS_COLLECTION, metaalertGuids, solrParams);
+          Map<String, Document> guidToDocuments = new HashMap<>();
+          for (SolrDocument doc : solrDocumentList) {
+            Document document = SolrUtilities.toDocument(doc);
+            guidToDocuments.put(document.getGuid(), document);
+          }
+
+          // Run through our results and update them with the full metaalert
+          for (SearchResult result : results.getResults()) {
+            Document fullDoc = guidToDocuments.get(result.getId());
+            if (fullDoc != null) {
+              result.setSource(fullDoc.getDocument());
+            }
+          }
+        } catch (SolrServerException | IOException e) {
+          throw new InvalidSearchException("Error when retrieving child alerts for metaalerts", e);
+        }
+
+      }
+    }
+    return results;
+  }
+
+  @Override
+  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+    // Make sure to escape any problematic characters here
+    String sourceType = ClientUtils.escapeQueryChars(config.getSourceTypeField());
+    String baseQuery = groupRequest.getQuery();
+    String adjustedQuery = baseQuery + " -" + MetaAlertConstants.METAALERT_FIELD + ":[* TO *]"
+        + " -" + sourceType + ":" + MetaAlertConstants.METAALERT_TYPE;
+    LOG.debug("MetaAlert group adjusted query: {}", adjustedQuery);
+    groupRequest.setQuery(adjustedQuery);
+    return solrSearchDao.group(groupRequest);
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertUpdateDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertUpdateDao.java
new file mode 100644
index 0000000..132d872
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrMetaAlertUpdateDao.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.solr.dao.SolrMetaAlertDao.METAALERTS_COLLECTION;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateResponse;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertUpdateDao;
+import org.apache.metron.indexing.dao.metaalert.MetaScores;
+import org.apache.metron.indexing.dao.metaalert.lucene.AbstractLuceneMetaAlertUpdateDao;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.InvalidCreateException;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.UpdateDao;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+
+public class SolrMetaAlertUpdateDao extends AbstractLuceneMetaAlertUpdateDao implements
+    MetaAlertUpdateDao, UpdateDao {
+
+  private SolrClient solrClient;
+  private SolrMetaAlertSearchDao metaAlertSearchDao;
+
+  /**
+   * Constructs a SolrMetaAlertUpdateDao.
+   * @param solrDao A SolrDao to defer queries to.
+   * @param metaAlertSearchDao A meta alert aware search DAO used to retrieve items being mutated.
+   * @param retrieveLatestDao A RetrieveLatestDao for getting the current state of items being
+   *     mutated.
+   * @param config The meta alert configuration.
+   */
+  public SolrMetaAlertUpdateDao(SolrDao solrDao,
+      SolrMetaAlertSearchDao metaAlertSearchDao,
+      SolrMetaAlertRetrieveLatestDao retrieveLatestDao,
+      MetaAlertConfig config) {
+    super(solrDao, retrieveLatestDao, config);
+    this.solrClient = solrDao.getSolrClient(solrDao.getZkHosts());
+    this.metaAlertSearchDao = metaAlertSearchDao;
+  }
+
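+  // Illustrative flow (hypothetical values): a createMetaAlert request carrying alerts [a1, a2]
+  // and groups ["ip_src_addr"] writes one new metaalert document to the metaalerts collection and
+  // updates a1 and a2 so each records its membership in the new metaalert.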
+  @Override
+  public MetaAlertCreateResponse createMetaAlert(MetaAlertCreateRequest request)
+      throws InvalidCreateException, IOException {
+    List<GetRequest> alertRequests = request.getAlerts();
+    if (alertRequests.isEmpty()) {
+      throw new InvalidCreateException("MetaAlertCreateRequest must contain alerts");
+    }
+    if (request.getGroups().isEmpty()) {
+      throw new InvalidCreateException("MetaAlertCreateRequest must contain UI groups");
+    }
+
+    // Retrieve the documents going into the meta alert and build it
+    Iterable<Document> alerts = getRetrieveLatestDao().getAllLatest(alertRequests);
+
+    Document metaAlert = buildCreateDocument(alerts, request.getGroups(),
+        MetaAlertConstants.ALERT_FIELD);
+    MetaScores.calculateMetaScores(metaAlert, getConfig().getThreatTriageField(),
+        getConfig().getThreatSort());
+
+    // Add source type to be consistent with other sources and allow filtering
+    metaAlert.getDocument().put(getConfig().getSourceTypeField(), MetaAlertConstants.METAALERT_TYPE);
+
+    // Start a list of updates / inserts we need to run
+    Map<Document, Optional<String>> updates = new HashMap<>();
+    updates.put(metaAlert, Optional.of(METAALERTS_COLLECTION));
+
+    try {
+      // We need to update the associated alerts with the new meta alerts, making sure existing
+      // links are maintained.
+      Map<String, Optional<String>> guidToIndices = alertRequests.stream().collect(Collectors.toMap(
+          GetRequest::getGuid, GetRequest::getIndex));
+      Map<String, String> guidToSensorTypes = alertRequests.stream().collect(Collectors.toMap(
+          GetRequest::getGuid, GetRequest::getSensorType));
+      for (Document alert : alerts) {
+        if (addMetaAlertToAlert(metaAlert.getGuid(), alert)) {
+          // Use the index in the request if it exists
+          Optional<String> index = guidToIndices.get(alert.getGuid());
+          if (!index.isPresent()) {
+            index = Optional.ofNullable(guidToSensorTypes.get(alert.getGuid()));
+            if (!index.isPresent()) {
+              throw new IllegalArgumentException("Could not find index for " + alert.getGuid());
+            }
+          }
+          updates.put(alert, index);
+        }
+      }
+
+      // Kick off any updates.
+      update(updates);
+
+      MetaAlertCreateResponse createResponse = new MetaAlertCreateResponse();
+      createResponse.setCreated(true);
+      createResponse.setGuid(metaAlert.getGuid());
+      solrClient.commit(METAALERTS_COLLECTION);
+      return createResponse;
+    } catch (IOException | SolrServerException e) {
+      throw new InvalidCreateException("Unable to create meta alert", e);
+    }
+  }
+
+
+  /**
+   * Updates a document in Solr for a given collection.  Collection is not optional for Solr.
+   * @param update The update to be run
+   * @param collection The index to be updated. Mandatory for Solr
+   * @throws IOException Thrown when an error occurs during the write.
+   */
+  @Override
+  public void update(Document update, Optional<String> collection) throws IOException {
+    if (MetaAlertConstants.METAALERT_TYPE.equals(update.getSensorType())) {
+      // We've been passed an update to the meta alert.
+      throw new UnsupportedOperationException("Meta alerts cannot be directly updated");
+    }
+    // Index can't be optional, or it won't be committed
+
+    Map<Document, Optional<String>> updates = new HashMap<>();
+    updates.put(update, collection);
+
+    // Updating the alert cannot simply be delegated in Solr: we have to retrieve every metaalert
+    // containing it and rewrite each metaalert document in full.
+    SearchResponse searchResponse;
+    try {
+      searchResponse = metaAlertSearchDao.getAllMetaAlertsForAlert(update.getGuid());
+    } catch (InvalidSearchException e) {
+      throw new IOException("Unable to retrieve metaalerts for alert", e);
+    }
+
+    ArrayList<Document> metaAlerts = new ArrayList<>();
+    for (SearchResult searchResult : searchResponse.getResults()) {
+      Document doc = new Document(searchResult.getSource(), searchResult.getId(),
+          MetaAlertConstants.METAALERT_TYPE, 0L);
+      metaAlerts.add(doc);
+    }
+
+    for (Document metaAlert : metaAlerts) {
+      if (replaceAlertInMetaAlert(metaAlert, update)) {
+        updates.put(metaAlert, Optional.of(METAALERTS_COLLECTION));
+      }
+    }
+
+    // Run the alert's update
+    getUpdateDao().batchUpdate(updates);
+
+    try {
+      solrClient.commit(METAALERTS_COLLECTION);
+      if (collection.isPresent()) {
+        solrClient.commit(collection.get());
+      }
+    } catch (SolrServerException e) {
+      throw new IOException("Unable to update document", e);
+    }
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    getUpdateDao().addCommentToAlert(request);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request) throws IOException {
+    getUpdateDao().removeCommentFromAlert(request);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    getUpdateDao().addCommentToAlert(request, latest);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    getUpdateDao().removeCommentFromAlert(request, latest);
+  }
+
+  protected boolean replaceAlertInMetaAlert(Document metaAlert, Document alert) {
+    boolean metaAlertUpdated = removeAlertsFromMetaAlert(metaAlert,
+        Collections.singleton(alert.getGuid()));
+    if (metaAlertUpdated) {
+      addAlertsToMetaAlert(metaAlert, Collections.singleton(alert));
+    }
+    return metaAlertUpdated;
+  }
+
+  @Override
+  public boolean addAlertsToMetaAlert(String metaAlertGuid, List<GetRequest> alertRequests)
+      throws IOException {
+    boolean success;
+    Document metaAlert = getRetrieveLatestDao()
+        .getLatest(metaAlertGuid, MetaAlertConstants.METAALERT_TYPE);
+    if (MetaAlertStatus.ACTIVE.getStatusString()
+        .equals(metaAlert.getDocument().get(MetaAlertConstants.STATUS_FIELD))) {
+      Iterable<Document> alerts = getRetrieveLatestDao().getAllLatest(alertRequests);
+      Map<Document, Optional<String>> updates = buildAddAlertToMetaAlertUpdates(metaAlert, alerts);
+      update(updates);
+      success = !updates.isEmpty();
+    } else {
+      throw new IllegalStateException("Adding alerts to an INACTIVE meta alert is not allowed");
+    }
+    try {
+      solrClient.commit(METAALERTS_COLLECTION);
+    } catch (SolrServerException e) {
+      throw new IOException("Unable to commit alerts to metaalert: " + metaAlertGuid, e);
+    }
+    return success;
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrRetrieveLatestDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrRetrieveLatestDao.java
new file mode 100644
index 0000000..40262ba
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrRetrieveLatestDao.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SolrRetrieveLatestDao implements RetrieveLatestDao {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private transient SolrClient client;
+  private AccessConfig config;
+
+  public SolrRetrieveLatestDao(SolrClient client, AccessConfig config) {
+    this.client = client;
+    this.config = config;
+  }
+
+  @Override
+  public Document getLatest(String guid, String sensorType) throws IOException {
+    try {
+      Optional<String> index = SolrUtilities
+          .getIndex(config.getIndexSupplier(), sensorType, Optional.empty());
+      if (!index.isPresent()) {
+        LOG.debug("Unable to find index for sensorType {}", sensorType);
+        return null;
+      }
+
+      SolrDocument solrDocument = client.getById(index.get(), guid);
+      if (solrDocument == null) {
+        LOG.debug("Unable to find document for sensorType {} and guid {}", sensorType, guid);
+        return null;
+      }
+      return SolrUtilities.toDocument(solrDocument);
+    } catch (SolrServerException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
+    Map<String, Collection<String>> collectionIdMap = new HashMap<>();
+    for (GetRequest getRequest : getRequests) {
+      Optional<String> index = SolrUtilities
+          .getIndex(config.getIndexSupplier(), getRequest.getSensorType(), getRequest.getIndex());
+      if (index.isPresent()) {
+        Collection<String> ids = collectionIdMap.getOrDefault(index.get(), new HashSet<>());
+        ids.add(getRequest.getGuid());
+        collectionIdMap.put(index.get(), ids);
+      } else {
+        LOG.debug("Unable to find index for sensorType {}", getRequest.getSensorType());
+      }
+    }
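+    // Illustrative (hypothetical requests): GUIDs {a1, a2} in "bro" and {s1} in "snort" collapse
+    // into one real-time get per collection, routed via the "collection" parameter below.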
+    try {
+      List<Document> documents = new ArrayList<>();
+      for (String collection : collectionIdMap.keySet()) {
+        SolrDocumentList solrDocumentList = client.getById(collectionIdMap.get(collection),
+            new SolrQuery().set("collection", collection));
+        documents.addAll(
+            solrDocumentList.stream().map(SolrUtilities::toDocument).collect(Collectors.toList()));
+      }
+      return documents;
+    } catch (SolrServerException e) {
+      throw new IOException(e);
+    }
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrSearchDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrSearchDao.java
new file mode 100644
index 0000000..4a8d482
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrSearchDao.java
@@ -0,0 +1,278 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.Group;
+import org.apache.metron.indexing.dao.search.GroupOrder;
+import org.apache.metron.indexing.dao.search.GroupOrderType;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.GroupResult;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchDao;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.search.SortField;
+import org.apache.metron.indexing.dao.search.SortOrder;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrQuery.ORDER;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.FacetField;
+import org.apache.solr.client.solrj.response.FacetField.Count;
+import org.apache.solr.client.solrj.response.PivotField;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.SolrException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SolrSearchDao implements SearchDao {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private transient SolrClient client;
+  private AccessConfig accessConfig;
+
+  public SolrSearchDao(SolrClient client, AccessConfig accessConfig) {
+    this.client = client;
+    this.accessConfig = accessConfig;
+  }
+
+  protected AccessConfig getAccessConfig() {
+    return accessConfig;
+  }
+
+  @Override
+  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
+    return search(searchRequest, null);
+  }
+
+  // Allow the fieldList to be explicitly specified, letting things like metaalerts expand on it.
+  // If null, use whatever the searchRequest defines.
+  public SearchResponse search(SearchRequest searchRequest, String fieldList)
+      throws InvalidSearchException {
+    if (searchRequest.getQuery() == null) {
+      throw new InvalidSearchException("Search query is invalid: null");
+    }
+    if (client == null) {
+      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
+    }
+    if (searchRequest.getSize() > accessConfig.getMaxSearchResults()) {
+      throw new InvalidSearchException(
+          "Search result size must be less than " + accessConfig.getMaxSearchResults());
+    }
+    try {
+      SolrQuery query = buildSearchRequest(searchRequest, fieldList);
+      QueryResponse response = client.query(query);
+      return buildSearchResponse(searchRequest, response);
+    } catch (SolrException | IOException | SolrServerException e) {
+      String msg = e.getMessage();
+      LOG.error(msg, e);
+      throw new InvalidSearchException(msg, e);
+    }
+  }
+
+  @Override
+  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
+    try {
+      String groupNames = groupRequest.getGroups().stream().map(Group::getField).collect(
+          Collectors.joining(","));
+      SolrQuery query = new SolrQuery()
+          .setStart(0)
+          .setRows(0)
+          .setQuery(groupRequest.getQuery());
+
+      query.set("collection", getCollections(groupRequest.getIndices()));
+      Optional<String> scoreField = groupRequest.getScoreField();
+      if (scoreField.isPresent()) {
+        query.set("stats", true);
+        query.set("stats.field", String.format("{!tag=piv1 sum=true}%s", scoreField.get()));
+      }
+      query.set("facet", true);
+      query.set("facet.pivot", String.format("{!stats=piv1}%s", groupNames));
+      QueryResponse response = client.query(query);
+      return buildGroupResponse(groupRequest, response);
+    } catch (IOException | SolrServerException e) {
+      String msg = e.getMessage();
+      LOG.error(msg, e);
+      throw new InvalidSearchException(msg, e);
+    }
+  }
+
+  // An explicit, overriding fieldList can be provided.  This is useful for things like metaalerts,
+  // which may need to modify that parameter.
+  protected SolrQuery buildSearchRequest(
+      SearchRequest searchRequest, String fieldList) throws IOException, SolrServerException {
+    SolrQuery query = new SolrQuery()
+        .setStart(searchRequest.getFrom())
+        .setRows(searchRequest.getSize())
+        .setQuery(searchRequest.getQuery());
+
+    // handle sort fields
+    for (SortField sortField : searchRequest.getSort()) {
+      query.addSort(sortField.getField(), getSolrSortOrder(sortField.getSortOrder()));
+    }
+
+    // handle search fields
+    List<String> fields = searchRequest.getFields();
+    if (fieldList == null) {
+      fieldList = "*";
+      if (fields != null) {
+        fieldList = StringUtils.join(fields, ",");
+      }
+    }
+    query.set("fl", fieldList);
+
+    //handle facet fields
+    List<String> facetFields = searchRequest.getFacetFields();
+    if (facetFields != null) {
+      facetFields.forEach(query::addFacetField);
+    }
+
+    query.set("collection", getCollections(searchRequest.getIndices()));
+
+    return query;
+  }
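+
+  // Illustrative expansion of buildSearchRequest (hypothetical request): query "*:*", from=0,
+  // size=10, and a descending sort on "timestamp" become roughly
+  //   q=*:*&start=0&rows=10&sort=timestamp desc&fl=*&collection=<existing collections>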
+
+  private String getCollections(List<String> indices) throws IOException, SolrServerException {
+    List<String> existingCollections = CollectionAdminRequest.listCollections(client);
+    return indices.stream().filter(existingCollections::contains).collect(Collectors.joining(","));
+  }
+
+  private SolrQuery.ORDER getSolrSortOrder(
+      SortOrder sortOrder) {
+    return sortOrder == SortOrder.DESC
+        ? ORDER.desc : ORDER.asc;
+  }
+
+  protected SearchResponse buildSearchResponse(
+      SearchRequest searchRequest,
+      QueryResponse solrResponse) {
+
+    SearchResponse searchResponse = new SearchResponse();
+    SolrDocumentList solrDocumentList = solrResponse.getResults();
+    searchResponse.setTotal(solrDocumentList.getNumFound());
+
+    // search hits --> search results
+    List<SearchResult> results = solrDocumentList.stream()
+        .map(solrDocument -> SolrUtilities.getSearchResult(solrDocument, searchRequest.getFields(),
+                accessConfig.getIndexSupplier()))
+        .collect(Collectors.toList());
+    searchResponse.setResults(results);
+
+    // handle facet fields
+    List<String> facetFields = searchRequest.getFacetFields();
+    if (facetFields != null) {
+      searchResponse.setFacetCounts(getFacetCounts(facetFields, solrResponse));
+    }
+
+    if (LOG.isDebugEnabled()) {
+      String response;
+      try {
+        response = JSONUtils.INSTANCE.toJSON(searchResponse, false);
+      } catch (JsonProcessingException e) {
+        response = e.getMessage();
+      }
+      LOG.debug("Built search response; response={}", response);
+    }
+    return searchResponse;
+  }
+
+  protected Map<String, Map<String, Long>> getFacetCounts(List<String> fields,
+      QueryResponse solrResponse) {
+    Map<String, Map<String, Long>> fieldCounts = new HashMap<>();
+    for (String field : fields) {
+      Map<String, Long> valueCounts = new HashMap<>();
+      FacetField facetField = solrResponse.getFacetField(field);
+      for (Count facetCount : facetField.getValues()) {
+        valueCounts.put(facetCount.getName(), facetCount.getCount());
+      }
+      fieldCounts.put(field, valueCounts);
+    }
+    return fieldCounts;
+  }
+
+  /**
+   * Build a group response.
+   * @param groupRequest The original group request.
+   * @param response The search response.
+   * @return A group response.
+   */
+  protected GroupResponse buildGroupResponse(
+      GroupRequest groupRequest,
+      QueryResponse response) {
+    String groupNames = groupRequest.getGroups().stream().map(Group::getField).collect(
+        Collectors.joining(","));
+    List<PivotField> pivotFields = response.getFacetPivot().get(groupNames);
+    GroupResponse groupResponse = new GroupResponse();
+    groupResponse.setGroupedBy(groupRequest.getGroups().get(0).getField());
+    groupResponse.setGroupResults(getGroupResults(groupRequest, 0, pivotFields));
+    return groupResponse;
+  }
+
+  protected List<GroupResult> getGroupResults(GroupRequest groupRequest, int index,
+      List<PivotField> pivotFields) {
+    List<Group> groups = groupRequest.getGroups();
+    List<GroupResult> searchResultGroups = new ArrayList<>();
+    final GroupOrder groupOrder = groups.get(index).getOrder();
+    pivotFields.sort((o1, o2) -> {
+      String s1 = groupOrder.getGroupOrderType() == GroupOrderType.TERM
+          ? o1.getValue().toString() : Integer.toString(o1.getCount());
+      String s2 = groupOrder.getGroupOrderType() == GroupOrderType.TERM
+          ? o2.getValue().toString() : Integer.toString(o2.getCount());
+      if (groupOrder.getSortOrder() == SortOrder.ASC) {
+        return s1.compareTo(s2);
+      } else {
+        return s2.compareTo(s1);
+      }
+    });
+
+    for (PivotField pivotField : pivotFields) {
+      GroupResult groupResult = new GroupResult();
+      groupResult.setKey(pivotField.getValue().toString());
+      groupResult.setTotal(pivotField.getCount());
+      Optional<String> scoreField = groupRequest.getScoreField();
+      if (scoreField.isPresent()) {
+        groupResult
+            .setScore((Double) pivotField.getFieldStatsInfo().get(scoreField.get()).getSum());
+      }
+      if (index < groups.size() - 1) {
+        groupResult.setGroupedBy(groups.get(index + 1).getField());
+        groupResult
+            .setGroupResults(getGroupResults(groupRequest, index + 1, pivotField.getPivot()));
+      }
+      searchResultGroups.add(groupResult);
+    }
+    return searchResultGroups;
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrUpdateDao.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrUpdateDao.java
new file mode 100644
index 0000000..2f83921
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrUpdateDao.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.indexing.dao.IndexDao.COMMENTS_FIELD;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.AlertComment;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.UpdateDao;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.common.SolrInputDocument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SolrUpdateDao implements UpdateDao {
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private transient SolrClient client;
+  private AccessConfig config;
+  private transient SolrRetrieveLatestDao retrieveLatestDao;
+
+  public SolrUpdateDao(SolrClient client, SolrRetrieveLatestDao retrieveLatestDao, AccessConfig config) {
+    this.client = client;
+    this.retrieveLatestDao = retrieveLatestDao;
+    this.config = config;
+  }
+
+  @Override
+  public void update(Document update, Optional<String> rawIndex) throws IOException {
+    Document newVersion = update;
+    // Handle any case where we're given comments in Map form, instead of raw String
+    Object commentsObj = update.getDocument().get(COMMENTS_FIELD);
+    if (commentsObj instanceof List &&
+        ((List<Object>) commentsObj).size() > 0 &&
+        ((List<Object>) commentsObj).get(0) instanceof Map) {
+      newVersion = new Document(update);
+      convertCommentsToRaw(newVersion.getDocument());
+    }
+    try {
+      SolrInputDocument solrInputDocument = SolrUtilities.toSolrInputDocument(newVersion);
+      Optional<String> index = SolrUtilities
+          .getIndex(config.getIndexSupplier(), newVersion.getSensorType(), rawIndex);
+      if (index.isPresent()) {
+        this.client.add(index.get(), solrInputDocument);
+        this.client.commit(index.get());
+      } else {
+        throw new IllegalStateException("Index must be specified or inferred.");
+      }
+    } catch (SolrServerException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void batchUpdate(Map<Document, Optional<String>> updates) throws IOException {
+    // updates with a collection specified
+    Map<String, Collection<SolrInputDocument>> solrCollectionUpdates = new HashMap<>();
+    Set<String> collectionsUpdated = new HashSet<>();
+
+    for (Entry<Document, Optional<String>> entry : updates.entrySet()) {
+      SolrInputDocument solrInputDocument = SolrUtilities.toSolrInputDocument(entry.getKey());
+      Optional<String> index = SolrUtilities
+          .getIndex(config.getIndexSupplier(), entry.getKey().getSensorType(), entry.getValue());
+      if (index.isPresent()) {
+        Collection<SolrInputDocument> solrInputDocuments = solrCollectionUpdates
+            .getOrDefault(index.get(), new ArrayList<>());
+        solrInputDocuments.add(solrInputDocument);
+        solrCollectionUpdates.put(index.get(), solrInputDocuments);
+        collectionsUpdated.add(index.get());
+      } else {
+        String lookupIndex = config.getIndexSupplier().apply(entry.getKey().getSensorType());
+        Collection<SolrInputDocument> solrInputDocuments = solrCollectionUpdates
+            .getOrDefault(lookupIndex, new ArrayList<>());
+        solrInputDocuments.add(solrInputDocument);
+        solrCollectionUpdates.put(lookupIndex, solrInputDocuments);
+        collectionsUpdated.add(lookupIndex);
+      }
+    }
+    try {
+      for (Entry<String, Collection<SolrInputDocument>> entry : solrCollectionUpdates
+          .entrySet()) {
+        this.client.add(entry.getKey(), entry.getValue());
+      }
+      for (String collection : collectionsUpdated) {
+        this.client.commit(collection);
+      }
+    } catch (SolrServerException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request) throws IOException {
+    Document latest = retrieveLatestDao.getLatest(request.getGuid(), request.getSensorType());
+    addCommentToAlert(request, latest);
+  }
+
+  @Override
+  public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) throws IOException {
+    if (latest == null) {
+      return;
+    }
+
+    @SuppressWarnings("unchecked")
+    List<Map<String, Object>> comments = (List<Map<String, Object>>) latest.getDocument()
+        .getOrDefault(COMMENTS_FIELD, new ArrayList<>());
+    List<Map<String, Object>> originalComments = new ArrayList<>(comments);
+
+    // Convert all comments back to raw JSON before updating.
+    List<String> commentStrs = new ArrayList<>();
+    for (Map<String, Object> comment : originalComments) {
+      commentStrs.add(new AlertComment(comment).asJson());
+    }
+    commentStrs.add(new AlertComment(
+        request.getComment(),
+        request.getUsername(),
+        request.getTimestamp()
+    ).asJson());
+
+    Document newVersion = new Document(latest);
+    newVersion.getDocument().put(COMMENTS_FIELD, commentStrs);
+    update(newVersion, Optional.empty());
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request)
+      throws IOException {
+    Document latest = retrieveLatestDao.getLatest(request.getGuid(), request.getSensorType());
+    removeCommentFromAlert(request, latest);
+  }
+
+  @Override
+  public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest)
+      throws IOException {
+    if (latest == null) {
+      return;
+    }
+
+    @SuppressWarnings("unchecked")
+    List<Map<String, Object>> commentMap = (List<Map<String, Object>>) latest.getDocument()
+        .get(COMMENTS_FIELD);
+    // Can't remove anything if there's nothing there
+    if (commentMap == null) {
+      LOG.debug("Provided alert had no comments to be able to remove from");
+      return;
+    }
+    List<Map<String, Object>> originalComments = new ArrayList<>(commentMap);
+    List<AlertComment> comments = new ArrayList<>();
+    for (Map<String, Object> comment : originalComments) {
+      comments.add(new AlertComment(comment));
+    }
+
+    comments.remove(
+        new AlertComment(request.getComment(), request.getUsername(), request.getTimestamp()));
+    List<String> commentsAsJson = comments.stream().map(AlertComment::asJson)
+        .collect(Collectors.toList());
+    Document newVersion = new Document(latest);
+    newVersion.getDocument().put(COMMENTS_FIELD, commentsAsJson);
+    update(newVersion, Optional.empty());
+  }
+
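+  /**
+   * Converts comments in Map form to raw JSON strings, in place.
+   * @param source The document source map whose comments field is rewritten.
+   */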
+  public void convertCommentsToRaw(Map<String,Object> source) {
+    @SuppressWarnings("unchecked")
+    List<Map<String, Object>> comments = (List<Map<String, Object>>) source.get(COMMENTS_FIELD);
+    if (comments == null || comments.isEmpty()) {
+      return;
+    }
+    List<String> asJson = new ArrayList<>();
+    for (Map<String, Object> comment : comments) {
+      asJson.add((new AlertComment(comment)).asJson());
+    }
+    source.put(COMMENTS_FIELD, asJson);
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrUtilities.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrUtilities.java
new file mode 100644
index 0000000..d41b7e4
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrUtilities.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.indexing.dao.IndexDao.COMMENTS_FIELD;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+
+import java.util.stream.Collectors;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.search.AlertComment;
+import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrInputDocument;
+import org.json.simple.parser.ParseException;
+
+public class SolrUtilities {
+
+  public static SearchResult getSearchResult(SolrDocument solrDocument, List<String> fields, Function<String, String> indexSupplier) {
+    SearchResult searchResult = new SearchResult();
+    searchResult.setId((String) solrDocument.getFieldValue(Constants.GUID));
+    searchResult.setIndex(indexSupplier.apply((String) solrDocument.getFieldValue(Constants.SENSOR_TYPE)));
+    Map<String, Object> docSource = toDocument(solrDocument).getDocument();
+    final Map<String, Object> source = new HashMap<>();
+    if (fields != null) {
+      fields.forEach(field -> source.put(field, docSource.get(field)));
+    } else {
+      source.putAll(docSource);
+    }
+    searchResult.setSource(source);
+    return searchResult;
+  }
+
+  public static Document toDocument(SolrDocument solrDocument) {
+    Map<String, Object> document = new HashMap<>();
+    solrDocument.getFieldNames().stream()
+        .filter(name -> !name.equals(SolrDao.VERSION_FIELD))
+        .forEach(name -> document.put(name, solrDocument.getFieldValue(name)));
+
+    reformatComments(solrDocument, document);
+    insertChildAlerts(solrDocument, document);
+
+    return new Document(document,
+        (String) solrDocument.getFieldValue(Constants.GUID),
+        (String) solrDocument.getFieldValue(Constants.SENSOR_TYPE), 0L);
+  }
+
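+  /**
+   * Converts the raw JSON comment strings stored in Solr back into the Map
+   * form used in the document.
+   * @param solrDocument The Solr document holding raw comment strings.
+   * @param document The document map to receive the reformatted comments.
+   */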
+  protected static void reformatComments(SolrDocument solrDocument, Map<String, Object> document) {
+    // Make sure comments are in the proper format
+    @SuppressWarnings("unchecked")
+    List<String> commentStrs = (List<String>) solrDocument.get(COMMENTS_FIELD);
+    if (commentStrs != null) {
+      try {
+        List<AlertComment> comments = new ArrayList<>();
+        for (String commentStr : commentStrs) {
+          comments.add(new AlertComment(commentStr));
+        }
+        document.put(COMMENTS_FIELD,
+            comments.stream().map(AlertComment::asMap).collect(Collectors.toList()));
+      } catch (ParseException e) {
+        throw new IllegalStateException("Unable to parse comment", e);
+      }
+    }
+  }
+
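+  /**
+   * Copies a metaalert's Solr child documents into the document map as nested alerts.
+   * @param solrDocument The Solr document, potentially carrying child documents.
+   * @param document The document map to receive the child alerts.
+   */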
+  protected static void insertChildAlerts(SolrDocument solrDocument, Map<String, Object> document) {
+    // Make sure to put child alerts in
+    if (solrDocument.hasChildDocuments() && solrDocument
+        .getFieldValue(Constants.SENSOR_TYPE)
+        .equals(MetaAlertConstants.METAALERT_TYPE)) {
+      List<Map<String, Object>> childDocuments = new ArrayList<>();
+      for (SolrDocument childDoc : solrDocument.getChildDocuments()) {
+        Map<String, Object> childDocMap = new HashMap<>();
+        childDoc.getFieldNames().stream()
+            .filter(name -> !name.equals(SolrDao.VERSION_FIELD))
+            .forEach(name -> childDocMap.put(name, childDoc.getFieldValue(name)));
+        childDocuments.add(childDocMap);
+      }
+
+      document.put(MetaAlertConstants.ALERT_FIELD, childDocuments);
+    }
+  }
+
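+  /**
+   * Converts a Metron Document into a SolrInputDocument, translating any nested
+   * metaalert child alerts into Solr child documents.
+   * @param document The Metron document to convert.
+   * @return The equivalent SolrInputDocument.
+   */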
+  public static SolrInputDocument toSolrInputDocument(Document document) {
+    SolrInputDocument solrInputDocument = new SolrInputDocument();
+    for (Map.Entry<String, Object> field : document.getDocument().entrySet()) {
+      if (field.getKey().equals(MetaAlertConstants.ALERT_FIELD)) {
+        // We have child alerts; they need to be translated into child documents, not fields.
+        List<Map<String, Object>> alerts = (List<Map<String, Object>>) field.getValue();
+        for (Map<String, Object> alert : alerts) {
+          SolrInputDocument childDocument = new SolrInputDocument();
+          for (Map.Entry<String, Object> alertField : alert.entrySet()) {
+            childDocument.addField(alertField.getKey(), alertField.getValue());
+          }
+          solrInputDocument.addChildDocument(childDocument);
+        }
+      } else {
+        solrInputDocument.addField(field.getKey(), field.getValue());
+      }
+    }
+    return solrInputDocument;
+  }
+
+  /**
+   * Gets the actual collection for the given sensor type
+   * @param indexSupplier The function to employ in the lookup
+   * @param sensorName The sensor type to be looked up
+   * @param index An index to use, if present.
+   * @return An Optional containing the actual collection
+   */
+  public static Optional<String> getIndex(Function<String, String> indexSupplier, String sensorName,
+      Optional<String> index) {
+    if (index.isPresent()) {
+      return index;
+    } else {
+      String realIndex = indexSupplier.apply(sensorName);
+      return Optional.ofNullable(realIndex);
+    }
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/schema/FieldType.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/schema/FieldType.java
new file mode 100644
index 0000000..1bb2283
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/schema/FieldType.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.schema;
+
+public class FieldType {
+  private String name;
+  private String solrClass;
+  private boolean sortMissingLast;
+  private boolean docValues;
+  private boolean multiValued;
+  private boolean stored;
+  private boolean indexed;
+
+  /**
+   * Create a new field type.  The default values for the fields are
+   * the implicit values from
+   * https://lucene.apache.org/solr/guide/6_6/field-type-definitions-and-properties.html#FieldTypeDefinitionsandProperties-FieldTypeDefinitionsinschema.xml
+   * @param name The name of the field type, e.g. "string".
+   * @param solrClass The implementing Solr class, e.g. "solr.StrField".
+   */
+  public FieldType(String name, String solrClass) {
+    this(name, solrClass, false, false, false, true, true);
+  }
+
+  private FieldType(String name
+                         , String solrClass
+                         , boolean sortMissingLast
+                         , boolean docValues
+                         , boolean multiValued
+                         , boolean indexed
+                         , boolean stored
+  ) {
+    this.name = name;
+    this.solrClass = solrClass;
+    this.sortMissingLast = sortMissingLast;
+    this.docValues = docValues;
+    this.multiValued = multiValued;
+    this.indexed = indexed;
+    this.stored = stored;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public FieldType sortMissingLast() {
+    this.sortMissingLast = true;
+    return this;
+  }
+
+  public FieldType docValues() {
+    this.docValues = true;
+    return this;
+  }
+
+  public FieldType multiValued() {
+    this.multiValued = true;
+    return this;
+  }
+
+  public FieldType indexed() {
+    this.indexed = true;
+    return this;
+  }
+
+  public FieldType stored() {
+    this.stored = true;
+    return this;
+  }
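+
+  // Illustrative usage (mirrors SchemaTranslator.SolrFields):
+  //   new FieldType("string", "solr.StrField").sortMissingLast()
+  // which toString() renders as:
+  //   <fieldType name="string" stored="true" indexed="true" multiValued="false"
+  //              class="solr.StrField" sortMissingLast="true" docValues="false"/>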
+  @Override
+  public String toString() {
+    return String.format("<fieldType name=\"%s\" " +
+                    "stored=\"%s\" " +
+                    "indexed=\"%s\" " +
+                    "multiValued=\"%s\" " +
+                    "class=\"%s\" " +
+                    "sortMissingLast=\"%s\" " +
+                    "docValues=\"%s\"" +
+                    "/>"
+            , name, stored + "", indexed + "", multiValued + "", solrClass + "", sortMissingLast + "", docValues + ""
+    );
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/schema/SchemaTranslator.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/schema/SchemaTranslator.java
new file mode 100644
index 0000000..e17adc6
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/schema/SchemaTranslator.java
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.schema;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import org.apache.metron.common.utils.JSONUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.*;
+
+public class SchemaTranslator {
+
+  public static final String TAB = "  ";
+  public static final String PREAMBLE="<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n" +
+          "<!--\n" +
+          " Licensed to the Apache Software Foundation (ASF) under one or more\n" +
+          " contributor license agreements.  See the NOTICE file distributed with\n" +
+          " this work for additional information regarding copyright ownership.\n" +
+          " The ASF licenses this file to You under the Apache License, Version 2.0\n" +
+          " (the \"License\"); you may not use this file except in compliance with\n" +
+          " the License.  You may obtain a copy of the License at\n" +
+          "\n" +
+          "     http://www.apache.org/licenses/LICENSE-2.0\n" +
+          "\n" +
+          " Unless required by applicable law or agreed to in writing, software\n" +
+          " distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
+          " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
+          " See the License for the specific language governing permissions and\n" +
+          " limitations under the License.\n" +
+          "-->";
+  public static final String VERSION_FIELD =
+          "<field name=\"_version_\" type=\"" + SolrFields.LONG.solrType.getName() + "\" indexed=\"false\" stored=\"false\"/>";
+  public static final String ROOT_FIELD =
+          "<field name=\"_root_\" type=\"" + SolrFields.STRING.solrType.getName() + "\" indexed=\"true\" stored=\"false\" docValues=\"false\" />";
+  public static final String DYNAMIC_FIELD_CATCHALL = "<dynamicField name=\"*\" type=\"ignored\" multiValued=\"false\" docValues=\"true\"/>";
+  public static final String UNIQUE_KEY = "<uniqueKey>guid</uniqueKey>";
+  public static final String PROPERTIES_KEY = "properties";
+  public static final String DYNAMIC_TEMPLATES_KEY = "dynamic_templates";
+  public static final String SCHEMA_FORMAT="<schema name=\"%s\" version=\"1.6\">";
+  public static final String TEMPLATE_KEY = "template";
+
+  public enum SolrFields {
+    STRING( new FieldType("string", "solr.StrField").sortMissingLast()
+          , ImmutableSet.of("text", "keyword")),
+    BOOLEAN( new FieldType("boolean", "solr.BoolField").sortMissingLast()
+           , ImmutableSet.of("boolean")),
+    INTEGER( new FieldType("pint", "solr.IntPointField").docValues()
+           , ImmutableSet.of("integer")),
+    FLOAT( new FieldType("pfloat", "solr.FloatPointField").docValues()
+           , ImmutableSet.of("float")),
+    LONG( new FieldType("plong", "solr.LongPointField").docValues()
+           , ImmutableSet.of("long")),
+    DOUBLE( new FieldType("pdouble", "solr.DoublePointField").docValues()
+           , ImmutableSet.of("double")),
+    BINARY( new FieldType("bytes", "solr.BinaryField").docValues()
+           , ImmutableSet.of("binary")),
+    LOCATION( new FieldType("location", "solr.LatLonPointSpatialField").docValues()
+            , ImmutableSet.of("geo_point")),
+    IP(new FieldType("ip", "solr.StrField").sortMissingLast()
+      , ImmutableSet.of("ip")),
+    TIMESTAMP(new FieldType("timestamp", "solr.LongPointField").docValues()
+             , ImmutableSet.of("date")),
+    IGNORE( new FieldType("ignored", "solr.StrField").multiValued(), new HashSet<>())
+    ;
+    FieldType solrType;
+    Set<String> elasticsearchTypes;
+
+    SolrFields(FieldType solrType, Set<String> elasticsearchTypes) {
+      this.solrType = solrType;
+      this.elasticsearchTypes = elasticsearchTypes;
+    }
+
+    public String getTypeDeclaration() {
+      return solrType.toString();
+    }
+
+    public static SolrFields byElasticsearchType(String type) {
+      for(SolrFields f : values()) {
+        if(f.elasticsearchTypes.contains(type)) {
+          return f;
+        }
+      }
+      return null;
+    }
+
+
+    public static void printTypes(PrintWriter pw) {
+      for(SolrFields f : values()) {
+        pw.println(TAB + f.getTypeDeclaration());
+      }
+    }
+  }
+
+  public static String normalizeField(String fieldName) {
+    return fieldName.replace(':', '.');
+  }
+
+  public static void processProperties(PrintWriter pw, Map<String, Object> properties) {
+    for(Map.Entry<String, Object> property : properties.entrySet()) {
+      String fieldName = normalizeField(property.getKey());
+      System.out.println("Processing property: " + fieldName);
+      if(fieldName.equals("guid")) {
+        pw.println(TAB + "<field name=\"guid\" type=\"" + SolrFields.STRING.solrType.getName()
+                + "\" indexed=\"true\" stored=\"true\" required=\"true\" multiValued=\"false\" />");
+      }
+      else {
+        String type = (String) ((Map<String, Object>) property.getValue()).get("type");
+        SolrFields solrField = SolrFields.byElasticsearchType(type);
+        if(solrField == null) {
+          System.out.println("Skipping " + fieldName + " because I can't find solr type for " + type);
+          continue;
+        }
+        pw.println(TAB + String.format("<field name=\"%s\" type=\"%s\" indexed=\"true\" stored=\"true\" />", fieldName, solrField.solrType.getName()));
+      }
+    }
+  }
+
+  public static void processDynamicMappings(PrintWriter pw, List<Map<String, Object>> properties) {
+    for(Map<String, Object> dynamicProperty : properties) {
+      for(Map.Entry<String, Object> dynamicFieldDef : dynamicProperty.entrySet()) {
+        System.out.println("Processing dynamic property: " + dynamicFieldDef.getKey());
+        Map<String, Object> def = (Map<String, Object>) dynamicFieldDef.getValue();
+        String match = (String) def.get("match");
+        if(match == null) {
+          match = (String) def.get("path_match");
+        }
+        match = normalizeField(match);
+        String type = (String)((Map<String, Object>)def.get("mapping")).get("type");
+        SolrFields solrField = SolrFields.byElasticsearchType(type);
+        if(solrField == null) {
+          System.out.println("Skipping " + match + " because I can't find solr type for " + type);
+          continue;
+        }
+        pw.println(TAB + String.format("<dynamicField name=\"%s\" type=\"%s\" multiValued=\"false\" docValues=\"true\"/>", match, solrField.solrType.getName()));
+      }
+    }
+  }
+
+  public static void translate(PrintWriter pw, Map<String, Object> template) {
+    pw.println(PREAMBLE);
+    System.out.println("Processing " + template.getOrDefault(TEMPLATE_KEY, "unknown template"));
+    Map<String, Object> mappings = (Map<String, Object>) template.getOrDefault("mappings", new HashMap<>());
+    if (mappings.size() != 1) {
+      System.err.println("Unable to process mappings. We expect exactly 1 mapping, there are " + mappings.size() + " mappings specified");
+    }
+    String docName = Iterables.getFirst(mappings.keySet(), null);
+    pw.println(String.format(SCHEMA_FORMAT, docName));
+    pw.println(TAB + VERSION_FIELD);
+    pw.println(TAB + ROOT_FIELD);
+    for (Map.Entry<String, Object> docTypeToMapping : mappings.entrySet()) {
+      System.out.println("Processing " + docTypeToMapping.getKey() + " doc type");
+      Map<String, Object> actualMappings = (Map<String, Object>) docTypeToMapping.getValue();
+      Map<String, Object> properties = (Map<String, Object>) actualMappings.getOrDefault(PROPERTIES_KEY, new HashMap<>());
+      processProperties(pw, properties);
+      List<Map<String, Object>> dynamicMappings = (List<Map<String, Object>>) actualMappings.getOrDefault(DYNAMIC_TEMPLATES_KEY, new ArrayList<>());
+      processDynamicMappings(pw, dynamicMappings);
+      pw.println(TAB + DYNAMIC_FIELD_CATCHALL);
+      pw.println(TAB + UNIQUE_KEY);
+      SolrFields.printTypes(pw);
+    }
+    pw.println("</schema>");
+    pw.flush();
+  }
+
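+  // Example invocation (paths are illustrative):
+  //   java org.apache.metron.solr.schema.SchemaTranslator \
+  //     /path/to/bro_index.template /path/to/schema.xml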
+  public static void main(String... argv) throws IOException {
+    String templateFile = argv[0];
+    String schemaFile = argv[1];
+    Map<String, Object> template = JSONUtils.INSTANCE.load(new File(templateFile), JSONUtils.MAP_SUPPLIER);
+    try(PrintWriter pw = new PrintWriter(new File(schemaFile))) {
+      translate(pw, template);
+    }
+  }
+}
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/MetronSolrClient.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/MetronSolrClient.java
index d3ef36f..5c27cce 100644
--- a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/MetronSolrClient.java
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/MetronSolrClient.java
@@ -17,18 +17,22 @@
  */
 package org.apache.metron.solr.writer;
 
+import com.google.common.collect.Iterables;
 import org.apache.metron.solr.SolrConstants;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 
 public class MetronSolrClient extends CloudSolrClient {
 
@@ -40,6 +44,36 @@
     super(zkHost);
   }
 
+  public MetronSolrClient(String zkHost, Map<String, Object> solrHttpConfig) {
+    super(zkHost, HttpClientUtil.createClient(toSolrProps(solrHttpConfig)));
+  }
+
+  public static SolrParams toSolrProps(Map<String, Object> config) {
+    if(config == null || config.isEmpty()) {
+      return null;
+    }
+
+    ModifiableSolrParams ret = new ModifiableSolrParams();
+    for(Map.Entry<String, Object> kv : config.entrySet()) {
+      Object v = kv.getValue();
+      if(v instanceof Boolean) {
+        ret.set(kv.getKey(), (Boolean)v);
+      }
+      else if(v instanceof Integer) {
+        ret.set(kv.getKey(), (Integer)v);
+      }
+      else if(v instanceof Iterable) {
+        Iterable vals = (Iterable)v;
+        String[] strVals = new String[Iterables.size(vals)];
+        int i = 0;
+        for(Object o : vals) {
+          strVals[i++] = o.toString();
+        }
+        // Apply the converted values to the params.
+        ret.set(kv.getKey(), strVals);
+      }
+      else {
+        // Fall back to the string form for any other value type.
+        ret.set(kv.getKey(), v.toString());
+      }
+    }
+    return ret;
+  }
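+
+  // Illustrative solr.http.config map (keys are standard Solr HttpClientUtil
+  // parameters; the values shown are assumptions, not recommendations):
+  //   { "maxConnections" : 100, "followRedirects" : false }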
+
   public void createCollection(String name, int numShards, int replicationFactor) throws IOException, SolrServerException {
     if (!listCollections().contains(name)) {
       request(getCreateCollectionsRequest(name, numShards, replicationFactor));
diff --git a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/SolrWriter.java b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/SolrWriter.java
index 4e3246b..0289398 100644
--- a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/SolrWriter.java
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/writer/SolrWriter.java
@@ -17,108 +17,242 @@
  */
 package org.apache.metron.solr.writer;
 
-import org.apache.storm.task.TopologyContext;
-import org.apache.storm.tuple.Tuple;
-import org.apache.metron.common.configuration.Configurations;
-import org.apache.metron.common.configuration.EnrichmentConfigurations;
+import static org.apache.metron.solr.SolrConstants.SOLR_WRITER_NAME;
+
+import com.google.common.base.Joiner;
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.UUID;
+import java.util.function.Supplier;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.metron.common.Constants;
 import org.apache.metron.common.configuration.writer.WriterConfiguration;
 import org.apache.metron.common.writer.BulkMessageWriter;
 import org.apache.metron.common.writer.BulkWriterResponse;
+import org.apache.metron.solr.SolrConstants;
+import org.apache.metron.stellar.common.utils.ConversionUtils;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer;
 import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.storm.task.TopologyContext;
+import org.apache.storm.tuple.Tuple;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-
 public class SolrWriter implements BulkMessageWriter<JSONObject>, Serializable {
 
-  public static final String DEFAULT_COLLECTION = "metron";
+  public static final String JAVA_SECURITY_CONFIG_PROPERTY = "java.security.auth.login.config";
 
-  private static final Logger LOG = LoggerFactory
-          .getLogger(SolrWriter.class);
+  public enum SolrProperties {
+    ZOOKEEPER_QUORUM(SolrConstants.SOLR_ZOOKEEPER),
+    COMMIT_PER_BATCH("solr.commitPerBatch", Optional.of(true)),
+    COMMIT_WAIT_SEARCHER("solr.commit.waitSearcher", Optional.of(true)),
+    COMMIT_WAIT_FLUSH("solr.commit.waitFlush", Optional.of(true)),
+    COMMIT_SOFT("solr.commit.soft", Optional.of(false)),
+    DEFAULT_COLLECTION("solr.collection", Optional.of("metron")),
+    HTTP_CONFIG("solr.http.config", Optional.of(new HashMap<>()))
+    ;
+    String name;
+    Optional<Object> defaultValue;
 
-  private boolean shouldCommit = false;
-  private MetronSolrClient solr;
+    SolrProperties(String name) {
+      this(name, Optional.empty());
+    }
+    SolrProperties(String name, Optional<Object> defaultValue) {
+      this.name = name;
+      this.defaultValue = defaultValue;
+    }
 
-  public SolrWriter withShouldCommit(boolean shouldCommit) {
-    this.shouldCommit = shouldCommit;
-    return this;
+    public <T> Optional<T> coerceOrDefault(Map<String, Object> globalConfig, Class<T> clazz) {
+      Object val = globalConfig.get(name);
+      if(val != null) {
+        T ret = null;
+        try {
+          ret = ConversionUtils.convert(val, clazz);
+        }
+        catch(ClassCastException cce) {
+          ret = null;
+        }
+        if(ret == null) {
+          //unable to convert value
+          LOG.warn("Unable to convert {} to {}, was {}", name, clazz.getName(), "" + val);
+          if(defaultValue.isPresent()) {
+            return Optional.ofNullable(ConversionUtils.convert(defaultValue.get(), clazz));
+          }
+          else {
+            return Optional.empty();
+          }
+        }
+        else {
+          return Optional.ofNullable(ret);
+        }
+      }
+      else {
+        if(defaultValue.isPresent()) {
+          return Optional.ofNullable(ConversionUtils.convert(defaultValue.get(), clazz));
+        }
+        else {
+          return Optional.empty();
+        }
+      }
+    }
+
+    public Supplier<IllegalArgumentException> errorOut(Map<String, Object> globalConfig) {
+      String message = "Unable to retrieve " + name + " from global config, value associated is " + globalConfig.get(name);
+      return () -> new IllegalArgumentException(message);
+    }
+
+    public <T> T coerceOrDefaultOrExcept(Map<String, Object> globalConfig, Class<T> clazz) {
+      return this.coerceOrDefault(globalConfig, clazz).orElseThrow(this.errorOut(globalConfig));
+    }
+
   }
 
+
+  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private Boolean shouldCommit;
+  private Boolean softCommit;
+  private Boolean waitSearcher;
+  private Boolean waitFlush;
+  private String zookeeperUrl;
+  private String defaultCollection;
+  private Map<String, Object> solrHttpConfig;
+
+  private MetronSolrClient solr;
+
   public SolrWriter withMetronSolrClient(MetronSolrClient solr) {
     this.solr = solr;
     return this;
   }
 
+  public void initializeFromGlobalConfig(Map<String, Object> globalConfiguration) {
+    zookeeperUrl = SolrProperties.ZOOKEEPER_QUORUM.coerceOrDefaultOrExcept(globalConfiguration, String.class);
+    defaultCollection = SolrProperties.DEFAULT_COLLECTION.coerceOrDefaultOrExcept(globalConfiguration, String.class);
+    solrHttpConfig = SolrProperties.HTTP_CONFIG.coerceOrDefaultOrExcept(globalConfiguration, Map.class);
+    shouldCommit = SolrProperties.COMMIT_PER_BATCH.coerceOrDefaultOrExcept(globalConfiguration, Boolean.class);
+    softCommit = SolrProperties.COMMIT_SOFT.coerceOrDefaultOrExcept(globalConfiguration, Boolean.class);
+    waitSearcher = SolrProperties.COMMIT_WAIT_SEARCHER.coerceOrDefaultOrExcept(globalConfiguration, Boolean.class);
+    waitFlush = SolrProperties.COMMIT_WAIT_FLUSH.coerceOrDefaultOrExcept(globalConfiguration, Boolean.class);
+  }
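+
+  // Illustrative global config snippet wiring the properties above (values are
+  // assumptions; unset keys fall back to the defaults declared in SolrProperties):
+  //   {
+  //     "solr.zookeeper"      : "node1:2181",
+  //     "solr.commitPerBatch" : true,
+  //     "solr.collection"     : "metron"
+  //   }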
+
   @Override
   public void init(Map stormConf, TopologyContext topologyContext, WriterConfiguration configurations) throws IOException, SolrServerException {
     Map<String, Object> globalConfiguration = configurations.getGlobalConfig();
-    if(solr == null) solr = new MetronSolrClient((String) globalConfiguration.get("solr.zookeeper"));
-    String collection = getCollection(configurations);
-    solr.createCollection(collection, (Integer) globalConfiguration.get("solr.numShards"), (Integer) globalConfiguration.get("solr.replicationFactor"));
-    solr.setDefaultCollection(collection);
+    initializeFromGlobalConfig(globalConfiguration);
+    LOG.info("Initializing SOLR writer: {}", zookeeperUrl);
+    LOG.info("Forcing commit per batch: {}", shouldCommit);
+    LOG.info("Soft commit: {}", softCommit);
+    LOG.info("Commit Wait Searcher: {}", waitSearcher);
+    LOG.info("Commit Wait Flush: {}", waitFlush);
+    LOG.info("Default Collection: {}", "" + defaultCollection );
+    if(solr == null) {
+      if (isKerberosEnabled(stormConf)) {
+        HttpClientUtil.addConfigurer(new Krb5HttpClientConfigurer());
+      }
+      solr = new MetronSolrClient(zookeeperUrl, solrHttpConfig);
+    }
+    solr.setDefaultCollection(defaultCollection);
+  }
+
+  public Collection<SolrInputDocument> toDocs(Iterable<JSONObject> messages) {
+    Collection<SolrInputDocument> ret = new ArrayList<>();
+    for(JSONObject message: messages) {
+      SolrInputDocument document = new SolrInputDocument();
+      for (Object key : message.keySet()) {
+        Object value = message.get(key);
+        if (value instanceof Iterable) {
+          for (Object v : (Iterable) value) {
+            document.addField("" + key, v);
+          }
+        } else {
+          document.addField("" + key, value);
+        }
+      }
+      if (!document.containsKey(Constants.GUID)) {
+        document.addField(Constants.GUID, UUID.randomUUID().toString());
+      }
+      ret.add(document);
+    }
+    return ret;
+  }
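+
+  // For example, a message {"ip_src_addr":"10.0.0.1"} becomes a document with
+  // field ip_src_addr and a generated guid, since none was present.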
+
+  protected String getCollection(String sourceType, WriterConfiguration configurations) {
+    String collection = configurations.getIndex(sourceType);
+    if(StringUtils.isEmpty(collection)) {
+      return solr.getDefaultCollection();
+    }
+    return collection;
   }
 
   @Override
   public BulkWriterResponse write(String sourceType, WriterConfiguration configurations, Iterable<Tuple> tuples, List<JSONObject> messages) throws Exception {
-    for(JSONObject message: messages) {
-      SolrInputDocument document = new SolrInputDocument();
-      document.addField("id", getIdValue(message));
-      document.addField("sensorType", sourceType);
-      for(Object key: message.keySet()) {
-        Object value = message.get(key);
-        document.addField(getFieldName(key, value), value);
+    String collection = getCollection(sourceType, configurations);
+    BulkWriterResponse bulkResponse = new BulkWriterResponse();
+    Collection<SolrInputDocument> docs = toDocs(messages);
+    try {
+      Optional<SolrException> exceptionOptional = fromUpdateResponse(solr.add(collection, docs));
+      // Solr commits the entire batch or throws an exception for it.  There's no way to get partial failures.
+      if(exceptionOptional.isPresent()) {
+        bulkResponse.addAllErrors(exceptionOptional.get(), tuples);
       }
-      UpdateResponse response = solr.add(document);
+      else {
+        if (shouldCommit) {
+          exceptionOptional = fromUpdateResponse(solr.commit(collection, waitFlush, waitSearcher, softCommit));
+          if(exceptionOptional.isPresent()) {
+            bulkResponse.addAllErrors(exceptionOptional.get(), tuples);
+          }
+        }
+        if(!exceptionOptional.isPresent()) {
+          bulkResponse.addAllSuccesses(tuples);
+        }
+      }
     }
-    if (shouldCommit) {
-      solr.commit(getCollection(configurations));
+    catch(HttpSolrClient.RemoteSolrException sse) {
+      bulkResponse.addAllErrors(sse, tuples);
     }
 
-    // Solr commits the entire batch or throws an exception for it.  There's no way to get partial failures.
-    BulkWriterResponse response = new BulkWriterResponse();
-    response.addAllSuccesses(tuples);
-    return response;
+    return bulkResponse;
+  }
+
+  protected Optional<SolrException> fromUpdateResponse(UpdateResponse response) {
+    if(response != null && response.getStatus() > 0) {
+      String message = "Solr Update response: " + Joiner.on(",").join(response.getResponse());
+      return Optional.of(new SolrException(SolrException.ErrorCode.BAD_REQUEST, message));
+    }
+    return Optional.empty();
   }
 
   @Override
   public String getName() {
-    return "solr";
-  }
-
-  protected String getCollection(WriterConfiguration configurations) {
-    String collection = (String) configurations.getGlobalConfig().get("solr.collection");
-    return collection != null ? collection : DEFAULT_COLLECTION;
-  }
-
-  private int getIdValue(JSONObject message) {
-    return message.toJSONString().hashCode();
-  }
-
-  protected String getFieldName(Object key, Object value) {
-    String field;
-    if (value instanceof Integer) {
-      field = key + "_i";
-    } else if (value instanceof Long) {
-      field = key + "_l";
-    } else if (value instanceof Float) {
-      field = key + "_f";
-    } else if (value instanceof Double) {
-      field = key + "_d";
-    } else {
-      field = key + "_s";
-    }
-    return field;
+    return SOLR_WRITER_NAME;
   }
 
   @Override
   public void close() throws Exception {
-    solr.close();
+    if(solr != null) {
+      solr.close();
+    }
+  }
+
+  private boolean isKerberosEnabled(Map stormConfig) {
+    if (stormConfig == null) {
+      return false;
+    }
+    String value = (String) stormConfig.get(JAVA_SECURITY_CONFIG_PROPERTY);
+    return value != null && !value.isEmpty();
   }
 }
diff --git a/metron-platform/metron-solr/src/main/scripts/create_collection.sh b/metron-platform/metron-solr/src/main/scripts/create_collection.sh
new file mode 100755
index 0000000..7693646
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/scripts/create_collection.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+METRON_VERSION=${project.version}
+METRON_HOME=/usr/metron/$METRON_VERSION
+ZOOKEEPER=${ZOOKEEPER:-localhost:2181}
+ZOOKEEPER_HOME=${ZOOKEEPER_HOME:-/usr/hdp/current/zookeeper-client}
+SECURITY_ENABLED=${SECURITY_ENABLED:-false}
+NEGOTIATE=''
+if [ ${SECURITY_ENABLED,,} == 'true' ]; then
+    NEGOTIATE=' --negotiate -u : '
+fi
+
+# Get the first Solr node from the list of live nodes in Zookeeper
+SOLR_NODE=`$ZOOKEEPER_HOME/bin/zkCli.sh -server $ZOOKEEPER ls /live_nodes | tail -n 1 | sed 's/\[\([^,]*\).*\]/\1/' | sed 's/_solr//'`
+
+# Upload the collection config set
+zip -rj - $METRON_HOME/config/schema/$1 | curl -X POST $NEGOTIATE --header "Content-Type:text/xml" --data-binary @- "http://$SOLR_NODE/solr/admin/configs?action=UPLOAD&name=$1"
+
+# Create the collection
+curl -X GET $NEGOTIATE "http://$SOLR_NODE/solr/admin/collections?action=CREATE&name=$1&numShards=1"
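+
+# Example usage (collection name is illustrative):
+#   ZOOKEEPER=node1:2181 ./create_collection.sh bro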
diff --git a/metron-platform/metron-solr/src/main/scripts/delete_collection.sh b/metron-platform/metron-solr/src/main/scripts/delete_collection.sh
new file mode 100755
index 0000000..c8b45e7
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/scripts/delete_collection.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+METRON_VERSION=${project.version}
+METRON_HOME=/usr/metron/$METRON_VERSION
+ZOOKEEPER=${ZOOKEEPER:-localhost:2181}
+ZOOKEEPER_HOME=${ZOOKEEPER_HOME:-/usr/hdp/current/zookeeper-client}
+SECURITY_ENABLED=${SECURITY_ENABLED:-false}
+NEGOTIATE=''
+if [ ${SECURITY_ENABLED,,} == 'true' ]; then
+    NEGOTIATE=' --negotiate -u : '
+fi
+
+# Get the first Solr node from the list of live nodes in Zookeeper
+SOLR_NODE=`$ZOOKEEPER_HOME/bin/zkCli.sh -server $ZOOKEEPER ls /live_nodes | tail -n 1 | sed 's/\[\([^,]*\).*\]/\1/' | sed 's/_solr//'`
+
+# Delete the collection
+curl -X GET $NEGOTIATE "http://$SOLR_NODE/solr/admin/collections?action=DELETE&name=$1"
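+
+# Example usage (collection name is illustrative):
+#   ZOOKEEPER=node1:2181 ./delete_collection.sh bro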
diff --git a/metron-platform/metron-solr/src/main/scripts/install_solr.sh b/metron-platform/metron-solr/src/main/scripts/install_solr.sh
new file mode 100755
index 0000000..da04557
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/scripts/install_solr.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is provided for development purposes
+
+# Full dev env setup script for Solr Cloud 6.6.2
+# - Stops ES and Kibana
+# - Downloads Solr
+# - Installs Solr
+# - Starts Solr Cloud
+
+# Note: for production mode, see https://lucene.apache.org/solr/guide/6_6/taking-solr-to-production.html
+
+service kibana stop
+service elasticsearch stop
+
+SOLR_VERSION=${global_solr_version}
+SOLR_USER=solr
+SOLR_SERVICE=$SOLR_USER
+SOLR_VAR_DIR="/var/$SOLR_SERVICE"
+
+# create user if not exists
+if ! id -u "$SOLR_USER" > /dev/null 2>&1; then
+  echo "Creating new user: $SOLR_USER"
+  adduser --system -U -m --home-dir "$SOLR_VAR_DIR" "$SOLR_USER"
+fi
+cd $SOLR_VAR_DIR
+wget http://archive.apache.org/dist/lucene/solr/${SOLR_VERSION}/solr-${SOLR_VERSION}.tgz
+tar zxvf solr-${SOLR_VERSION}.tgz
+chown -R $SOLR_USER:$SOLR_USER solr-${SOLR_VERSION}
+cd solr-${SOLR_VERSION}
+su $SOLR_USER -c "bin/solr -e cloud -noprompt"
+sleep 5
+bin/solr status
+bin/solr healthcheck -c gettingstarted
+
+# These commands can be used for running multiple Solr services on a single node for cloud mode
+# This approach extracts the install script from the tarball and will setup the solr user along
+# with init.d service scripts and then startup the services.
+
+# tar xzf solr-${SOLR_VERSION}.tgz solr-${SOLR_VERSION}/bin/install_solr_service.sh --strip-components=2
+# sudo bash ./install_solr_service.sh solr-${SOLR_VERSION}.tgz -n
+# sudo bash ./install_solr_service.sh solr-${SOLR_VERSION}.tgz -s solr2 -p 8984 -n
+# echo "export ZK_HOST=node1:2181" >> /etc/default/solr.in.sh
+
diff --git a/metron-platform/metron-solr/src/main/scripts/start_solr.sh b/metron-platform/metron-solr/src/main/scripts/start_solr.sh
new file mode 100755
index 0000000..6a89508
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/scripts/start_solr.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+METRON_VERSION=${project.version}
+METRON_HOME=/usr/metron/$METRON_VERSION
+SOLR_VERSION=${global_solr_version}
+SOLR_USER=solr
+SOLR_SERVICE=$SOLR_USER
+SOLR_VAR_DIR="/var/$SOLR_SERVICE"
+
+cd $SOLR_VAR_DIR/solr-${SOLR_VERSION}
+su $SOLR_USER -c "bin/solr -e cloud -noprompt"
diff --git a/metron-platform/metron-solr/src/main/scripts/start_solr_topology.sh b/metron-platform/metron-solr/src/main/scripts/start_solr_topology.sh
index cae0c3c..614423e 100755
--- a/metron-platform/metron-solr/src/main/scripts/start_solr_topology.sh
+++ b/metron-platform/metron-solr/src/main/scripts/start_solr_topology.sh
@@ -19,4 +19,4 @@
 METRON_VERSION=${project.version}
 METRON_HOME=/usr/metron/$METRON_VERSION
 TOPOLOGY_JAR=${project.artifactId}-$METRON_VERSION-uber.jar
-storm jar $METRON_HOME/lib/$TOPOLOGY_JAR org.apache.storm.flux.Flux --remote $METRON_HOME/flux/indexing/remote.yaml --filter $METRON_HOME/config/solr.properties
+storm jar $METRON_HOME/lib/$TOPOLOGY_JAR org.apache.storm.flux.Flux --remote $METRON_HOME/flux/indexing/random_access/remote.yaml --filter $METRON_HOME/config/solr.properties
diff --git a/metron-platform/metron-solr/src/main/scripts/stop_solr.sh b/metron-platform/metron-solr/src/main/scripts/stop_solr.sh
new file mode 100755
index 0000000..5fe8d9c
--- /dev/null
+++ b/metron-platform/metron-solr/src/main/scripts/stop_solr.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+METRON_VERSION=${project.version}
+METRON_HOME=/usr/metron/$METRON_VERSION
+SOLR_VERSION=${global_solr_version}
+SOLR_USER=solr
+SOLR_SERVICE=$SOLR_USER
+SOLR_VAR_DIR="/var/$SOLR_SERVICE"
+
+cd $SOLR_VAR_DIR/solr-${SOLR_VERSION}
+su $SOLR_USER -c "bin/solr stop -all"
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrColumnMetadataTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrColumnMetadataTest.java
new file mode 100644
index 0000000..df471c8
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrColumnMetadataTest.java
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.common.SolrException;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
+
+public class SolrColumnMetadataTest {
+
+  @Rule
+  public final ExpectedException exception = ExpectedException.none();
+
+  private SolrColumnMetadataDao solrColumnMetadataDao;
+
+  @SuppressWarnings("unchecked")
+  @Before
+  public void setUp() throws Exception {
+    solrColumnMetadataDao = new SolrColumnMetadataDao(null);
+  }
+
+  @Test
+  public void getColumnMetadataShouldProperlyReturnColumnMetadata() throws Exception {
+    List<Map<String, Object>> broFields = new ArrayList<>();
+    broFields.add(new HashMap<String, Object>(){{
+      put("name", "string");
+      put("type", "string");
+    }});
+    broFields.add(new HashMap<String, Object>(){{
+      put("name", "int");
+      put("type", "pint");
+    }});
+    broFields.add(new HashMap<String, Object>(){{
+      put("name", "float");
+      put("type", "pfloat");
+    }});
+    broFields.add(new HashMap<String, Object>(){{
+      put("name", "double");
+      put("type", "pdouble");
+    }});
+    broFields.add(new HashMap<String, Object>(){{
+      put("name", "boolean");
+      put("type", "boolean");
+    }});
+    broFields.add(new HashMap<String, Object>(){{
+      put("name", "broField");
+      put("type", "string");
+    }});
+    broFields.add(new HashMap<String, Object>(){{
+      put("name", "conflict");
+      put("type", "string");
+    }});
+
+
+    List<Map<String, Object>> snortFields = new ArrayList<>();
+    snortFields.add(new HashMap<String, Object>(){{
+      put("name", "long");
+      put("type", "plong");
+    }});
+    snortFields.add(new HashMap<String, Object>(){{
+      put("name", "snortField");
+      put("type", "plong");
+    }});
+    snortFields.add(new HashMap<String, Object>(){{
+      put("name", "unknown");
+      put("type", "unknown");
+    }});
+    snortFields.add(new HashMap<String, Object>(){{
+      put("name", "conflict");
+      put("type", "plong");
+    }});
+
+    solrColumnMetadataDao = spy(new SolrColumnMetadataDao(null));
+    doReturn(broFields).when(solrColumnMetadataDao).getIndexFields("bro");
+    doReturn(snortFields).when(solrColumnMetadataDao).getIndexFields("snort");
+
+    Map<String, FieldType> columnMetadata = solrColumnMetadataDao.getColumnMetadata(Arrays.asList("bro", "snort"));
+
+    assertEquals(FieldType.BOOLEAN, columnMetadata.get("boolean"));
+    assertEquals(FieldType.TEXT, columnMetadata.get("string"));
+    assertEquals(FieldType.TEXT, columnMetadata.get("broField"));
+    assertEquals(FieldType.DOUBLE, columnMetadata.get("double"));
+    assertEquals(FieldType.LONG, columnMetadata.get("long"));
+    assertEquals(FieldType.FLOAT, columnMetadata.get("float"));
+    assertEquals(FieldType.INTEGER, columnMetadata.get("int"));
+    assertEquals(FieldType.LONG, columnMetadata.get("snortField"));
+    assertEquals(FieldType.OTHER, columnMetadata.get("conflict"));
+    assertEquals(FieldType.OTHER, columnMetadata.get("unknown"));
+  }
+
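+  /**
+   * A SolrServerException thrown while fetching index fields should surface to the caller
+   * wrapped in an IOException that carries the original message.
+   */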
+  @Test
+  public void getColumnMetadataShouldThrowSolrException() throws Exception {
+    exception.expect(IOException.class);
+    exception.expectMessage("solr exception");
+
+    solrColumnMetadataDao = spy(new SolrColumnMetadataDao(null));
+    doThrow(new SolrServerException("solr exception")).when(solrColumnMetadataDao).getIndexFields("bro");
+
+    solrColumnMetadataDao.getColumnMetadata(Arrays.asList("bro", "snort"));
+  }
+
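+  /**
+   * A Solr 400 (BAD_REQUEST) response should be tolerated rather than propagated, so
+   * getColumnMetadata still returns a non-null (possibly empty) metadata map.
+   */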
+  @Test
+  public void getColumnMetadataShouldHandle400Exception() throws Exception {
+    solrColumnMetadataDao = spy(new SolrColumnMetadataDao(null));
+    SolrException solrException = new SolrException(SolrException.ErrorCode.BAD_REQUEST, "solr exception");
+
+    doThrow(solrException).when(solrColumnMetadataDao).getIndexFields("bro");
+
+    Map<String, FieldType> columnMetadata = solrColumnMetadataDao.getColumnMetadata(Collections.singletonList("bro"));
+
+    assertNotNull(columnMetadata);
+  }
+
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrDaoTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrDaoTest.java
new file mode 100644
index 0000000..9d84669
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrDaoTest.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.verify;
+import static org.mockito.internal.verification.VerificationModeFactory.times;
+import static org.powermock.api.mockito.PowerMockito.doNothing;
+import static org.powermock.api.mockito.PowerMockito.doReturn;
+import static org.powermock.api.mockito.PowerMockito.mock;
+import static org.powermock.api.mockito.PowerMockito.spy;
+import static org.powermock.api.mockito.PowerMockito.whenNew;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.solr.client.solrj.SolrClient;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({SolrDao.class})
+public class SolrDaoTest {
+
+  @Rule
+  public final ExpectedException exception = ExpectedException.none();
+
+  private SolrClient client;
+  private SolrSearchDao solrSearchDao;
+  private SolrUpdateDao solrUpdateDao;
+  private SolrRetrieveLatestDao solrRetrieveLatestDao;
+  private SolrColumnMetadataDao solrColumnMetadataDao;
+  private SolrDao solrDao;
+
+  @SuppressWarnings("unchecked")
+  @Before
+  public void setUp() {
+    client = mock(SolrClient.class);
+    solrSearchDao = mock(SolrSearchDao.class);
+    solrUpdateDao = mock(SolrUpdateDao.class);
+    solrRetrieveLatestDao = mock(SolrRetrieveLatestDao.class);
+    solrColumnMetadataDao = mock(SolrColumnMetadataDao.class);
+  }
+
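+  /**
+   * init() should call enableKerberos() only when the AccessConfig flags Kerberos as enabled.
+   */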
+  @Test
+  public void initShouldEnableKerberos() {
+    AccessConfig accessConfig = new AccessConfig();
+
+    solrDao = spy(new SolrDao(
+        client,
+        accessConfig,
+        solrSearchDao,
+        solrUpdateDao,
+        solrRetrieveLatestDao,
+        solrColumnMetadataDao));
+    doNothing().when(solrDao).enableKerberos();
+
+    solrDao.init(accessConfig);
+
+    verify(solrDao, times(0)).enableKerberos();
+
+    accessConfig.setKerberosEnabled(true);
+
+    solrDao.init(accessConfig);
+    verify(solrDao).enableKerberos();
+  }
+
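+  /**
+   * Uses PowerMock's whenNew to intercept the delegate constructors, verifying that init() wires
+   * the facade methods through to the search, retrieve-latest, update and column-metadata DAOs.
+   */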
+  @Test
+  public void initShouldCreateDaos() throws Exception {
+    AccessConfig accessConfig = new AccessConfig();
+    accessConfig.setGlobalConfigSupplier(() ->
+        new HashMap<String, Object>() {{
+          put(SOLR_ZOOKEEPER, "zookeeper:2181");
+        }}
+    );
+
+    solrDao = spy(new SolrDao());
+    doReturn(client).when(solrDao).getSolrClient(Collections.singletonList("zookeeper:2181"));
+    whenNew(SolrSearchDao.class).withArguments(client, accessConfig).thenReturn(solrSearchDao);
+    whenNew(SolrRetrieveLatestDao.class).withArguments(client, accessConfig)
+        .thenReturn(solrRetrieveLatestDao);
+    whenNew(SolrUpdateDao.class).withArguments(client, solrRetrieveLatestDao, accessConfig)
+        .thenReturn(solrUpdateDao);
+    whenNew(SolrColumnMetadataDao.class).withArguments(client)
+        .thenReturn(solrColumnMetadataDao);
+
+    solrDao.init(accessConfig);
+
+    SearchRequest searchRequest = mock(SearchRequest.class);
+    solrDao.search(searchRequest);
+    verify(solrSearchDao).search(searchRequest);
+
+    GroupRequest groupRequest = mock(GroupRequest.class);
+    solrDao.group(groupRequest);
+    verify(solrSearchDao).group(groupRequest);
+
+    solrDao.getLatest("guid", "collection");
+    verify(solrRetrieveLatestDao).getLatest("guid", "collection");
+
+    GetRequest getRequest1 = mock(GetRequest.class);
+    GetRequest getRequest2 = mock(GetRequest.class);
+    solrDao.getAllLatest(Arrays.asList(getRequest1, getRequest2));
+    verify(solrRetrieveLatestDao).getAllLatest(Arrays.asList(getRequest1, getRequest2));
+
+    Document document = mock(Document.class);
+    solrDao.update(document, Optional.of("bro"));
+    verify(solrUpdateDao).update(document, Optional.of("bro"));
+
+    Map<Document, Optional<String>> updates = new HashMap<Document, Optional<String>>() {{
+      put(document, Optional.of("bro"));
+    }};
+    solrDao.batchUpdate(updates);
+    verify(solrUpdateDao).batchUpdate(updates);
+
+    solrDao.getColumnMetadata(Arrays.asList("bro", "snort"));
+    verify(solrColumnMetadataDao).getColumnMetadata(Arrays.asList("bro", "snort"));
+  }
+
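+  /**
+   * The SOLR_ZOOKEEPER value should be trimmed of surrounding whitespace; the following test
+   * additionally covers splitting comma-separated connect strings.
+   */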
+  @Test
+  public void testGetZkHostsSingle() {
+    AccessConfig accessConfig = new AccessConfig();
+    accessConfig.setGlobalConfigSupplier(() ->
+        new HashMap<String, Object>() {{
+          put(SOLR_ZOOKEEPER, "   zookeeper:2181   ");
+        }}
+    );
+
+    SolrDao solrDao = new SolrDao();
+    solrDao.init(accessConfig);
+
+    List<String> actual = solrDao.getZkHosts();
+    List<String> expected = new ArrayList<>();
+    expected.add("zookeeper:2181");
+    assertEquals(expected, actual);
+  }
+
+  @Test
+  public void testGetZkHostsMultiple() {
+    AccessConfig accessConfig = new AccessConfig();
+    accessConfig.setGlobalConfigSupplier(() ->
+        new HashMap<String, Object>() {{
+          put(SOLR_ZOOKEEPER, "   zookeeper:2181    ,   zookeeper2:2181    ");
+        }}
+    );
+
+    SolrDao solrDao = new SolrDao();
+    solrDao.init(accessConfig);
+
+    List<String> actual = solrDao.getZkHosts();
+    List<String> expected = new ArrayList<>();
+    expected.add("zookeeper:2181");
+    expected.add("zookeeper2:2181");
+    assertEquals(expected, actual);
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrMetaAlertDaoTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrMetaAlertDaoTest.java
new file mode 100644
index 0000000..43bf1b1
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrMetaAlertDaoTest.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.HBaseDao;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.MultiIndexDao;
+import org.apache.metron.indexing.dao.RetrieveLatestDao;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertCreateRequest;
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.InvalidCreateException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.update.CommentAddRemoveRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class SolrMetaAlertDaoTest {
+  private static AccessConfig accessConfig = new AccessConfig();
+
+  @BeforeClass
+  public static void setupBefore() {
+    accessConfig.setGlobalConfigSupplier(() ->
+        new HashMap<String, Object>() {{
+          put(SOLR_ZOOKEEPER, "zookeeper:2181");
+        }}
+    );
+  }
+
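+  /**
+   * SolrMetaAlertDao should reject an arbitrary IndexDao implementation; it requires a DAO
+   * hierarchy that actually contains a SolrDao.
+   */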
+  @Test(expected = IllegalArgumentException.class)
+  public void testInvalidInit() {
+    IndexDao dao = new IndexDao() {
+      @Override
+      public SearchResponse search(SearchRequest searchRequest) {
+        return null;
+      }
+
+      @Override
+      public GroupResponse group(GroupRequest groupRequest) {
+        return null;
+      }
+
+      @Override
+      public void init(AccessConfig config) {
+      }
+
+      @Override
+      public Document getLatest(String guid, String sensorType) {
+        return null;
+      }
+
+      @Override
+      public Iterable<Document> getAllLatest(
+          List<GetRequest> getRequests) {
+        return null;
+      }
+
+      @Override
+      public void update(Document update, Optional<String> index) {
+      }
+
+      @Override
+      public void batchUpdate(Map<Document, Optional<String>> updates) {
+      }
+
+      @Override
+      public void addCommentToAlert(CommentAddRemoveRequest request) {
+      }
+
+      @Override
+      public void removeCommentFromAlert(CommentAddRemoveRequest request) {
+      }
+
+      @Override
+      public void addCommentToAlert(CommentAddRemoveRequest request, Document latest) {
+      }
+
+      @Override
+      public void removeCommentFromAlert(CommentAddRemoveRequest request, Document latest) {
+      }
+
+      @Override
+      public void patch(RetrieveLatestDao dao, PatchRequest request, Optional<Long> timestamp) {
+      }
+
+      @Override
+      public Map<String, FieldType> getColumnMetadata(List<String> indices) {
+        return null;
+      }
+    };
+    SolrMetaAlertDao metaAlertDao = new SolrMetaAlertDao();
+    metaAlertDao.init(dao);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testInitInvalidDao() {
+    HBaseDao dao = new HBaseDao();
+    SolrMetaAlertDao solrDao = new SolrMetaAlertDao();
+    solrDao.init(dao, Optional.empty());
+  }
+
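+  /**
+   * A meta alert create request with no alert GUIDs should be rejected up front.
+   */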
+  @Test(expected = InvalidCreateException.class)
+  public void testCreateMetaAlertEmptyGuids() throws InvalidCreateException, IOException {
+    SolrDao solrDao = new SolrDao();
+    solrDao.init(accessConfig);
+    SolrMetaAlertDao metaAlertDao = new SolrMetaAlertDao();
+    metaAlertDao.init(solrDao);
+
+    MetaAlertCreateRequest createRequest = new MetaAlertCreateRequest();
+    metaAlertDao.createMetaAlert(createRequest);
+  }
+
+  @Test(expected = InvalidCreateException.class)
+  public void testCreateMetaAlertEmptyGroups() throws InvalidCreateException, IOException {
+    SolrDao solrDao = new SolrDao();
+    solrDao.init(accessConfig);
+    MultiIndexDao miDao = new MultiIndexDao(solrDao);
+    SolrMetaAlertDao metaAlertDao = new SolrMetaAlertDao();
+    metaAlertDao.init(miDao);
+
+    MetaAlertCreateRequest createRequest = new MetaAlertCreateRequest();
+    createRequest.setAlerts(Collections.singletonList(new GetRequest("don't", "care")));
+    metaAlertDao.createMetaAlert(createRequest);
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrSearchDaoTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrSearchDaoTest.java
new file mode 100644
index 0000000..fe27a55
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrSearchDaoTest.java
@@ -0,0 +1,478 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.IsCollectionContaining.hasItems;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+import static org.powermock.api.mockito.PowerMockito.mockStatic;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.Group;
+import org.apache.metron.indexing.dao.search.GroupOrder;
+import org.apache.metron.indexing.dao.search.GroupRequest;
+import org.apache.metron.indexing.dao.search.GroupResponse;
+import org.apache.metron.indexing.dao.search.GroupResult;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SearchResult;
+import org.apache.metron.indexing.dao.search.SortField;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.solr.matcher.ModifiableSolrParamsMatcher;
+import org.apache.metron.solr.matcher.SolrQueryMatcher;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.response.FacetField;
+import org.apache.solr.client.solrj.response.FieldStatsInfo;
+import org.apache.solr.client.solrj.response.PivotField;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({CollectionAdminRequest.class})
+public class SolrSearchDaoTest {
+
+  @Rule
+  public final ExpectedException exception = ExpectedException.none();
+
+  private SolrClient client;
+  private AccessConfig accessConfig;
+  private SolrSearchDao solrSearchDao;
+  private SolrRetrieveLatestDao solrRetrieveLatestDao;
+
+  @SuppressWarnings("unchecked")
+  @Before
+  public void setUp() throws Exception {
+    client = mock(SolrClient.class);
+    accessConfig = mock(AccessConfig.class);
+    when(accessConfig.getIndexSupplier()).thenReturn(sensorType -> sensorType);
+    solrSearchDao = new SolrSearchDao(client, accessConfig);
+    solrRetrieveLatestDao = new SolrRetrieveLatestDao(client, accessConfig);
+    mockStatic(CollectionAdminRequest.class);
+    when(CollectionAdminRequest.listCollections(client)).thenReturn(Arrays.asList("bro", "snort"));
+  }
+
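+  /**
+   * search() should delegate to buildSearchRequest, execute the query against the SolrClient and
+   * translate the QueryResponse through buildSearchResponse.
+   */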
+  @Test
+  public void searchShouldProperlyReturnSearchResponse() throws Exception {
+    SearchRequest searchRequest = mock(SearchRequest.class);
+    SearchResponse searchResponse = mock(SearchResponse.class);
+    SolrQuery solrQuery = mock(SolrQuery.class);
+    QueryResponse queryResponse = mock(QueryResponse.class);
+
+    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
+    when(searchRequest.getQuery()).thenReturn("query");
+    doReturn(solrQuery).when(solrSearchDao).buildSearchRequest(searchRequest, "*");
+    when(client.query(solrQuery)).thenReturn(queryResponse);
+    doReturn(searchResponse).when(solrSearchDao).buildSearchResponse(searchRequest, queryResponse);
+
+    assertEquals(searchResponse, solrSearchDao.search(searchRequest, "*"));
+    verify(solrSearchDao).buildSearchRequest(searchRequest, "*");
+    verify(client).query(solrQuery);
+    verify(solrSearchDao).buildSearchResponse(searchRequest, queryResponse);
+    verifyNoMoreInteractions(client);
+  }
+
+  @Test
+  public void searchShouldThrowInvalidSearchExceptionOnEmptyQuery() throws Exception {
+    exception.expect(InvalidSearchException.class);
+    exception.expectMessage("Search query is invalid: null");
+
+    solrSearchDao.search(new SearchRequest());
+  }
+
+  @Test
+  public void searchShouldThrowInvalidSearchExceptionOnEmptyClient() throws Exception {
+    exception.expect(InvalidSearchException.class);
+    exception.expectMessage("Uninitialized Dao!  You must call init() prior to use.");
+
+    SearchRequest searchRequest = new SearchRequest();
+    searchRequest.setQuery("query");
+    new SolrSearchDao(null, accessConfig).search(searchRequest);
+  }
+
+  @Test
+  public void searchShouldThrowSearchResultSizeException() throws Exception {
+    exception.expect(InvalidSearchException.class);
+    exception.expectMessage("Search result size must be less than 100");
+
+    when(accessConfig.getMaxSearchResults()).thenReturn(100);
+    SearchRequest searchRequest = new SearchRequest();
+    searchRequest.setQuery("query");
+    searchRequest.setSize(200);
+    solrSearchDao.search(searchRequest);
+  }
+
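+  /**
+   * group() is backed by Solr pivot facets: the expected query attaches a tagged stats.field for
+   * the score field and pivots over the grouped fields via facet.pivot.
+   */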
+  @Test
+  public void groupShouldProperlyReturnGroupResponse() throws Exception {
+    GroupRequest groupRequest = mock(GroupRequest.class);
+    QueryResponse queryResponse = mock(QueryResponse.class);
+    GroupResponse groupResponse = mock(GroupResponse.class);
+
+    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
+    Group group1 = new Group();
+    group1.setField("field1");
+    Group group2 = new Group();
+    group2.setField("field2");
+    when(groupRequest.getQuery()).thenReturn("query");
+    when(groupRequest.getGroups()).thenReturn(Arrays.asList(group1, group2));
+    when(groupRequest.getScoreField()).thenReturn(Optional.of("scoreField"));
+    when(groupRequest.getIndices()).thenReturn(Arrays.asList("bro", "snort"));
+    when(client.query(any())).thenReturn(queryResponse);
+    doReturn(groupResponse).when(solrSearchDao).buildGroupResponse(groupRequest, queryResponse);
+    SolrQuery expectedSolrQuery = new SolrQuery()
+        .setStart(0)
+        .setRows(0)
+        .setQuery("query");
+    expectedSolrQuery.set("collection", "bro,snort");
+    expectedSolrQuery.set("stats", true);
+    expectedSolrQuery.set("stats.field", "{!tag=piv1 sum=true}scoreField");
+    expectedSolrQuery.set("facet", true);
+    expectedSolrQuery.set("facet.pivot", "{!stats=piv1}field1,field2");
+
+    assertEquals(groupResponse, solrSearchDao.group(groupRequest));
+    verify(client).query(argThat(new SolrQueryMatcher(expectedSolrQuery)));
+    verify(solrSearchDao).buildGroupResponse(groupRequest, queryResponse);
+
+    verifyNoMoreInteractions(client);
+  }
+
+  @Test
+  public void getLatestShouldProperlyReturnDocument() throws Exception {
+    SolrDocument solrDocument = mock(SolrDocument.class);
+
+    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
+    when(client.getById("collection", "guid")).thenReturn(solrDocument);
+    Document document = SolrUtilities.toDocument(solrDocument);
+
+    assertEquals(document, solrRetrieveLatestDao.getLatest("guid", "collection"));
+
+    verify(client).getById("collection", "guid");
+    verifyNoMoreInteractions(client);
+  }
+
+  @Test
+  public void getAllLatestShouldProperlyReturnDocuments() throws Exception {
+    GetRequest broRequest1 = new GetRequest("bro-1", "bro");
+    GetRequest broRequest2 = new GetRequest("bro-2", "bro");
+    GetRequest snortRequest1 = new GetRequest("snort-1", "snort");
+    GetRequest snortRequest2 = new GetRequest("snort-2", "snort");
+    SolrDocument broSolrDoc1 = mock(SolrDocument.class);
+    SolrDocument broSolrDoc2 = mock(SolrDocument.class);
+    SolrDocument snortSolrDoc1 = mock(SolrDocument.class);
+    SolrDocument snortSolrDoc2 = mock(SolrDocument.class);
+    Document broDoc1 = SolrUtilities.toDocument(broSolrDoc1);
+    Document broDoc2 = SolrUtilities.toDocument(broSolrDoc2);
+    Document snortDoc1 = SolrUtilities.toDocument(snortSolrDoc1);
+    Document snortDoc2 = SolrUtilities.toDocument(snortSolrDoc2);
+
+    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
+    SolrDocumentList broList = new SolrDocumentList();
+    broList.add(broSolrDoc1);
+    broList.add(broSolrDoc2);
+    SolrDocumentList snortList = new SolrDocumentList();
+    snortList.add(snortSolrDoc1);
+    snortList.add(snortSolrDoc2);
+    when(client.getById((Collection<String>) argThat(hasItems("bro-1", "bro-2")),
+        argThat(
+            new ModifiableSolrParamsMatcher(new ModifiableSolrParams().set("collection", "bro")))))
+        .thenReturn(broList);
+    when(client.getById((Collection<String>) argThat(hasItems("snort-1", "snort-2")),
+        argThat(new ModifiableSolrParamsMatcher(
+            new ModifiableSolrParams().set("collection", "snort"))))).thenReturn(snortList);
+    assertEquals(Arrays.asList(broDoc1, broDoc2, snortDoc1, snortDoc2), solrRetrieveLatestDao
+        .getAllLatest(Arrays.asList(broRequest1, broRequest2, snortRequest1, snortRequest2)));
+  }
+
+  @Test
+  public void buildSearchRequestShouldReturnSolrQuery() throws Exception {
+    SearchRequest searchRequest = new SearchRequest();
+    searchRequest.setIndices(Arrays.asList("bro", "snort"));
+    searchRequest.setSize(5);
+    searchRequest.setFrom(10);
+    searchRequest.setQuery("query");
+    SortField sortField = new SortField();
+    sortField.setField("sortField");
+    sortField.setSortOrder("ASC");
+    searchRequest.setSort(Collections.singletonList(sortField));
+    searchRequest.setFields(Arrays.asList("field1", "field2"));
+    searchRequest.setFacetFields(Arrays.asList("facetField1", "facetField2"));
+
+    SolrQuery expectedSolrQuery = new SolrQuery()
+        .setStart(10)
+        .setRows(5)
+        .setQuery("query")
+        .addSort("sortField", SolrQuery.ORDER.asc)
+        .addField("field1").addField("field2")
+        .addFacetField("facetField1", "facetField2");
+    expectedSolrQuery.set("collection", "bro,snort");
+
+    SolrQuery solrQuery = solrSearchDao.buildSearchRequest(searchRequest, "field1,field2");
+    assertThat(solrQuery, new SolrQueryMatcher(expectedSolrQuery));
+  }
+
+  @Test
+  public void buildSearchResponseShouldReturnSearchResponse() {
+    SearchRequest searchRequest = new SearchRequest();
+    searchRequest.setFields(Collections.singletonList("id"));
+    searchRequest.setFacetFields(Collections.singletonList("facetField"));
+    QueryResponse queryResponse = mock(QueryResponse.class);
+    SolrDocument solrDocument1 = new SolrDocument();
+    solrDocument1.setField(Constants.GUID, "id1");
+    solrDocument1.setField("id", "id1");
+    SolrDocument solrDocument2 = new SolrDocument();
+    solrDocument2.setField(Constants.GUID, "id2");
+    solrDocument2.setField("id", "id2");
+
+    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
+    SolrDocumentList solrDocumentList = new SolrDocumentList();
+    solrDocumentList.add(solrDocument1);
+    solrDocumentList.add(solrDocument2);
+    solrDocumentList.setNumFound(100);
+    when(queryResponse.getResults()).thenReturn(solrDocumentList);
+    SearchResult searchResult1 = new SearchResult();
+    searchResult1.setId("id1");
+    HashMap<String, Object> source1 = new HashMap<>();
+    source1.put("id", "id1");
+    searchResult1.setSource(source1);
+    SearchResult searchResult2 = new SearchResult();
+    searchResult2.setId("id2");
+    HashMap<String, Object> source2 = new HashMap<>();
+    source2.put("id", "id2");
+    searchResult2.setSource(source2);
+    Map<String, Map<String, Long>> facetCounts = new HashMap<String, Map<String, Long>>() {{
+      put("id", new HashMap<String, Long>() {{
+        put("id1", 1L);
+        put("id2", 1L);
+      }});
+    }};
+    doReturn(facetCounts).when(solrSearchDao)
+        .getFacetCounts(Collections.singletonList("facetField"), queryResponse);
+    SearchResponse expectedSearchResponse = new SearchResponse();
+    SearchResult expectedSearchResult1 = new SearchResult();
+    expectedSearchResult1.setId("id1");
+    expectedSearchResult1.setSource(source1);
+    SearchResult expectedSearchResult2 = new SearchResult();
+    expectedSearchResult2.setId("id2");
+    expectedSearchResult2.setSource(source2);
+
+    expectedSearchResponse.setResults(Arrays.asList(expectedSearchResult1, expectedSearchResult2));
+    expectedSearchResponse.setTotal(100);
+    expectedSearchResponse.setFacetCounts(facetCounts);
+
+    assertEquals(expectedSearchResponse,
+        solrSearchDao.buildSearchResponse(searchRequest, queryResponse));
+  }
+
+  @Test
+  public void getSearchResultShouldProperlyReturnResults() {
+    SolrDocument solrDocument = mock(SolrDocument.class);
+
+    when(solrDocument.getFieldValue(Constants.GUID)).thenReturn("guid");
+    when(solrDocument.getFieldValue(Constants.SENSOR_TYPE)).thenReturn("sensorType");
+    when(solrDocument.getFieldValue("field1")).thenReturn("value1");
+    when(solrDocument.getFieldValue("field2")).thenReturn("value2");
+    when(solrDocument.getFieldNames()).thenReturn(Arrays.asList("field1", "field2"));
+
+    SearchResult expectedSearchResult = new SearchResult();
+    expectedSearchResult.setId("guid");
+    expectedSearchResult.setIndex("sensorType");
+    expectedSearchResult.setSource(new HashMap<String, Object>() {{
+      put("field1", "value1");
+    }});
+
+    assertEquals(expectedSearchResult, SolrUtilities.getSearchResult(solrDocument,
+        Collections.singletonList("field1"), solrSearchDao.getAccessConfig().getIndexSupplier()));
+
+    SearchResult expectedSearchResultAllFields = new SearchResult();
+    expectedSearchResultAllFields.setId("guid");
+    expectedSearchResultAllFields.setIndex("sensorType");
+    expectedSearchResultAllFields.setSource(new HashMap<String, Object>() {{
+      put("field1", "value1");
+      put("field2", "value2");
+    }});
+
+    assertEquals(expectedSearchResultAllFields,
+        SolrUtilities.getSearchResult(solrDocument, null, solrSearchDao.getAccessConfig().getIndexSupplier()));
+  }
+
+  @Test
+  public void getFacetCountsShouldProperlyReturnFacetCounts() {
+    QueryResponse queryResponse = mock(QueryResponse.class);
+
+    FacetField facetField1 = new FacetField("field1");
+    facetField1.add("value1", 1);
+    facetField1.add("value2", 2);
+    FacetField facetField2 = new FacetField("field2");
+    facetField2.add("value3", 3);
+    facetField2.add("value4", 4);
+    when(queryResponse.getFacetField("field1")).thenReturn(facetField1);
+    when(queryResponse.getFacetField("field2")).thenReturn(facetField2);
+
+    Map<String, Map<String, Long>> expectedFacetCounts = new HashMap<String, Map<String, Long>>() {{
+      put("field1", new HashMap<String, Long>() {{
+        put("value1", 1L);
+        put("value2", 2L);
+      }});
+      put("field2", new HashMap<String, Long>() {{
+        put("value3", 3L);
+        put("value4", 4L);
+      }});
+    }};
+
+    assertEquals(expectedFacetCounts,
+        solrSearchDao.getFacetCounts(Arrays.asList("field1", "field2"), queryResponse));
+  }
+
+  @Test
+  public void buildGroupResponseShouldProperlyReturnGroupResponse() {
+    GroupRequest groupRequest = mock(GroupRequest.class);
+    QueryResponse queryResponse = mock(QueryResponse.class);
+    NamedList namedList = mock(NamedList.class);
+    List pivotFields = mock(List.class);
+    List groupResults = mock(List.class);
+
+    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
+    Group group1 = new Group();
+    group1.setField("field1");
+    Group group2 = new Group();
+    group2.setField("field2");
+    when(groupRequest.getGroups()).thenReturn(Arrays.asList(group1, group2));
+    when(queryResponse.getFacetPivot()).thenReturn(namedList);
+    when(namedList.get("field1,field2")).thenReturn(pivotFields);
+    doReturn(groupResults).when(solrSearchDao).getGroupResults(groupRequest, 0, pivotFields);
+
+    GroupResponse groupResponse = solrSearchDao.buildGroupResponse(groupRequest, queryResponse);
+    assertEquals("field1", groupResponse.getGroupedBy());
+    verify(namedList).get("field1,field2");
+    verify(solrSearchDao).getGroupResults(groupRequest, 0, pivotFields);
+  }
+
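+  /**
+   * Verifies nested pivot handling and per-level ordering: level one is sorted by term ascending,
+   * level two by count descending, with the stats sums surfaced as group scores.
+   */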
+  @Test
+  public void getGroupResultsShouldProperlyReturnGroupResults() {
+    GroupRequest groupRequest = new GroupRequest();
+    Group group1 = new Group();
+    group1.setField("field1");
+    GroupOrder groupOrder1 = new GroupOrder();
+    groupOrder1.setSortOrder("ASC");
+    groupOrder1.setGroupOrderType("TERM");
+    group1.setOrder(groupOrder1);
+    Group group2 = new Group();
+    group2.setField("field2");
+    GroupOrder groupOrder2 = new GroupOrder();
+    groupOrder2.setSortOrder("DESC");
+    groupOrder2.setGroupOrderType("COUNT");
+    group2.setOrder(groupOrder2);
+    groupRequest.setGroups(Arrays.asList(group1, group2));
+    groupRequest.setScoreField("score");
+
+    PivotField level1Pivot1 = mock(PivotField.class);
+    PivotField level1Pivot2 = mock(PivotField.class);
+    PivotField level2Pivot1 = mock(PivotField.class);
+    PivotField level2Pivot2 = mock(PivotField.class);
+    FieldStatsInfo level1Pivot1FieldStatsInfo = mock(FieldStatsInfo.class);
+    FieldStatsInfo level1Pivot2FieldStatsInfo = mock(FieldStatsInfo.class);
+    FieldStatsInfo level2Pivot1FieldStatsInfo = mock(FieldStatsInfo.class);
+    FieldStatsInfo level2Pivot2FieldStatsInfo = mock(FieldStatsInfo.class);
+    List<PivotField> level1Pivots = Arrays.asList(level1Pivot1, level1Pivot2);
+    List<PivotField> level2Pivots = Arrays.asList(level2Pivot1, level2Pivot2);
+
+    when(level1Pivot1.getValue()).thenReturn("field1value1");
+    when(level1Pivot1.getCount()).thenReturn(1);
+    when(level1Pivot1FieldStatsInfo.getSum()).thenReturn(1.0);
+    when(level1Pivot1.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>() {{
+      put("score", level1Pivot1FieldStatsInfo);
+    }});
+    when(level1Pivot2.getValue()).thenReturn("field1value2");
+    when(level1Pivot2.getCount()).thenReturn(2);
+    when(level1Pivot2FieldStatsInfo.getSum()).thenReturn(2.0);
+    when(level1Pivot2.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>() {{
+      put("score", level1Pivot2FieldStatsInfo);
+    }});
+    when(level2Pivot1.getValue()).thenReturn("field2value1");
+    when(level2Pivot1.getCount()).thenReturn(3);
+    when(level2Pivot1FieldStatsInfo.getSum()).thenReturn(3.0);
+    when(level2Pivot1.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>() {{
+      put("score", level2Pivot1FieldStatsInfo);
+    }});
+    when(level2Pivot2.getValue()).thenReturn("field2value2");
+    when(level2Pivot2.getCount()).thenReturn(4);
+    when(level2Pivot2FieldStatsInfo.getSum()).thenReturn(4.0);
+    when(level2Pivot2.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>() {{
+      put("score", level2Pivot2FieldStatsInfo);
+    }});
+    when(level1Pivot1.getPivot()).thenReturn(level2Pivots);
+
+    List<GroupResult> level1GroupResults = solrSearchDao
+        .getGroupResults(groupRequest, 0, level1Pivots);
+
+    assertEquals("field1value1", level1GroupResults.get(0).getKey());
+    assertEquals(1, level1GroupResults.get(0).getTotal());
+    assertEquals(1.0, level1GroupResults.get(0).getScore(), 0.00001);
+    assertEquals("field2", level1GroupResults.get(0).getGroupedBy());
+    assertEquals("field1value2", level1GroupResults.get(1).getKey());
+    assertEquals(2, level1GroupResults.get(1).getTotal());
+    assertEquals(2.0, level1GroupResults.get(1).getScore(), 0.00001);
+    assertEquals("field2", level1GroupResults.get(1).getGroupedBy());
+    assertEquals(0, level1GroupResults.get(1).getGroupResults().size());
+
+    List<GroupResult> level2GroupResults = level1GroupResults.get(0).getGroupResults();
+    assertEquals("field2value2", level2GroupResults.get(0).getKey());
+    assertEquals(4, level2GroupResults.get(0).getTotal());
+    assertEquals(4.0, level2GroupResults.get(0).getScore(), 0.00001);
+    assertNull(level2GroupResults.get(0).getGroupedBy());
+    assertNull(level2GroupResults.get(0).getGroupResults());
+    assertEquals("field2value1", level2GroupResults.get(1).getKey());
+    assertEquals(3, level2GroupResults.get(1).getTotal());
+    assertEquals(3.0, level2GroupResults.get(1).getScore(), 0.00001);
+    assertNull(level2GroupResults.get(1).getGroupedBy());
+    assertNull(level2GroupResults.get(1).getGroupResults());
+  }
+
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrUpdateDaoTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrUpdateDaoTest.java
new file mode 100644
index 0000000..bed43ae
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrUpdateDaoTest.java
@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.dao;
+
+import static org.apache.metron.indexing.dao.IndexDao.COMMENTS_FIELD;
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.powermock.api.mockito.PowerMockito.doReturn;
+import static org.powermock.api.mockito.PowerMockito.spy;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.configuration.IndexingConfigurations;
+import org.apache.metron.common.zookeeper.ConfigurationsCache;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.search.AlertComment;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.dao.update.OriginalNotFoundException;
+import org.apache.metron.indexing.dao.update.PatchRequest;
+import org.apache.metron.indexing.util.IndexingCacheUtil;
+import org.apache.metron.solr.matcher.SolrInputDocumentListMatcher;
+import org.apache.metron.solr.matcher.SolrInputDocumentMatcher;
+import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.common.SolrInputDocument;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({CollectionAdminRequest.class})
+public class SolrUpdateDaoTest {
+
+  @Rule
+  public final ExpectedException exception = ExpectedException.none();
+
+  private SolrClient client;
+  private SolrRetrieveLatestDao solrRetrieveLatestDao;
+  private SolrUpdateDao solrUpdateDao;
+
+  private static AccessConfig accessConfig = new AccessConfig();
+
+  @BeforeClass
+  public static void setupBefore() {
+    accessConfig.setGlobalConfigSupplier(() ->
+        new HashMap<String, Object>() {{
+          put(SOLR_ZOOKEEPER, "zookeeper:2181");
+        }}
+    );
+    IndexingConfigurations indexingConfigs = mock(IndexingConfigurations.class);
+    ConfigurationsCache cache = mock(ConfigurationsCache.class);
+
+    Map<String, Object> broIndexingConfig = new HashMap<String, Object>() {{
+      put("solr", new HashMap<String, Object>() {{
+      }});
+    }};
+    when(indexingConfigs.getSensorIndexingConfig("bro")).thenReturn(broIndexingConfig);
+    when(cache.get(IndexingConfigurations.class)).thenReturn(indexingConfigs);
+
+    accessConfig.setIndexSupplier(IndexingCacheUtil.getIndexLookupFunction(cache, "solr"));
+  }
+
+  @SuppressWarnings("unchecked")
+  @Before
+  public void setUp() throws Exception {
+    client = mock(SolrClient.class);
+    solrRetrieveLatestDao = new SolrRetrieveLatestDao(client, accessConfig);
+    solrUpdateDao = new SolrUpdateDao(client, solrRetrieveLatestDao, accessConfig);
+  }
+
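+  /**
+   * With no explicit index supplied, update() should resolve the collection ("bro" here) from the
+   * document's sensor type via the configured index supplier.
+   */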
+  @Test
+  public void updateShouldProperlyUpdateDocumentImplicitIndex() throws Exception {
+    Document document = new Document(new HashMap<String, Object>(){{
+      put("field", "value");
+    }}, "guid", "bro", 0L);
+
+    SolrInputDocument solrInputDocument = new SolrInputDocument();
+    solrInputDocument.addField("field", "value");
+
+    solrUpdateDao.update(document, Optional.empty());
+
+    verify(client).add(eq("bro"), argThat(new SolrInputDocumentMatcher(solrInputDocument)));
+  }
+
+  @Test
+  public void updateShouldProperlyUpdateDocumentExplicitIndex() throws Exception {
+    Document document = new Document(new HashMap<String, Object>(){{
+      put("field", "value");
+    }}, "guid", "bro", 0L);
+
+    SolrInputDocument solrInputDocument = new SolrInputDocument();
+    solrInputDocument.addField("field", "value");
+
+    solrUpdateDao.update(document, Optional.of("bro"));
+
+    verify(client).add(eq("bro"), argThat(new SolrInputDocumentMatcher(solrInputDocument)));
+  }
+
+  @Test
+  public void batchUpdateShouldProperlyUpdateDocuments() throws Exception {
+    Document broDocument1 = new Document(new HashMap<String, Object>(){{
+      put("broField1", "value");
+      put("guid", "broGuid1");
+    }}, "broGuid1", "bro", 0L);
+    Document broDocument2 = new Document(new HashMap<String, Object>(){{
+      put("broField2", "value");
+      put("guid", "broGuid2");
+    }}, "broGuid2", "bro", 0L);
+
+    Map<Document, Optional<String>> updates = new HashMap<Document, Optional<String>>(){{
+      put(broDocument1, Optional.of("bro"));
+      put(broDocument2, Optional.of("bro"));
+    }};
+
+    SolrInputDocument broSolrInputDocument1 = new SolrInputDocument();
+    broSolrInputDocument1.addField("broField1", "value");
+    broSolrInputDocument1.addField("guid", "broGuid1");
+    SolrInputDocument broSolrInputDocument2 = new SolrInputDocument();
+    broSolrInputDocument2.addField("broField2", "value");
+    broSolrInputDocument2.addField("guid", "broGuid2");
+
+    solrUpdateDao.batchUpdate(updates);
+
+    verify(client).add(eq("bro"), argThat(new SolrInputDocumentListMatcher(Arrays.asList(broSolrInputDocument1, broSolrInputDocument2))));
+  }
+
+  @Test
+  public void batchUpdateShouldProperlyUpdateDocumentsWithoutIndex() throws Exception {
+    Document snortDocument1 = new Document(new HashMap<String, Object>(){{
+      put("snortField1", "value");
+      put("guid", "snortGuid1");
+    }}, "snortGuid1", "snort", 0L);
+    Document snortDocument2 = new Document(new HashMap<String, Object>(){{
+      put("snortField2", "value");
+      put("guid", "snortGuid2");
+    }}, "snortGuid2", "snort", 0L);
+
+    Map<Document, Optional<String>> updates = new HashMap<Document, Optional<String>>(){{
+      put(snortDocument1, Optional.empty());
+      put(snortDocument2, Optional.empty());
+    }};
+
+    SolrInputDocument snortSolrInputDocument1 = new SolrInputDocument();
+    snortSolrInputDocument1.addField("snortField1", "value");
+    snortSolrInputDocument1.addField("guid", "snortGuid1");
+    SolrInputDocument snortSolrInputDocument2 = new SolrInputDocument();
+    snortSolrInputDocument2.addField("snortField2", "value");
+    snortSolrInputDocument2.addField("guid", "snortGuid2");
+
+    solrUpdateDao.batchUpdate(updates);
+
+    verify(client).add(eq("snort"), argThat(new SolrInputDocumentListMatcher(Arrays.asList(snortSolrInputDocument1, snortSolrInputDocument2))));
+  }
+
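+  /**
+   * Comment maps should be flattened into their raw JSON string form before indexing, leaving the
+   * rest of the document untouched.
+   */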
+  @Test
+  public void testConvertCommentsToRaw() {
+    List<Map<String, Object>> commentList = new ArrayList<>();
+    Map<String, Object> comments = new HashMap<>();
+    comments.put("comment", "test comment");
+    comments.put("username", "test username");
+    comments.put("timestamp", 1526424323279L);
+    commentList.add(comments);
+
+    Map<String, Object> document = new HashMap<>();
+    document.put("testField", "testValue");
+    document.put(COMMENTS_FIELD, commentList);
+    solrUpdateDao.convertCommentsToRaw(document);
+
+    @SuppressWarnings("unchecked")
+    List<String> actualComments = (List<String>) document.get(COMMENTS_FIELD);
+    String expectedComment = "{\"comment\":\"test comment\",\"username\":\"test username\",\"timestamp\":1526424323279}";
+    assertEquals(expectedComment, actualComments.get(0));
+    assertEquals(1, actualComments.size());
+    assertEquals("testValue", document.get("testField"));
+  }
+
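+  /**
+   * getPatchedDocument should fetch the latest copy through the retrieve-latest DAO and apply the
+   * JSON-patch "add" operation on top of it, preserving existing fields such as comments.
+   */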
+  @Test
+  public void getPatchedDocument() throws IOException, OriginalNotFoundException {
+    // Create the document to be patched. Including comments
+    Map<String, Object> latestDoc = new HashMap<>();
+    latestDoc.put(Constants.GUID, "guid");
+    List<Map<String, Object>> comments = new ArrayList<>();
+    comments.add(new AlertComment("comment", "user", 0L).asMap());
+    comments.add(new AlertComment("comment_2", "user_2", 0L).asMap());
+    latestDoc.put(COMMENTS_FIELD, comments);
+    Document latest = new Document(latestDoc, "guid", "bro", 0L);
+
+    SolrRetrieveLatestDao retrieveLatestDao = spy(new SolrRetrieveLatestDao(null, accessConfig));
+    doReturn(latest).when(retrieveLatestDao).getLatest("guid", "bro");
+
+    // Create the patch
+    PatchRequest request = new PatchRequest();
+    request.setIndex("bro");
+    request.setSensorType("bro");
+    request.setGuid("guid");
+    List<Map<String, Object>> patchList = new ArrayList<>();
+    Map<String, Object> patch = new HashMap<>();
+    patch.put("op", "add");
+    patch.put("path", "/project");
+    patch.put("value", "metron");
+    patchList.add(patch);
+    request.setPatch(patchList);
+    Document actual = solrUpdateDao.getPatchedDocument(retrieveLatestDao, request, Optional.of(0L));
+
+    // Add the patch to our original document
+    latest.getDocument().put("project", "metron");
+    assertEquals(actual, latest);
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrUtilitiesTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrUtilitiesTest.java
new file mode 100644
index 0000000..f284f25
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrUtilitiesTest.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.dao;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.solr.common.SolrDocument;
+import org.junit.Test;
+
+public class SolrUtilitiesTest {
+
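+  /**
+   * toDocument should drop Solr's internal version field and carry the remaining fields, guid and
+   * sensor type over to the Metron Document.
+   */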
+  @Test
+  public void toDocumentShouldProperlyReturnDocument() throws Exception {
+    SolrDocument solrDocument = new SolrDocument();
+    solrDocument.addField(SolrDao.VERSION_FIELD, 1.0);
+    solrDocument.addField(Constants.GUID, "guid");
+    solrDocument.addField(Constants.SENSOR_TYPE, "bro");
+    solrDocument.addField("field", "value");
+
+    Document expectedDocument = new Document(new HashMap<String, Object>() {{
+      put("field", "value");
+      put(Constants.GUID, "guid");
+      put(Constants.SENSOR_TYPE, "bro");
+    }}, "guid", "bro", 0L);
+
+    Document actualDocument = SolrUtilities.toDocument(solrDocument);
+    assertEquals(expectedDocument, actualDocument);
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrIndexingIntegrationTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrIndexingIntegrationTest.java
index 256f23b..2f9b285 100644
--- a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrIndexingIntegrationTest.java
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrIndexingIntegrationTest.java
@@ -17,7 +17,13 @@
  */
 package org.apache.metron.solr.integration;
 
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+
 import com.google.common.base.Function;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import javax.annotation.Nullable;
 import org.apache.metron.common.configuration.Configurations;
 import org.apache.metron.common.configuration.ConfigurationsUtils;
 import org.apache.metron.common.field.FieldNameConverter;
@@ -33,14 +39,11 @@
 import org.apache.metron.integration.components.ZKServerComponent;
 import org.apache.metron.solr.integration.components.SolrComponent;
 
-import javax.annotation.Nullable;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
 
 public class SolrIndexingIntegrationTest extends IndexingIntegrationTest {
 
-  private String collection = "metron";
+  private String collection = "yaf";
+
   private FieldNameConverter fieldNameConverter = fieldName -> fieldName;
   @Override
   public FieldNameConverter getFieldNameConverter() {
@@ -50,8 +53,8 @@
   @Override
   public InMemoryComponent getSearchComponent(final Properties topologyProperties) throws Exception {
     SolrComponent solrComponent = new SolrComponent.Builder()
-            .addCollection(collection, "../metron-solr/src/test/resources/solr/conf")
-            .withPostStartCallback(new Function<SolrComponent, Void>() {
+        .addInitialCollection(collection, "../metron-solr/src/main/config/schema/yaf")
+        .withPostStartCallback(new Function<SolrComponent, Void>() {
               @Nullable
               @Override
               public Void apply(@Nullable SolrComponent solrComponent) {
@@ -60,7 +63,7 @@
                   String testZookeeperUrl = topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY);
                   Configurations configurations = SampleUtil.getSampleConfigs();
                   Map<String, Object> globalConfig = configurations.getGlobalConfig();
-                  globalConfig.put("solr.zookeeper", solrComponent.getZookeeperUrl());
+                  globalConfig.put(SOLR_ZOOKEEPER, solrComponent.getZookeeperUrl());
                   ConfigurationsUtils.writeGlobalConfigToZookeeper(JSONUtils.INSTANCE.toJSONPretty(globalConfig), testZookeeperUrl);
                 } catch (Exception e) {
                   e.printStackTrace();
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrMetaAlertIntegrationTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrMetaAlertIntegrationTest.java
new file mode 100644
index 0000000..6687e9a
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrMetaAlertIntegrationTest.java
@@ -0,0 +1,412 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.integration;
+
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.ALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_FIELD;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.METAALERT_TYPE;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.THREAT_FIELD_DEFAULT;
+import static org.apache.metron.indexing.dao.metaalert.MetaAlertConstants.THREAT_SORT_DEFAULT;
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+import static org.apache.metron.solr.dao.SolrMetaAlertDao.METAALERTS_COLLECTION;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConfig;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertIntegrationTest;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertStatus;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.indexing.dao.search.SearchResponse;
+import org.apache.metron.indexing.dao.search.SortField;
+import org.apache.metron.solr.dao.SolrDao;
+import org.apache.metron.solr.dao.SolrMetaAlertDao;
+import org.apache.metron.solr.dao.SolrMetaAlertRetrieveLatestDao;
+import org.apache.metron.solr.dao.SolrMetaAlertSearchDao;
+import org.apache.metron.solr.dao.SolrMetaAlertUpdateDao;
+import org.apache.metron.solr.integration.components.SolrComponent;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class SolrMetaAlertIntegrationTest extends MetaAlertIntegrationTest {
+
+  private static final String COLLECTION = "test";
+
+  private static SolrDao solrDao;
+  private static SolrComponent solr;
+
+  @BeforeClass
+  public static void setupBefore() throws Exception {
+    // Solr doesn't need retries, it'll show up after a commit.
+    MAX_RETRIES = 1;
+    // setup the client
+    solr = new SolrComponent.Builder().build();
+    solr.start();
+
+    AccessConfig accessConfig = new AccessConfig();
+    Map<String, Object> globalConfig = new HashMap<String, Object>() {
+      {
+        put("solr.clustername", "metron");
+        put("solr.port", "9300");
+        put("solr.ip", "localhost");
+        put("solr.date.format", DATE_FORMAT);
+        put(SOLR_ZOOKEEPER, solr.getZookeeperUrl());
+      }
+    };
+    accessConfig.setMaxSearchResults(1000);
+    accessConfig.setGlobalConfigSupplier(() -> globalConfig);
+    accessConfig.setMaxSearchGroups(100);
+    // Just use sensorType directly as the collection name.
+    accessConfig.setIndexSupplier(s -> s);
+
+    solrDao = new SolrDao();
+    solrDao.init(accessConfig);
+
+    MetaAlertConfig config = new MetaAlertConfig(METAALERTS_COLLECTION
+                             , THREAT_SORT_DEFAULT
+                             , () -> ImmutableMap.of(Constants.SENSOR_TYPE_FIELD_PROPERTY, Constants.SENSOR_TYPE
+                                                    , Constants.THREAT_SCORE_FIELD_PROPERTY, THREAT_FIELD_DEFAULT
+                                                    )
+    ) {
+
+      @Override
+      protected String getDefaultThreatTriageField() {
+        return THREAT_FIELD_DEFAULT.replace(':', '.');
+      }
+
+      @Override
+      protected String getDefaultSourceTypeField() {
+        return Constants.SENSOR_TYPE;
+      }
+    };
+
+
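+    // Wire the composite metaalert DAO together from its search, retrieve-latest, and update pieces.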
+    SolrMetaAlertSearchDao searchDao = new SolrMetaAlertSearchDao(
+        solrDao.getSolrClient(solrDao.getZkHosts()),
+        solrDao.getSolrSearchDao(), config);
+    SolrMetaAlertRetrieveLatestDao retrieveLatestDao = new SolrMetaAlertRetrieveLatestDao(solrDao);
+    SolrMetaAlertUpdateDao updateDao = new SolrMetaAlertUpdateDao(solrDao, searchDao,
+        retrieveLatestDao, config);
+    metaDao = new SolrMetaAlertDao(solrDao, searchDao, updateDao, retrieveLatestDao);
+  }
+
+  @Before
+  public void setup()
+      throws IOException, InterruptedException, SolrServerException, KeeperException {
+    solr.addCollection(METAALERTS_COLLECTION,
+        "../metron-solr/src/main/config/schema/metaalert");
+    solr.addCollection(SENSOR_NAME, "../metron-solr/src/test/resources/config/test/conf");
+  }
+
+  @AfterClass
+  public static void teardown() {
+    if (solr != null) {
+      solr.stop();
+    }
+  }
+
+  @After
+  public void reset() {
+    solr.reset();
+  }
+
+  @Test
+  @Override
+  @SuppressWarnings("unchecked")
+  public void shouldSearchByNestedAlert() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(4);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
+    alerts.get(0).put("ip_src_addr", "192.168.1.1");
+    alerts.get(0).put("ip_src_port", 8010);
+    alerts.get(1).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
+    alerts.get(1).put("ip_src_addr", "192.168.1.2");
+    alerts.get(1).put("ip_src_port", 8009);
+    alerts.get(2).put("ip_src_addr", "192.168.1.3");
+    alerts.get(2).put("ip_src_port", 8008);
+    alerts.get(3).put("ip_src_addr", "192.168.1.4");
+    alerts.get(3).put("ip_src_port", 8007);
+    addRecords(alerts, getTestIndexName(), SENSOR_NAME);
+
+    // Put the nested type into the test index, so that it'll match appropriately
+    setupTypings();
+
+    // Load metaAlerts
+    Map<String, Object> activeMetaAlert = buildMetaAlert("meta_active", MetaAlertStatus.ACTIVE,
+        Optional.of(Arrays.asList(alerts.get(0), alerts.get(1))));
+    Map<String, Object> inactiveMetaAlert = buildMetaAlert("meta_inactive",
+        MetaAlertStatus.INACTIVE,
+        Optional.of(Arrays.asList(alerts.get(2), alerts.get(3))));
+    // We pass METAALERT_TYPE as the docType, though addRecords ignores it for Solr.
+    addRecords(Arrays.asList(activeMetaAlert, inactiveMetaAlert), METAALERTS_COLLECTION,
+        METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Arrays.asList(
+        new GetRequest("message_0", SENSOR_NAME),
+        new GetRequest("message_1", SENSOR_NAME),
+        new GetRequest("message_2", SENSOR_NAME),
+        new GetRequest("message_3", SENSOR_NAME),
+        new GetRequest("meta_active", METAALERT_TYPE),
+        new GetRequest("meta_inactive", METAALERT_TYPE)));
+
+    SearchResponse searchResponse = metaDao.search(new SearchRequest() {
+      {
+        setQuery(
+            "ip_src_addr:192.168.1.1 AND ip_src_port:8010");
+        setIndices(Collections.singletonList(METAALERT_TYPE));
+        setFrom(0);
+        setSize(5);
+        setSort(Collections.singletonList(new SortField() {
+          {
+            setField(Constants.GUID);
+          }
+        }));
+      }
+    });
+    // Should have one result because Solr will return the parent.
+    Assert.assertEquals(1, searchResponse.getTotal());
+    // Ensure we returned the child alerts
+    List<Map<String, Object>> actualAlerts = (List<Map<String, Object>>) searchResponse.getResults()
+        .get(0).getSource()
+        .get(MetaAlertConstants.ALERT_FIELD);
+    Assert.assertEquals(2, actualAlerts.size());
+    Assert.assertEquals("meta_active",
+        searchResponse.getResults().get(0).getSource().get("guid"));
+
+    // Query against all indices. Only the single active meta alert should be returned.
+    // The child alerts should be hidden.
+    searchResponse = metaDao.search(new SearchRequest() {
+      {
+        setQuery(
+            "ip_src_addr:192.168.1.1 AND ip_src_port:8010");
+        setIndices(queryIndices);
+        setFrom(0);
+        setSize(5);
+        setSort(Collections.singletonList(new SortField() {
+          {
+            setField(Constants.GUID);
+          }
+        }));
+      }
+    });
+
+    // Query should match a parent alert
+    Assert.assertEquals(1, searchResponse.getTotal());
+    // Ensure we returned the child alerts
+    actualAlerts = (List<Map<String, Object>>) searchResponse.getResults().get(0).getSource()
+        .get(MetaAlertConstants.ALERT_FIELD);
+    Assert.assertEquals(2, actualAlerts.size());
+    Assert.assertEquals("meta_active",
+        searchResponse.getResults().get(0).getSource().get("guid"));
+
+    // Query against all indices. The child alert has no actual attached meta alerts, and should
+    // be returned on its own.
+    searchResponse = metaDao.search(new SearchRequest() {
+      {
+        setQuery(
+            "ip_src_addr:192.168.1.3 AND ip_src_port:8008");
+        setIndices(queryIndices);
+        setFrom(0);
+        setSize(1);
+        setSort(Collections.singletonList(new SortField() {
+          {
+            setField(Constants.GUID);
+          }
+        }));
+      }
+    });
+
+    // Query should match a plain alert
+    Assert.assertEquals(1, searchResponse.getTotal());
+    // Ensure we have no child alerts
+    actualAlerts = (List<Map<String, Object>>) searchResponse.getResults()
+        .get(0).getSource()
+        .get(MetaAlertConstants.ALERT_FIELD);
+    Assert.assertNull(actualAlerts);
+    Assert.assertEquals("message_2",
+        searchResponse.getResults().get(0).getSource().get("guid"));
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void shouldNotRetrieveFullChildrenWithoutSourceType() throws Exception {
+    // Load alerts
+    List<Map<String, Object>> alerts = buildAlerts(1);
+    alerts.get(0).put(METAALERT_FIELD, Collections.singletonList("meta_active"));
+    alerts.get(0).put("ip_src_addr", "192.168.1.1");
+    alerts.get(0).put("ip_src_port", 8010);
+    addRecords(alerts, getTestIndexName(), SENSOR_NAME);
+
+    // Put the nested type into the test index, so that it'll match appropriately
+    setupTypings();
+
+    // Load metaAlerts
+    Map<String, Object> activeMetaAlert = buildMetaAlert("meta_active", MetaAlertStatus.ACTIVE,
+        Optional.of(Arrays.asList(alerts.get(0))));
+    // We pass METAALERT_TYPE as the docType, though addRecords ignores it for Solr.
+    addRecords(Collections.singletonList(activeMetaAlert), METAALERTS_COLLECTION, METAALERT_TYPE);
+
+    // Verify load was successful
+    findCreatedDocs(Collections.singletonList(new GetRequest("meta_active", METAALERT_TYPE)));
+
+    SearchResponse searchResponse = metaDao.search(new SearchRequest() {
+      {
+        setQuery(
+            "ip_src_addr:192.168.1.1 AND ip_src_port:8010");
+        setIndices(Collections.singletonList(METAALERT_TYPE));
+        setFrom(0);
+        setSize(5);
+        setFields(Collections.singletonList(Constants.GUID));
+        setSort(Collections.singletonList(new SortField() {
+          {
+            setField(Constants.GUID);
+          }
+        }));
+      }
+    });
+
+    // Should have one result because Solr will return the parent.
+    Assert.assertEquals(1, searchResponse.getTotal());
+    // Ensure we didn't return the child alerts
+    List<Map<String, Object>> actualAlerts = (List<Map<String, Object>>) searchResponse.getResults()
+        .get(0).getSource()
+        .get(MetaAlertConstants.ALERT_FIELD);
+    Assert.assertNull(actualAlerts);
+    Assert.assertEquals("meta_active",
+        searchResponse.getResults().get(0).getSource().get("guid"));
+  }
+
+  @Override
+  protected long getMatchingAlertCount(String fieldName, Object fieldValue)
+      throws InterruptedException {
+    long cnt = 0;
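+    // Poll for a matching document, sleeping between attempts, until one shows up or retries run out.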
+    for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
+      List<Map<String, Object>> docs = solr.getAllIndexedDocs(getTestIndexName());
+      cnt = docs
+          .stream()
+          .filter(d -> {
+            Object newfield = d.get(fieldName);
+            return newfield != null && newfield.equals(fieldValue);
+          }).count();
+    }
+    return cnt;
+  }
+
+  @Override
+  protected long getMatchingMetaAlertCount(String fieldName, String fieldValue)
+      throws InterruptedException {
+    long cnt = 0;
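+    // Same polling loop, but a metaalert counts as a match when any of its nested alerts carries the value.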
+    for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
+      List<Map<String, Object>> docs = solr.getAllIndexedDocs(METAALERTS_COLLECTION);
+      cnt = docs
+          .stream()
+          .filter(d -> {
+            @SuppressWarnings("unchecked")
+            List<Map<String, Object>> alerts = (List<Map<String, Object>>) d
+                .get(ALERT_FIELD);
+            if (alerts == null) {
+              return false;
+            }
+
+            for (Map<String, Object> alert : alerts) {
+              Object newField = alert.get(fieldName);
+              if (newField != null && newField.equals(fieldValue)) {
+                return true;
+              }
+            }
+
+            return false;
+          }).count();
+    }
+    return cnt;
+  }
+
+  @Override
+  protected void addRecords(List<Map<String, Object>> inputData, String index, String docType)
+      throws IOException {
+    // Ignore docType for Solr. It's unused.
+    try {
+      solr.addDocs(index, inputData);
+    } catch (SolrServerException e) {
+      throw new IOException("Unable to load Solr Docs", e);
+    }
+  }
+
+  @Override
+  protected void setupTypings() {
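+    // No-op for Solr: the collection configs uploaded in setup() already define the schemas.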
+
+  }
+
+  @Override
+  protected String getTestIndexName() {
+    return COLLECTION;
+  }
+
+  @Override
+  protected String getMetaAlertIndex() {
+    return METAALERTS_COLLECTION;
+  }
+
+  @Override
+  protected String getSourceTypeField() {
+    return Constants.SENSOR_TYPE;
+  }
+
+  @Override
+  protected void commit() throws IOException {
+    try {
+      List<String> collections = solr.getSolrClient().listCollections();
+      for (String collection : collections) {
+        solr.getSolrClient().commit(collection);
+      }
+    } catch (SolrServerException e) {
+      throw new IOException("Unable to commit", e);
+    }
+  }
+
+  @Override
+  protected void setEmptiedMetaAlertField(Map<String, Object> docMap) {
+    docMap.remove(METAALERT_FIELD);
+  }
+
+  @Override
+  protected boolean isFiniteDoubleOnly() {
+    return false;
+  }
+
+  @Override
+  protected boolean isEmptyMetaAlertList() {
+    return false;
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrRetrieveLatestIntegrationTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrRetrieveLatestIntegrationTest.java
new file mode 100644
index 0000000..f7c2e86
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrRetrieveLatestIntegrationTest.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.solr.integration;
+
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Iterables;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.search.GetRequest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.solr.dao.SolrDao;
+import org.apache.metron.solr.integration.components.SolrComponent;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class SolrRetrieveLatestIntegrationTest {
+
+  private static SolrComponent solrComponent;
+
+  protected static final String TEST_COLLECTION = "test";
+  protected static final String TEST_SENSOR = "test_sensor";
+  protected static final String BRO_SENSOR = "bro";
+
+  private static IndexDao dao;
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    solrComponent = new SolrComponent.Builder().build();
+    solrComponent.start();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    solrComponent
+        .addCollection(TEST_COLLECTION, "../metron-solr/src/test/resources/config/test/conf");
+    solrComponent.addCollection(BRO_SENSOR, "../metron-solr/src/main/config/schema/bro");
+
+    AccessConfig accessConfig = new AccessConfig();
+    Map<String, Object> globalConfig = new HashMap<>();
+    globalConfig.put(SOLR_ZOOKEEPER, solrComponent.getZookeeperUrl());
+    accessConfig.setGlobalConfigSupplier(() -> globalConfig);
+    // Map the sensor name to the collection name for test.
+    accessConfig.setIndexSupplier(s -> s.equals(TEST_SENSOR) ? TEST_COLLECTION : s);
+
+    dao = new SolrDao();
+    dao.init(accessConfig);
+    addData(BRO_SENSOR, BRO_SENSOR);
+    addData(TEST_COLLECTION, TEST_SENSOR);
+  }
+
+  @After
+  public void reset() {
+    solrComponent.reset();
+  }
+
+  @AfterClass
+  public static void teardown() {
+    solrComponent.stop();
+  }
+
+  @Test
+  public void testGetLatest() throws IOException {
+    Document actual = dao.getLatest("message_1_bro", BRO_SENSOR);
+    assertEquals(buildExpectedDocument(BRO_SENSOR, 1), actual);
+  }
+
+  @Test
+  public void testGetMissing() throws IOException {
+    Document actual = dao.getLatest("message_1_bro", TEST_SENSOR);
+    assertNull(actual);
+  }
+
+  @Test
+  public void testGetBrokenMapping() throws IOException {
+    AccessConfig accessConfig = new AccessConfig();
+    Map<String, Object> globalConfig = new HashMap<>();
+    globalConfig.put(SOLR_ZOOKEEPER, solrComponent.getZookeeperUrl());
+    accessConfig.setGlobalConfigSupplier(() -> globalConfig);
+    // Return a broken (null) mapping to simulate a missing collection.
+    accessConfig.setIndexSupplier(s -> null);
+
+    dao = new SolrDao();
+    dao.init(accessConfig);
+
+    Document actual = dao.getLatest("message_1_bro", TEST_SENSOR);
+    assertNull(actual);
+  }
+
+  @Test
+  public void testGetLatestCollectionSensorDiffer() throws IOException {
+    Document actual = dao.getLatest("message_1_test_sensor", TEST_SENSOR);
+    assertEquals(buildExpectedDocument(TEST_SENSOR, 1), actual);
+  }
+
+  @Test
+  public void testGetAllLatest() throws IOException {
+    List<GetRequest> requests = new ArrayList<>();
+    requests.add(buildGetRequest(BRO_SENSOR, 1));
+    requests.add(buildGetRequest(BRO_SENSOR, 2));
+
+    Iterable<Document> actual = dao.getAllLatest(requests);
+    assertTrue(Iterables.contains(actual, buildExpectedDocument(BRO_SENSOR, 1)));
+    assertTrue(Iterables.contains(actual, buildExpectedDocument(BRO_SENSOR, 2)));
+    assertEquals(2, Iterables.size(actual));
+  }
+
+  @Test
+  public void testGetAllLatestCollectionExplicitIndex() throws IOException {
+    List<GetRequest> requests = new ArrayList<>();
+    GetRequest getRequestOne = buildGetRequest(TEST_SENSOR, 1);
+    // Explicitly use the incorrect index, which verifies that the explicit index takes
+    // precedence over the implicit one.
+    getRequestOne.setIndex(BRO_SENSOR);
+    requests.add(getRequestOne);
+
+    Iterable<Document> actual = dao.getAllLatest(requests);
+    // Expect 0 because the explicit index was incorrect.
+    assertEquals(0, Iterables.size(actual));
+  }
+
+  @Test
+  public void testGetAllLatestCollectionSensorMixed() throws IOException {
+    List<GetRequest> requests = new ArrayList<>();
+    requests.add(buildGetRequest(TEST_SENSOR, 1));
+    requests.add(buildGetRequest(BRO_SENSOR, 2));
+
+    Iterable<Document> actual = dao.getAllLatest(requests);
+    assertTrue(Iterables.contains(actual, buildExpectedDocument(TEST_SENSOR, 1)));
+    assertTrue(Iterables.contains(actual, buildExpectedDocument(BRO_SENSOR, 2)));
+    assertEquals(2, Iterables.size(actual));
+  }
+
+  @Test
+  public void testGetAllLatestCollectionOneMissing() throws IOException {
+    List<GetRequest> requests = new ArrayList<>();
+    requests.add(buildGetRequest(TEST_SENSOR, 1));
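+    // This request pairs a bro guid with the test sensor type, so the lookup goes to the wrong collection and misses.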
+    GetRequest brokenRequest = new GetRequest();
+    brokenRequest.setGuid(buildGuid(BRO_SENSOR, 2));
+    brokenRequest.setSensorType(TEST_SENSOR);
+    requests.add(brokenRequest);
+
+    Iterable<Document> actual = dao.getAllLatest(requests);
+    assertTrue(Iterables.contains(actual, buildExpectedDocument(TEST_SENSOR, 1)));
+    assertEquals(1, Iterables.size(actual));
+  }
+
+  protected Document buildExpectedDocument(String sensor, int i) {
+    Map<String, Object> expectedMapOne = new HashMap<>();
+    expectedMapOne.put("source.type", sensor);
+    expectedMapOne.put(Constants.GUID, buildGuid(sensor, i));
+    return new Document(expectedMapOne, buildGuid(sensor, i), sensor, 0L);
+  }
+
+  protected GetRequest buildGetRequest(String sensor, int i) {
+    GetRequest requestOne = new GetRequest();
+    requestOne.setGuid(buildGuid(sensor, i));
+    requestOne.setSensorType(sensor);
+    return requestOne;
+  }
+
+  protected static void addData(String collection, String sensorName)
+      throws IOException, SolrServerException {
+    List<Map<String, Object>> inputData = new ArrayList<>();
+    for (int i = 0; i < 3; ++i) {
+      final String name = buildGuid(sensorName, i);
+      HashMap<String, Object> inputMap = new HashMap<>();
+      inputMap.put("source.type", sensorName);
+      inputMap.put(Constants.GUID, name);
+      inputData.add(inputMap);
+    }
+    solrComponent.addDocs(collection, inputData);
+  }
+
+  protected static String buildGuid(String sensorName, int i) {
+    return "message_" + i + "_" + sensorName;
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrSearchIntegrationTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrSearchIntegrationTest.java
new file mode 100644
index 0000000..4390fd1
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrSearchIntegrationTest.java
@@ -0,0 +1,242 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.integration;
+
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.SearchIntegrationTest;
+import org.apache.metron.indexing.dao.search.FieldType;
+import org.apache.metron.indexing.dao.search.InvalidSearchException;
+import org.apache.metron.indexing.dao.search.SearchRequest;
+import org.apache.metron.integration.InMemoryComponent;
+import org.apache.metron.solr.dao.SolrDao;
+import org.apache.metron.solr.integration.components.SolrComponent;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.json.simple.JSONArray;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class SolrSearchIntegrationTest extends SearchIntegrationTest {
+  private static SolrComponent solrComponent;
+  private static IndexDao dao;
+
+  @BeforeClass
+  public static void setupClass() throws Exception {
+    indexComponent = startIndex();
+    dao = createDao();
+    // The search data is static, so we can load it once here rather than before each test.
+    broData = SearchIntegrationTest.broData.replace("source:type", "source.type");
+    snortData = SearchIntegrationTest.snortData.replace("source:type", "source.type");
+    solrComponent.addCollection("bro", "../metron-solr/src/main/config/schema/bro");
+    solrComponent.addCollection("snort", "../metron-solr/src/main/config/schema/snort");
+    loadTestData();
+  }
+
+  @AfterClass
+  public static void teardown() {
+    if (solrComponent != null) {
+      solrComponent.stop();
+    }
+  }
+
+  @Override
+  public IndexDao getIndexDao() {
+    return dao;
+  }
+
+  protected static IndexDao createDao() {
+    AccessConfig config = new AccessConfig();
+    config.setMaxSearchResults(100);
+    config.setMaxSearchGroups(100);
+    config.setGlobalConfigSupplier( () ->
+        new HashMap<String, Object>() {{
+          put(SOLR_ZOOKEEPER, solrComponent.getZookeeperUrl());
+        }}
+    );
+
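+    // Collections are named directly after their sensor types here, so the identity mapping suffices.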
+    config.setIndexSupplier( sensorType -> sensorType);
+    IndexDao dao = new SolrDao();
+    dao.init(config);
+    return dao;
+  }
+
+  protected static InMemoryComponent startIndex() throws Exception {
+    solrComponent = new SolrComponent.Builder().build();
+    solrComponent.start();
+    return solrComponent;
+  }
+
+  @SuppressWarnings("unchecked")
+  protected static void loadTestData() throws ParseException, IOException, SolrServerException {
+    JSONArray broArray = (JSONArray) new JSONParser().parse(broData);
+    solrComponent.addDocs("bro", broArray);
+    JSONArray snortArray = (JSONArray) new JSONParser().parse(snortData);
+    solrComponent.addDocs("snort", snortArray);
+  }
+
+  @Override
+  @Test
+  public void returns_column_metadata_for_specified_indices() throws Exception {
+    // getColumnMetadata with only bro
+    {
+      Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Collections.singletonList("bro"));
+      // Don't test all fields, just test a sample of different fields
+      Assert.assertEquals(263, fieldTypes.size());
+
+      // Fields present in both with same type
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("guid"));
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source.type"));
+      Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
+      Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
+
+      // Bro only field
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("username"));
+
+      // A dynamic field present in both with same type
+      Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("score"));
+
+      // Dynamic field present in both with nonstandard types.
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
+
+      // Field with nonstandard type
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("timestamp"));
+
+      // Bro only field in the dynamic catch all
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("bro_field"));
+
+      // A field present in both bro and snort, with a different type in each.
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("ttl"));
+
+      // Field only present in Snort
+      Assert.assertNull(fieldTypes.get("dgmlen"));
+
+      // Field that doesn't exist
+      Assert.assertNull(fieldTypes.get("fake.field"));
+    }
+    // getColumnMetadata with only snort
+    {
+      Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Collections.singletonList("snort"));
+      Assert.assertEquals(33, fieldTypes.size());
+
+      // Fields present in both with same type
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("guid"));
+      Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source.type"));
+      Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
+      Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
+
+      // Snort only field
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("dgmlen"));
+
+      // A dynamic field present in both with same type
+      Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("score"));
+
+      // Dynamic field present in both with nonstandard types.
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
+
+      // Field with nonstandard type
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("timestamp"));
+
+      // Snort only field in the dynamic catch all
+      Assert.assertEquals(FieldType.OTHER, fieldTypes.get("snort_field"));
+
+      // A field present in both bro and snort, with a different type in each.
+      Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ttl"));
+
+      // Field only present in Bro
+      Assert.assertNull(fieldTypes.get("username"));
+
+      // Field that doesn't exist
+      Assert.assertNull(fieldTypes.get("fake.field"));
+    }
+  }
+
+  @Override
+  @Test
+  public void returns_column_data_for_multiple_indices() throws Exception {
+    Map<String, FieldType> fieldTypes = dao.getColumnMetadata(Arrays.asList("bro", "snort"));
+    // Don't test everything, just test a variety of fields, including fields across collections.
+
+    // Fields present in both with same type
+    Assert.assertEquals(FieldType.TEXT, fieldTypes.get("guid"));
+    Assert.assertEquals(FieldType.TEXT, fieldTypes.get("source.type"));
+    Assert.assertEquals(FieldType.IP, fieldTypes.get("ip_src_addr"));
+    Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("ip_src_port"));
+    Assert.assertEquals(FieldType.BOOLEAN, fieldTypes.get("is_alert"));
+
+    // Bro only field
+    Assert.assertEquals(FieldType.TEXT, fieldTypes.get("username"));
+
+    // Snort only field
+    Assert.assertEquals(FieldType.INTEGER, fieldTypes.get("dgmlen"));
+
+    // A dynamic field present in both with same type
+    Assert.assertEquals(FieldType.FLOAT, fieldTypes.get("score"));
+
+    // Dynamic field present in both with nonstandard types.
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("location_point"));
+
+    // Field present in both with nonstandard type
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("timestamp"));
+
+    // Bro only field in the dynamic catch all
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("bro_field"));
+
+    // Snort only field in the dynamic catch all
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("snort_field"));
+
+    // A field present in both bro and snort, with a different type in each; the merged metadata reports OTHER.
+    Assert.assertEquals(FieldType.OTHER, fieldTypes.get("ttl"));
+
+    // Field that doesn't exist
+    Assert.assertNull(fieldTypes.get("fake.field"));
+  }
+
+  @Test
+  public void different_type_filter_query() throws Exception {
+    thrown.expect(InvalidSearchException.class);
+    SearchRequest request = JSONUtils.INSTANCE.load(differentTypeFilterQuery, SearchRequest.class);
+    dao.search(request);
+  }
+
+  @Override
+  protected String getSourceTypeField() {
+    return Constants.SENSOR_TYPE;
+  }
+
+  @Override
+  protected String getIndexName(String sensorType) {
+    return sensorType;
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrUpdateIntegrationTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrUpdateIntegrationTest.java
new file mode 100644
index 0000000..5b96559
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/SolrUpdateIntegrationTest.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.integration;
+
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.metron.common.configuration.ConfigurationsUtils;
+import org.apache.metron.common.zookeeper.ZKConfigurationsCache;
+import org.apache.metron.hbase.mock.MockHBaseTableProvider;
+import org.apache.metron.hbase.mock.MockHTable;
+import org.apache.metron.indexing.dao.AccessConfig;
+import org.apache.metron.indexing.dao.HBaseDao;
+import org.apache.metron.indexing.dao.IndexDao;
+import org.apache.metron.indexing.dao.MultiIndexDao;
+import org.apache.metron.indexing.dao.UpdateIntegrationTest;
+import org.apache.metron.indexing.dao.update.Document;
+import org.apache.metron.indexing.util.IndexingCacheUtil;
+import org.apache.metron.solr.dao.SolrDao;
+import org.apache.metron.solr.integration.components.SolrComponent;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class SolrUpdateIntegrationTest extends UpdateIntegrationTest {
+  @Rule
+  public final ExpectedException exception = ExpectedException.none();
+
+  private static SolrComponent solrComponent;
+
+  private static final String TABLE_NAME = "modifications";
+  private static final String CF = "p";
+  private static MockHTable table;
+  private static IndexDao hbaseDao;
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    solrComponent = new SolrComponent.Builder().build();
+    solrComponent.start();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    solrComponent.addCollection(SENSOR_NAME, "../metron-solr/src/test/resources/config/test/conf");
+    solrComponent.addCollection("error", "../metron-solr/src/main/config/schema/error");
+
+    Configuration config = HBaseConfiguration.create();
+    MockHBaseTableProvider tableProvider = new MockHBaseTableProvider();
+    MockHBaseTableProvider.addToCache(TABLE_NAME, CF);
+    table = (MockHTable) tableProvider.getTable(config, TABLE_NAME);
+
+    hbaseDao = new HBaseDao();
+    AccessConfig accessConfig = new AccessConfig();
+    accessConfig.setTableProvider(tableProvider);
+    Map<String, Object> globalConfig = createGlobalConfig();
+    globalConfig.put(HBaseDao.HBASE_TABLE, TABLE_NAME);
+    globalConfig.put(HBaseDao.HBASE_CF, CF);
+    accessConfig.setGlobalConfigSupplier(() -> globalConfig);
+    accessConfig.setIndexSupplier(s -> s);
+
+    CuratorFramework client = ConfigurationsUtils
+        .getClient(solrComponent.getZookeeperUrl());
+    client.start();
+    ZKConfigurationsCache cache = new ZKConfigurationsCache(client);
+    cache.start();
+    accessConfig.setIndexSupplier(IndexingCacheUtil.getIndexLookupFunction(cache, "solr"));
+
+    MultiIndexDao dao = new MultiIndexDao(hbaseDao, new SolrDao());
+    dao.init(accessConfig);
+    setDao(dao);
+  }
+
+  @After
+  public void reset() {
+    solrComponent.reset();
+    table.clear();
+  }
+
+  @AfterClass
+  public static void teardown() {
+    solrComponent.stop();
+  }
+
+  @Override
+  protected String getIndexName() {
+    return SENSOR_NAME;
+  }
+
+  @Override
+  protected MockHTable getMockHTable() {
+    return table;
+  }
+
+  private static Map<String, Object> createGlobalConfig() {
+    return new HashMap<String, Object>() {{
+      put(SOLR_ZOOKEEPER, solrComponent.getZookeeperUrl());
+    }};
+  }
+
+  @Override
+  protected void addTestData(String indexName, String sensorType,
+      List<Map<String, Object>> docs) throws Exception {
+    solrComponent.addDocs(indexName, docs);
+  }
+
+  @Override
+  protected List<Map<String, Object>> getIndexedTestData(String indexName, String sensorType) {
+    return solrComponent.getAllIndexedDocs(indexName);
+  }
+
+  @Test
+  public void suppress_expanded_fields() throws Exception {
+    Map<String, Object> fields = new HashMap<>();
+    fields.put("guid", "bro_1");
+    fields.put("source.type", SENSOR_NAME);
+    fields.put("ip_src_port", 8010);
+    fields.put("long_field", 10000);
+    fields.put("latitude", 48.5839);
+    fields.put("score", 10.0);
+    fields.put("is_alert", true);
+    fields.put("field.location_point", "48.5839,7.7455");
+
+    Document document = new Document(fields, "bro_1", SENSOR_NAME, 0L);
+    getDao().update(document, Optional.of(SENSOR_NAME));
+
+    Document indexedDocument = getDao().getLatest("bro_1", SENSOR_NAME);
+
+    // assert no extra expanded fields are included
+    assertEquals(8, indexedDocument.getDocument().size());
+  }
+
+  @Test
+  public void testHugeErrorFields() throws Exception {
+    String hugeString = StringUtils.repeat("test ", 1_000_000);
+    String hugeStringTwo = hugeString + "-2";
+
+    Map<String, Object> documentMap = new HashMap<>();
+    documentMap.put("guid", "error_guid");
+    // Needs to be over 32KB
+    documentMap.put("raw_message", hugeString);
+    documentMap.put("raw_message_1", hugeStringTwo);
+    Document errorDoc = new Document(documentMap, "error", "error", 0L);
+    getDao().update(errorDoc, Optional.of("error"));
+
+    // Ensure the huge strings are returned intact for fields that aren't string fields
+    Document latest = getDao().getLatest("error_guid", "error");
+    String actual = (String) latest.getDocument().get("raw_message");
+    assertEquals(hugeString, actual);
+    String actualTwo = (String) latest.getDocument().get("raw_message_1");
+    assertEquals(hugeStringTwo, actualTwo);
+
+    // Validate that error occurs for string fields.
+    documentMap.put("error_hash", hugeString);
+    errorDoc = new Document(documentMap, "error", "error", 0L);
+
+    exception.expect(IOException.class);
+    exception.expectMessage("Document contains at least one immense term in field=\"error_hash\"");
+    getDao().update(errorDoc, Optional.of("error"));
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/components/SolrComponent.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/components/SolrComponent.java
index 58976a3..4bc9f8a 100644
--- a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/components/SolrComponent.java
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/components/SolrComponent.java
@@ -18,14 +18,22 @@
 package org.apache.metron.solr.integration.components;
 
 import com.google.common.base.Function;
+import java.util.Collection;
+import java.util.Map.Entry;
+import java.util.stream.Collectors;
+import org.apache.metron.common.Constants;
+import org.apache.metron.indexing.dao.metaalert.MetaAlertConstants;
 import org.apache.metron.integration.InMemoryComponent;
 import org.apache.metron.integration.UnableToStartException;
+import org.apache.metron.solr.dao.SolrUtilities;
 import org.apache.metron.solr.writer.MetronSolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.embedded.JettyConfig;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.client.solrj.response.UpdateResponse;
 import org.apache.solr.cloud.MiniSolrCloudCluster;
 import org.apache.solr.common.SolrDocument;
 
@@ -36,13 +44,16 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.zookeeper.KeeperException;
 
 public class SolrComponent implements InMemoryComponent {
 
   public static class Builder {
+
     private int port = 8983;
     private String solrXmlPath = "../metron-solr/src/test/resources/solr/solr.xml";
-    private Map<String, String> collections = new HashMap<>();
+    private Map<String, String> initialCollections = new HashMap<>();
     private Function<SolrComponent, Void> postStartCallback;
 
     public Builder withPort(int port) {
@@ -55,8 +66,8 @@
       return this;
     }
 
-    public Builder addCollection(String name, String configPath) {
-      collections.put(name, configPath);
+    public Builder addInitialCollection(String name, String configPath) {
+      initialCollections.put(name, configPath);
       return this;
     }
 
@@ -65,9 +76,8 @@
       return this;
     }
 
-    public SolrComponent build() throws Exception {
-      if (collections.isEmpty()) throw new Exception("Must add at least 1 collection");
-      return new SolrComponent(port, solrXmlPath, collections, postStartCallback);
+    public SolrComponent build() {
+      return new SolrComponent(port, solrXmlPath, initialCollections, postStartCallback);
     }
   }
 
@@ -77,7 +87,8 @@
   private MiniSolrCloudCluster miniSolrCloudCluster;
   private Function<SolrComponent, Void> postStartCallback;
 
-  private SolrComponent(int port, String solrXmlPath, Map<String, String> collections, Function<SolrComponent, Void> postStartCallback) throws Exception {
+  private SolrComponent(int port, String solrXmlPath, Map<String, String> collections,
+      Function<SolrComponent, Void> postStartCallback) {
     this.port = port;
     this.solrXmlPath = solrXmlPath;
     this.collections = collections;
@@ -89,14 +100,17 @@
     try {
       File baseDir = Files.createTempDirectory("solrcomponent").toFile();
       baseDir.deleteOnExit();
-      miniSolrCloudCluster = new MiniSolrCloudCluster(1, baseDir, new File(solrXmlPath), JettyConfig.builder().setPort(port).build());
+      miniSolrCloudCluster = new MiniSolrCloudCluster(1, baseDir.toPath(),
+          JettyConfig.builder().setPort(port).build());
       for(String name: collections.keySet()) {
         String configPath = collections.get(name);
-        miniSolrCloudCluster.uploadConfigDir(new File(configPath), name);
+        miniSolrCloudCluster.uploadConfigSet(new File(configPath).toPath(), name);
+        CollectionAdminRequest.createCollection(name, 1, 1).process(miniSolrCloudCluster.getSolrClient());
       }
-      miniSolrCloudCluster.createCollection("metron", 1, 1, "metron", new HashMap<String, String>());
-      if (postStartCallback != null) postStartCallback.apply(this);
-    } catch(Exception e) {
+      if (postStartCallback != null) {
+        postStartCallback.apply(this);
+      }
+    } catch (Exception e) {
       throw new UnableToStartException(e.getMessage(), e);
     }
   }
@@ -104,16 +118,18 @@
   @Override
   public void stop() {
     try {
+      miniSolrCloudCluster.deleteAllCollections();
       miniSolrCloudCluster.shutdown();
     } catch (Exception e) {
+      // Do nothing
     }
   }
 
   @Override
   public void reset() {
     try {
-      miniSolrCloudCluster.deleteCollection("metron");
-    } catch (SolrServerException | IOException e) {
+      miniSolrCloudCluster.deleteAllCollections();
+    } catch (Exception e) {
       // Do nothing
     }
   }
@@ -130,12 +146,19 @@
     return miniSolrCloudCluster.getZkServer().getZkAddress();
   }
 
+  public void addCollection(String name, String configPath)
+      throws InterruptedException, IOException, KeeperException, SolrServerException {
+    miniSolrCloudCluster.uploadConfigSet(new File(configPath).toPath(), name);
+    CollectionAdminRequest.createCollection(name, 1, 1)
+        .process(miniSolrCloudCluster.getSolrClient());
+  }
+
   public boolean hasCollection(String collection) {
     MetronSolrClient solr = getSolrClient();
     boolean collectionFound = false;
     try {
       collectionFound = solr.listCollections().contains(collection);
-    } catch(Exception e) {
+    } catch (Exception e) {
       e.printStackTrace();
     }
     return collectionFound;
@@ -146,16 +169,64 @@
     CloudSolrClient solr = miniSolrCloudCluster.getSolrClient();
     solr.setDefaultCollection(collection);
     SolrQuery parameters = new SolrQuery();
-    parameters.set("q", "*:*");
+
+    // If it's metaalert, we need to adjust the query. We want child docs with the parent,
+    // not separate.
+    if (collection.equals("metaalert")) {
+      parameters.setQuery("source.type:metaalert")
+          .setFields("*", "[child parentFilter=source.type:metaalert limit=999]");
+    } else {
+      parameters.set("q", "*:*");
+    }
     try {
       solr.commit();
       QueryResponse response = solr.query(parameters);
       for (SolrDocument solrDocument : response.getResults()) {
-        docs.add(solrDocument);
+        // Use the utils to make sure we get child docs.
+        docs.add(SolrUtilities.toDocument(solrDocument).getDocument());
       }
     } catch (SolrServerException | IOException e) {
       e.printStackTrace();
     }
     return docs;
   }
+
+  public void addDocs(String collection, List<Map<String, Object>> docs)
+      throws IOException, SolrServerException {
+    CloudSolrClient solr = miniSolrCloudCluster.getSolrClient();
+    solr.setDefaultCollection(collection);
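+    // Convert each map into a SolrInputDocument, turning any list of maps into Solr child documents.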
+    Collection<SolrInputDocument> solrInputDocuments = docs.stream().map(doc -> {
+      SolrInputDocument solrInputDocument = new SolrInputDocument();
+      for (Entry<String, Object> entry : doc.entrySet()) {
+        // If the value is a list of maps (other than the metaalert field), add each map as a
+        // child document. Handles one level of nesting.
+        if (entry.getValue() instanceof List && !entry.getKey().equals(
+            MetaAlertConstants.METAALERT_FIELD)) {
+          for (Object entryItem : (List<?>) entry.getValue()) {
+            if (entryItem instanceof Map) {
+              @SuppressWarnings("unchecked")
+              Map<String, Object> childDoc = (Map<String, Object>) entryItem;
+              SolrInputDocument childInputDoc = new SolrInputDocument();
+              for (Entry<String, Object> childEntry : childDoc.entrySet()) {
+                childInputDoc.addField(childEntry.getKey(), childEntry.getValue());
+              }
+              solrInputDocument.addChildDocument(childInputDoc);
+            }
+          }
+        } else {
+          solrInputDocument.addField(entry.getKey(), entry.getValue());
+        }
+      }
+      return solrInputDocument;
+    }).collect(Collectors.toList());
+
+    checkUpdateResponse(solr.add(collection, solrInputDocuments));
+    // Make sure to commit so things show up
+    checkUpdateResponse(solr.commit(true, true));
+  }
+
+  protected void checkUpdateResponse(UpdateResponse result) throws IOException {
+    if (result.getStatus() != 0) {
+      throw new IOException("Response error received while adding documents: " + result);
+    }
+  }
 }
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/schema/SchemaValidationIntegrationTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/schema/SchemaValidationIntegrationTest.java
new file mode 100644
index 0000000..1a8e290
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/integration/schema/SchemaValidationIntegrationTest.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.integration.schema;
+
+import com.google.common.collect.Iterables;
+import com.google.common.io.Files;
+import org.apache.metron.common.configuration.writer.WriterConfiguration;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.common.writer.BulkWriterResponse;
+import org.apache.metron.solr.integration.components.SolrComponent;
+import org.apache.metron.solr.writer.SolrWriter;
+import org.apache.metron.stellar.common.utils.ConversionUtils;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.storm.tuple.Tuple;
+import org.apache.zookeeper.KeeperException;
+import org.json.simple.JSONObject;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.metron.solr.SolrConstants.SOLR_ZOOKEEPER;
+import static org.mockito.Mockito.mock;
+
+public class SchemaValidationIntegrationTest {
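+  // Reads the example messages for a sensor from src/test/resources, skipping comments and blank lines.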
+  public static Iterable<String> getData(String sensor) throws IOException {
+    return Iterables.filter(
+            Files.readLines(new File("src/test/resources/example_data/" + sensor), Charset.defaultCharset()),
+            s -> !s.startsWith("#") && s.length() > 0
+    );
+  }
+
+  public static Map<String, Object> getGlobalConfig(String sensorType, SolrComponent component) {
+    Map<String, Object> globalConfig = new HashMap<>();
+    globalConfig.put(SOLR_ZOOKEEPER, component.getZookeeperUrl());
+    return globalConfig;
+  }
+
+  public static SolrComponent createSolrComponent(String sensor) throws Exception {
+    return new SolrComponent.Builder().build();
+  }
+
+  @Test
+  public void testError() throws Exception {
+    test("error");
+  }
+
+  @Test
+  public void testBro() throws Exception {
+    test("bro");
+  }
+
+  @Test
+  public void testSnort() throws Exception {
+    test("snort");
+  }
+
+  @Test
+  public void testYaf() throws Exception {
+    test("yaf");
+  }
+
+  public String getGuid(Map<String, Object> m) {
+    if(m.containsKey("guid")) {
+      return (String)m.get("guid");
+    }
+    else {
+      return (String) m.get("original_string");
+    }
+  }
+
+  public void test(String sensorType) throws Exception {
+    SolrComponent component = null;
+    try {
+      component = createSolrComponent(sensorType);
+      component.start();
+      component.addCollection(String.format("%s", sensorType), String.format("src/main/config/schema/%s", sensorType));
+      Map<String, Object> globalConfig = getGlobalConfig(sensorType, component);
+
+      List<JSONObject> inputs = new ArrayList<>();
+      List<Tuple> tuples = new ArrayList<>();
+      Map<String, Map<String, Object>> index = new HashMap<>();
+      for (String message : getData(sensorType)) {
+        if (message.trim().length() > 0) {
+          Tuple t = mock(Tuple.class);
+          tuples.add(t);
+          Map<String, Object> m = JSONUtils.INSTANCE.load(message.trim(), JSONUtils.MAP_SUPPLIER);
+          String guid = getGuid(m);
+          index.put(guid, m);
+          inputs.add(new JSONObject(m));
+        }
+      }
+      Assert.assertTrue(inputs.size() > 0);
+
+      SolrWriter solrWriter = new SolrWriter();
+
+      WriterConfiguration writerConfig = new WriterConfiguration() {
+        @Override
+        public int getBatchSize(String sensorName) {
+          return inputs.size();
+        }
+
+        @Override
+        public int getBatchTimeout(String sensorName) {
+          return 0;
+        }
+
+        @Override
+        public List<Integer> getAllConfiguredTimeouts() {
+          return new ArrayList<>();
+        }
+
+        @Override
+        public String getIndex(String sensorName) {
+          return sensorType;
+        }
+
+        @Override
+        public boolean isEnabled(String sensorName) {
+          return true;
+        }
+
+        @Override
+        public Map<String, Object> getSensorConfig(String sensorName) {
+          return new HashMap<String, Object>() {{
+            put("index", sensorType);
+            put("batchSize", inputs.size());
+            put("enabled", true);
+          }};
+        }
+
+        @Override
+        public Map<String, Object> getGlobalConfig() {
+          return globalConfig;
+        }
+
+        @Override
+        public boolean isDefault(String sensorName) {
+          return false;
+        }
+
+        @Override
+        public String getFieldNameConverter(String sensorName) {
+          return null;
+        }
+      };
+
+      solrWriter.init(null, null, writerConfig);
+
+      BulkWriterResponse response = solrWriter.write(sensorType, writerConfig, tuples, inputs);
+      Assert.assertTrue(response.getErrors().isEmpty());
+      for (Map<String, Object> m : component.getAllIndexedDocs(sensorType)) {
+        Map<String, Object> expected = index.get(getGuid(m));
+        for (Map.Entry<String, Object> field : expected.entrySet()) {
+          if (field.getValue() instanceof Collection && ((Collection) field.getValue()).isEmpty()) {
+            continue;
+          }
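+          // Compare numbers with a small tolerance, falling back to a string-prefix check, to
+          // absorb float/double rounding introduced by the round trip through Solr.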
+          if(m.get(field.getKey()) instanceof Number) {
+            Number n1 = ConversionUtils.convert(field.getValue(), Double.class);
+            Number n2 = (Number)m.get(field.getKey());
+            boolean isSame = Math.abs(n1.doubleValue() - n2.doubleValue()) < 1e-3;
+            if(!isSame) {
+              String s1 = "" + n1.doubleValue();
+              String s2 = "" + n2.doubleValue();
+              isSame = s1.startsWith(s2) || s2.startsWith(s1);
+            }
+            Assert.assertTrue("Unable to validate " + field.getKey() + ": " + n1 + " != " + n2, isSame);
+          }
+          else {
+            Assert.assertEquals("Unable to find " + field.getKey(), "" + field.getValue(), "" + m.get(field.getKey()));
+          }
+        }
+      }
+    }
+    finally {
+      if(component != null) {
+        component.stop();
+      }
+    }
+  }
+
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/ModifiableSolrParamsMatcher.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/ModifiableSolrParamsMatcher.java
new file mode 100644
index 0000000..cd68be9
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/ModifiableSolrParamsMatcher.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.matcher;
+
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.hamcrest.Description;
+import org.mockito.ArgumentMatcher;
+
+public class ModifiableSolrParamsMatcher extends ArgumentMatcher<ModifiableSolrParams> {
+
+  private ModifiableSolrParams expectedModifiableSolrParams;
+
+  public ModifiableSolrParamsMatcher(ModifiableSolrParams modifiableSolrParams) {
+    this.expectedModifiableSolrParams = modifiableSolrParams;
+  }
+
+  @Override
+  public boolean matches(Object o) {
+    ModifiableSolrParams modifiableSolrParams = (ModifiableSolrParams) o;
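+    // Matches when every expected parameter has an equal value on the actual params; extra
+    // parameters on the actual object are ignored.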
+    for(String name: expectedModifiableSolrParams.getParameterNames()) {
+      String expectedValue = expectedModifiableSolrParams.get(name);
+      String value = modifiableSolrParams.get(name);
+      if(expectedValue == null) {
+        if (value != null) {
+          return false;
+        }
+      } else {
+        if (!expectedValue.equals(value)) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public void describeTo(Description description) {
+    description.appendValue(expectedModifiableSolrParams);
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrInputDocumentListMatcher.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrInputDocumentListMatcher.java
new file mode 100644
index 0000000..6c4ab20
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrInputDocumentListMatcher.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.matcher;
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.SolrInputField;
+import org.hamcrest.Description;
+import org.mockito.ArgumentMatcher;
+
+import java.util.List;
+import java.util.Objects;
+
+public class SolrInputDocumentListMatcher extends ArgumentMatcher<List<SolrInputDocument>> {
+
+  private List<SolrInputDocument> expectedSolrInputDocuments;
+
+  public SolrInputDocumentListMatcher(List<SolrInputDocument> solrInputDocuments) {
+    this.expectedSolrInputDocuments = solrInputDocuments;
+  }
+
+  @Override
+  public boolean matches(Object o) {
+    List<SolrInputDocument> solrInputDocuments = (List<SolrInputDocument>) o;
+    for (SolrInputDocument solrInputDocument : solrInputDocuments) {
+      for (SolrInputDocument expectedSolrInputDocument : expectedSolrInputDocuments) {
+        // Pair documents by guid value: SolrInputField containers are not reliably
+        // comparable, so compare the underlying field values instead.
+        if (Objects.equals(solrInputDocument.getFieldValue("guid"),
+            expectedSolrInputDocument.getFieldValue("guid"))) {
+          for (String field : solrInputDocument.getFieldNames()) {
+            SolrInputField expectedField = expectedSolrInputDocument.getField(field);
+            // A field the expected document lacks is a mismatch, not an NPE.
+            Object expectedValue = expectedField == null ? null : expectedField.getValue();
+            Object value = solrInputDocument.getField(field).getValue();
+            boolean matches = expectedValue != null ? expectedValue.equals(value) : value == null;
+            if (!matches) {
+              return false;
+            }
+          }
+        }
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public void describeTo(Description description) {
+    description.appendValue(expectedSolrInputDocuments);
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrInputDocumentMatcher.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrInputDocumentMatcher.java
new file mode 100644
index 0000000..b64c9f2
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrInputDocumentMatcher.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.matcher;
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.SolrInputField;
+import org.hamcrest.Description;
+import org.mockito.ArgumentMatcher;
+
+public class SolrInputDocumentMatcher extends ArgumentMatcher<SolrInputDocument> {
+
+  private SolrInputDocument expectedSolrInputDocument;
+
+  public SolrInputDocumentMatcher(SolrInputDocument solrInputDocument) {
+    this.expectedSolrInputDocument = solrInputDocument;
+  }
+
+  @Override
+  public boolean matches(Object o) {
+    SolrInputDocument solrInputDocument = (SolrInputDocument) o;
+    for (String field : solrInputDocument.getFieldNames()) {
+      SolrInputField expectedField = expectedSolrInputDocument.getField(field);
+      // A field the expected document lacks is a mismatch, not an NPE.
+      Object expectedValue = expectedField == null ? null : expectedField.getValue();
+      Object value = solrInputDocument.getField(field).getValue();
+      boolean matches = expectedValue != null ? expectedValue.equals(value) : value == null;
+      if (!matches) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public void describeTo(Description description) {
+    description.appendValue(expectedSolrInputDocument);
+  }
+}
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrQueryMatcher.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrQueryMatcher.java
new file mode 100644
index 0000000..45bf85d
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/matcher/SolrQueryMatcher.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.solr.matcher;
+
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.hamcrest.Description;
+import org.mockito.ArgumentMatcher;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+public class SolrQueryMatcher extends ArgumentMatcher<ModifiableSolrParams> {
+
+  private SolrQuery expectedSolrQuery;
+
+  public SolrQueryMatcher(SolrQuery solrQuery) {
+    this.expectedSolrQuery = solrQuery;
+  }
+
+  @Override
+  public boolean matches(Object o) {
+    SolrQuery solrQuery = (SolrQuery) o;
+    return Objects.equals(solrQuery.getStart(), expectedSolrQuery.getStart()) &&
+            Objects.equals(solrQuery.getRows(), expectedSolrQuery.getRows()) &&
+            Objects.equals(solrQuery.getQuery(), expectedSolrQuery.getQuery()) &&
+            Objects.equals(solrQuery.getSorts(), expectedSolrQuery.getSorts()) &&
+            Objects.equals(solrQuery.getFields(), expectedSolrQuery.getFields()) &&
+            Arrays.equals(solrQuery.getFacetFields(), expectedSolrQuery.getFacetFields()) &&
+            Objects.equals(solrQuery.get("collection"), expectedSolrQuery.get("collection")) &&
+            Objects.equals(solrQuery.get("stats"), expectedSolrQuery.get("stats")) &&
+            Objects.equals(solrQuery.get("stats.field"), expectedSolrQuery.get("stats.field")) &&
+            Objects.equals(solrQuery.get("facet"), expectedSolrQuery.get("facet")) &&
+            Objects.equals(solrQuery.get("facet.pivot"), expectedSolrQuery.get("facet.pivot"));
+  }
+
+  @Override
+  public void describeTo(Description description) {
+    description.appendValue(expectedSolrQuery);
+  }
+}
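SolrQueryMatcher deliberately compares only the query facets the Metron tests
assert on: paging (start/rows), the query string, sorts, fields, facet fields,
and a few raw params such as "collection" and the stats/facet settings. A sketch
of building an expected query for it, with illustrative field and collection
names:

    SolrQuery expected = new SolrQuery("*:*");
    expected.setStart(0);
    expected.setRows(10);
    expected.setFields("guid", "source.type");
    expected.addSort("timestamp", SolrQuery.ORDER.desc);
    expected.addFacetField("source.type");
    // Any param the expected query leaves unset is compared as null on both sides.
    Mockito.verify(solr).query(Mockito.argThat(new SolrQueryMatcher(expected)));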
diff --git a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/writer/SolrWriterTest.java b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/writer/SolrWriterTest.java
index a56916f..7b7d208 100644
--- a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/writer/SolrWriterTest.java
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/writer/SolrWriterTest.java
@@ -17,7 +17,19 @@
  */
 package org.apache.metron.solr.writer;
 
-import org.apache.metron.common.configuration.EnrichmentConfigurations;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.argThat;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.metron.common.Constants;
 import org.apache.metron.common.configuration.IndexingConfigurations;
 import org.apache.metron.common.configuration.writer.IndexingWriterConfiguration;
 import org.apache.metron.enrichment.integration.utils.SampleUtil;
@@ -25,18 +37,11 @@
 import org.apache.solr.common.SolrInputDocument;
 import org.hamcrest.Description;
 import org.json.simple.JSONObject;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import static org.mockito.Mockito.argThat;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
 
 public class SolrWriterTest {
 
@@ -60,33 +65,37 @@
     }
   }
 
-  static class SolrInputDocumentMatcher extends ArgumentMatcher<SolrInputDocument> {
+  static class SolrInputDocumentMatcher extends ArgumentMatcher<Collection<SolrInputDocument>> {
 
-    private int expectedId;
-    private String expectedSourceType;
-    private int expectedInt;
-    private double expectedDouble;
 
-    public SolrInputDocumentMatcher(int expectedId, String expectedSourceType, int expectedInt, double expectedDouble) {
-      this.expectedId = expectedId;
-      this.expectedSourceType = expectedSourceType;
-      this.expectedInt = expectedInt;
-      this.expectedDouble = expectedDouble;
+    List<Map<String, Object>> expectedDocs;
+
+    public SolrInputDocumentMatcher(List<Map<String, Object>> expectedDocs) {
+      this.expectedDocs = expectedDocs;
     }
 
     @Override
     public boolean matches(Object o) {
-      SolrInputDocument solrInputDocument = (SolrInputDocument) o;
-      int actualId = (Integer) solrInputDocument.get("id").getValue();
-      String actualName = (String) solrInputDocument.get("sensorType").getValue();
-      int actualInt = (Integer) solrInputDocument.get("intField_i").getValue();
-      double actualDouble = (Double) solrInputDocument.get("doubleField_d").getValue();
-      return expectedId == actualId && expectedSourceType.equals(actualName) && expectedInt == actualInt && expectedDouble == actualDouble;
+      List<SolrInputDocument> docs = (List<SolrInputDocument>) o;
+      int size = docs.size();
+      if (size != expectedDocs.size()) {
+        return false;
+      }
+      for (int i = 0; i < size; ++i) {
+        SolrInputDocument doc = docs.get(i);
+        Map<String, Object> expectedDoc = expectedDocs.get(i);
+        for (Map.Entry<String, Object> expectedKv : expectedDoc.entrySet()) {
+          if (!expectedKv.getValue().equals(doc.get(expectedKv.getKey()).getValue())) {
+            return false;
+          }
+        }
+      }
+      return true;
     }
 
     @Override
     public void describeTo(Description description) {
-      description.appendText(String.format("fields: [id=%d, doubleField_d=%f, name=%s, intField_i=%d]", expectedId, expectedDouble, expectedSourceType, expectedInt));
+      description.appendText(expectedDocs.toString());
     }
 
   }
@@ -95,9 +104,13 @@
   public void testWriter() throws Exception {
     IndexingConfigurations configurations = SampleUtil.getSampleIndexingConfigs();
     JSONObject message1 = new JSONObject();
+    message1.put(Constants.GUID, "guid-1");
+    message1.put(Constants.SENSOR_TYPE, "test");
     message1.put("intField", 100);
     message1.put("doubleField", 100.0);
     JSONObject message2 = new JSONObject();
+    message2.put(Constants.GUID, "guid-2");
+    message2.put(Constants.SENSOR_TYPE, "test");
     message2.put("intField", 200);
     message2.put("doubleField", 200.0);
     List<JSONObject> messages = new ArrayList<>();
@@ -108,33 +121,166 @@
     MetronSolrClient solr = Mockito.mock(MetronSolrClient.class);
     SolrWriter writer = new SolrWriter().withMetronSolrClient(solr);
     writer.init(null, null,new IndexingWriterConfiguration("solr", configurations));
-    verify(solr, times(1)).createCollection(collection, 1, 1);
     verify(solr, times(1)).setDefaultCollection(collection);
 
     collection = "metron2";
-    int numShards = 4;
-    int replicationFactor = 2;
     Map<String, Object> globalConfig = configurations.getGlobalConfig();
     globalConfig.put("solr.collection", collection);
-    globalConfig.put("solr.numShards", numShards);
-    globalConfig.put("solr.replicationFactor", replicationFactor);
     configurations.updateGlobalConfig(globalConfig);
     writer = new SolrWriter().withMetronSolrClient(solr);
     writer.init(null, null, new IndexingWriterConfiguration("solr", configurations));
-    verify(solr, times(1)).createCollection(collection, numShards, replicationFactor);
     verify(solr, times(1)).setDefaultCollection(collection);
 
     writer.write("test", new IndexingWriterConfiguration("solr", configurations), new ArrayList<>(), messages);
-    verify(solr, times(1)).add(argThat(new SolrInputDocumentMatcher(message1.toJSONString().hashCode(), "test", 100, 100.0)));
-    verify(solr, times(1)).add(argThat(new SolrInputDocumentMatcher(message2.toJSONString().hashCode(), "test", 200, 200.0)));
-    verify(solr, times(0)).commit(collection);
-
-    writer = new SolrWriter().withMetronSolrClient(solr).withShouldCommit(true);
-    writer.init(null, null, new IndexingWriterConfiguration("solr", configurations));
-    writer.write("test", new IndexingWriterConfiguration("solr", configurations), new ArrayList<>(), messages);
-    verify(solr, times(2)).add(argThat(new SolrInputDocumentMatcher(message1.toJSONString().hashCode(), "test", 100, 100.0)));
-    verify(solr, times(2)).add(argThat(new SolrInputDocumentMatcher(message2.toJSONString().hashCode(), "test", 200, 200.0)));
-    verify(solr, times(1)).commit(collection);
+    verify(solr, times(1)).add(eq("yaf"), argThat(new SolrInputDocumentMatcher(ImmutableList.of(message1, message2))));
+    verify(solr, times(1)).commit("yaf"
+                                 , (boolean)SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.defaultValue.get()
+                                 , (boolean)SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.defaultValue.get()
+                                 , (boolean)SolrWriter.SolrProperties.COMMIT_SOFT.defaultValue.get()
+                                 );
 
   }
+
+  @Test
+  public void configTest_zookeeperQuorumSpecified() throws Exception {
+    String expected = "test";
+    Assert.assertEquals(expected,
+            SolrWriter.SolrProperties.ZOOKEEPER_QUORUM.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.ZOOKEEPER_QUORUM.name, expected)
+                    , String.class));
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void configTest_zookeeperQuorumUnspecified() throws Exception {
+    SolrWriter.SolrProperties.ZOOKEEPER_QUORUM.coerceOrDefaultOrExcept(
+                    new HashMap<>()
+                    , String.class);
+  }
+
+
+  @Test
+  public void configTest_commitPerBatchSpecified() throws Exception {
+    Object expected = false;
+    Assert.assertEquals(expected,
+            SolrWriter.SolrProperties.COMMIT_PER_BATCH.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_PER_BATCH.name, false)
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_commitPerBatchUnspecified() throws Exception {
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_PER_BATCH.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_PER_BATCH.coerceOrDefaultOrExcept(
+                    new HashMap<>()
+                    , Boolean.class));
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_PER_BATCH.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_PER_BATCH.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_PER_BATCH.name, new DummyClass())
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_commitSoftSpecified() throws Exception {
+    Object expected = true;
+    Assert.assertEquals(expected,
+            SolrWriter.SolrProperties.COMMIT_SOFT.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_SOFT.name, expected)
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_commitSoftUnspecified() throws Exception {
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_SOFT.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_SOFT.coerceOrDefaultOrExcept(
+                    new HashMap<>()
+                    , Boolean.class));
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_SOFT.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_SOFT.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_SOFT.name, new DummyClass())
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_commitWaitFlushSpecified() throws Exception {
+    Object expected = false;
+    Assert.assertEquals(expected,
+            SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.name, expected)
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_commitWaitFlushUnspecified() throws Exception {
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.coerceOrDefaultOrExcept(
+                    new HashMap<>()
+                    , Boolean.class));
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_WAIT_FLUSH.name, new DummyClass())
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_commitWaitSearcherSpecified() throws Exception {
+    Object expected = false;
+    Assert.assertEquals(expected,
+            SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.name, expected)
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_commitWaitSearcherUnspecified() throws Exception {
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.coerceOrDefaultOrExcept(
+                    new HashMap<>()
+                    , Boolean.class));
+    Assert.assertEquals(SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.defaultValue.get(),
+    SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.COMMIT_WAIT_SEARCHER.name, new DummyClass())
+                    , Boolean.class));
+  }
+
+  @Test
+  public void configTest_defaultCollectionSpecified() throws Exception {
+    Object expected = "mycollection";
+    Assert.assertEquals(expected,
+            SolrWriter.SolrProperties.DEFAULT_COLLECTION.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.DEFAULT_COLLECTION.name, expected)
+                    , String.class));
+  }
+
+  @Test
+  public void configTest_defaultCollectionUnspecified() throws Exception {
+    Assert.assertEquals(SolrWriter.SolrProperties.DEFAULT_COLLECTION.defaultValue.get(),
+    SolrWriter.SolrProperties.DEFAULT_COLLECTION.coerceOrDefaultOrExcept(
+                    new HashMap<>()
+                    , String.class));
+  }
+
+  @Test
+  public void configTest_httpConfigSpecified() throws Exception {
+    Object expected = new HashMap<String, Object>() {{
+      put("name", "metron");
+    }};
+    Assert.assertEquals(expected,
+            SolrWriter.SolrProperties.HTTP_CONFIG.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.HTTP_CONFIG.name, expected)
+                    , Map.class));
+  }
+
+  @Test
+  public void configTest_httpConfigUnspecified() throws Exception {
+    Assert.assertEquals(SolrWriter.SolrProperties.HTTP_CONFIG.defaultValue.get(),
+    SolrWriter.SolrProperties.HTTP_CONFIG.coerceOrDefaultOrExcept(
+                    new HashMap<>()
+                    , Map.class));
+    Assert.assertEquals(SolrWriter.SolrProperties.HTTP_CONFIG.defaultValue.get(),
+    SolrWriter.SolrProperties.HTTP_CONFIG.coerceOrDefaultOrExcept(
+                    ImmutableMap.of( SolrWriter.SolrProperties.HTTP_CONFIG.name, new DummyClass())
+                    , Map.class));
+  }
+
+  public static class DummyClass {}
 }
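The configTest_* cases above pin down one contract for
SolrWriter.SolrProperties.coerceOrDefaultOrExcept: a configured value of the
requested type is returned as-is, a missing or un-coercible value (see
DummyClass) falls back to the property's default, and a missing value with no
default, such as ZOOKEEPER_QUORUM, raises IllegalArgumentException. A
hypothetical stand-alone equivalent of that contract, not the Metron
implementation, which may additionally convert between types (e.g. the string
"true" to Boolean):

    // Assumes java.util.Map and java.util.Optional; defaultValue mirrors
    // the defaultValue.get() calls in the tests above.
    static <T> T coerceOrDefaultOrExcept(Map<String, Object> config, String name,
                                         Optional<Object> defaultValue, Class<T> clazz) {
      Object value = config.get(name);
      if (clazz.isInstance(value)) {
        return clazz.cast(value);              // specified with a usable type
      }
      if (defaultValue.isPresent()) {
        return clazz.cast(defaultValue.get()); // missing or un-coercible: use default
      }
      throw new IllegalArgumentException(name + " is required but was not configured");
    }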
diff --git a/metron-platform/metron-solr/src/test/resources/config/test/conf/managed-schema b/metron-platform/metron-solr/src/test/resources/config/test/conf/managed-schema
new file mode 100644
index 0000000..8340a36
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/resources/config/test/conf/managed-schema
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="example" version="1.6">
+  <field name="_version_" type="plong" indexed="false" stored="false"/>
+
+  <!-- points to the root document of a block of nested documents. Required for nested
+     document support, may be removed otherwise
+  -->
+  <field name="_root_" type="string" indexed="true" stored="false" docValues="false"/>
+
+  <!-- Only remove the "guid" field if you have a very good reason to. While not strictly
+    required, it is highly recommended. A <uniqueKey> is present in almost all Solr
+    installations. See the <uniqueKey> declaration below where <uniqueKey> is set to "guid".
+    Do NOT change the type and apply index-time analysis to the <uniqueKey> as it will likely
+    make routing in SolrCloud and document replacement in general fail. Limited _query_ time
+    analysis is possible as long as the indexing process is guaranteed to index the term
+    in a compatible way. Any analysis applied to the <uniqueKey> should _not_ produce multiple
+    tokens
+  -->
+  <field name="guid" type="string" indexed="true" stored="true" required="true"
+    multiValued="false"/>
+
+  <field name="source.type" type="string" indexed="true" stored="true"/>
+  <field name="name" type="string" indexed="true" stored="true"/>
+  <field name="timestamp" type="plong" indexed="true" stored="true"/>
+  <field name="new-field" type="string" indexed="true" stored="true"/>
+  <field name="metaalerts" type="string" multiValued="true" indexed="true" stored="true"/>
+  <field name="threat:triage:score" type="pdouble" indexed="true" stored="true"/>
+  <field name="score" type="pdouble" indexed="true" stored="true"/>
+
+
+  <!-- Comments field required for the UI -->
+  <field name="comments" type="string" indexed="true" stored="true" multiValued="true"/>
+
+  <dynamicField name="*" type="ignored" multiValued="false" docValues="true"/>
+
+
+  <!-- Field to use to determine and enforce document uniqueness.
+       Unless this field is marked with required="false", it will be a required field
+    -->
+  <uniqueKey>guid</uniqueKey>
+
+
+  <!-- field type definitions. The "name" attribute is
+     just a label to be used by field definitions.  The "class"
+     attribute and any other attributes determine the real
+     behavior of the fieldType.
+       Class names starting with "solr" refer to java classes in a
+     standard package such as org.apache.solr.analysis
+  -->
+
+  <!-- The StrField type is not analyzed, but indexed/stored verbatim. -->
+  <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
+  <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
+  <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
+  <fieldType name="pfloat" class="solr.FloatPointField" docValues="true"/>
+  <fieldType name="plong" class="solr.LongPointField" docValues="true"/>
+  <fieldType name="pdouble" class="solr.DoublePointField" docValues="true"/>
+  <fieldType name="ignored" stored="false" indexed="false" multiValued="true"
+    class="solr.StrField"/>
+
+</schema>
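For reference, a document accepted by this test schema needs the required guid
uniqueKey; the explicitly declared fields are typed, and any other field name
falls through to the catch-all dynamicField. A SolrJ sketch, where the client
instance, collection name, and values are illustrative:

    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("guid", "guid-1");            // required uniqueKey (string)
    doc.addField("source.type", "test");       // string
    doc.addField("timestamp", 1517430000000L); // plong
    doc.addField("score", 10.0d);              // pdouble
    solrClient.add("test", doc);
    solrClient.commit("test");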
diff --git a/metron-platform/metron-solr/src/test/resources/config/test/conf/solrconfig.xml b/metron-platform/metron-solr/src/test/resources/config/test/conf/solrconfig.xml
new file mode 100644
index 0000000..fff9d84
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/resources/config/test/conf/solrconfig.xml
@@ -0,0 +1,1601 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>7.2.0</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath, this is useful for including all jars in a
+       directory.
+
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+
+       The examples below can be used to load some solr-contribs along
+       with their external dependencies.
+    -->
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" />
+
+  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based and not persistent.
+    -->
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- Expert: Enabling the compound file format will use fewer files for the index,
+         using fewer file descriptors at the expense of decreased performance.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6). -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs are set, then
+         Lucene will flush based on whichever limit is hit first.
+         The default is 100 MB.  -->
+    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
+         Even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+          <double name="noCFSRatio">0.1</double>
+        </mergePolicyFactory>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is the default for Solr 3.6 and later; otherwise
+                   'simple' is the default.
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+      <!-- The number of commit points to be kept -->
+      <!-- <str name="maxCommitsToKeep">1</str> -->
+      <!-- The number of optimized commit points to be kept -->
+      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability,
+         and SolrCloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing, this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
+
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit>
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
+       <openSearcher>false</openSearcher>
+     </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+     <autoSoftCommit>
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
+     </autoSoftCommit>
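+     <!-- As suggested above, a client may instead request commit visibility per
+          update; e.g. an XML update message can carry a commitWithin deadline in
+          milliseconds (an illustrative sketch, not part of this config):
+            <add commitWithin="10000">
+              <doc><field name="guid">guid-1</field></doc>
+            </add>
+       -->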
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+
+    <!-- Maximum number of clauses in each BooleanQuery,  an exception
+         is thrown if exceeded.  It is safe to increase or remove this setting,
+         since it is purely an arbitrary limit to try and catch user errors where
+         large boolean queries may not be the best implementation choice.
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Slow Query Threshold (in millis)
+
+         At high request rates, logging all requests can become a bottleneck
+         and therefore INFO logging is often turned off. However, it is still
+         useful to be able to set a latency threshold above which a request
+         is considered "slow" and log that request at WARN level so we can
+         easily identify slow queries.
+    -->
+    <slowQueryThresholdMillis>-1</slowQueryThresholdMillis>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+
+        Caches results of searches - ordered lists of document ids
+        (DocList) based on a query, a sort, and the range of documents requested.
+        Additional supported parameter by LRUCache:
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy
+     -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="0"/>
+
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Feature Values Cache
+
+         Cache used by the Learning To Rank (LTR) contrib module.
+
+         You will need to set the solr.ltr.enabled system property
+         when running solr to run with ltr enabled:
+           -Dsolr.ltr.enabled=true
+
+         https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <cache enable="${solr.ltr.enabled:false}" name="QUERY_DOC_FV"
+           class="solr.search.LRUCache"
+           size="4096"
+           initialSize="2048"
+           autowarmCount="4096"
+           regenerator="solr.search.NoOpRegenerator" />
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score".
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache.
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+    -->
+  <requestDispatcher>
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+
+         *** WARNING ***
+         Before enabling remote streaming, you should make sure your
+         system has authentication enabled.
+
+    <requestParsers enableRemoteStreaming="false"
+                    multipartUploadLimitInKB="-1"
+                    formdataUploadLimitInKB="-1"
+                    addHttpRequestToContext="false"/>
+      -->
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+
+         By default, no Cache-Control header is generated.
+
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModifiedFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModifiedFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler" It delegates to a sequent
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+       <!-- Default search field
+          <str name="df">text</str>
+         -->
+       <!-- Change from JSON to XML format (the default prior to Solr 7.0)
+          <str name="wt">xml</str>
+         -->
+       <!-- Controls the distribution of a query to shards other than itself.
+            Consider making 'preferLocalShards' true when:
+              1) maxShardsPerNode > 1
+              2) Number of shards > 1
+              3) CloudSolrClient or LbHttpSolrServer is used by clients.
+            Without this option, every core broadcasts the distributed query to
+            a replica of each shard where the replicas are chosen randomly.
+            This option directs the cores to prefer cores hosted locally, thus
+            preventing network delays between machines.
+            This behavior also immunizes a bad/slow machine from slowing down all
+            the good machines (if those good machines were querying this bad one).
+
+            Specify this option=false for clients connecting through HttpSolrServer
+       -->
+       <bool name="preferLocalShards">false</bool>
+     </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+  </requestHandler>
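+
+  <!-- Illustration only, not part of the stock config (host, port, and
+       collection name are assumptions): explicit request parameters
+       override the defaults above, so this returns 20 rows instead of 10:
+
+         http://localhost:8983/solr/<collection>/select?q=*:*&rows=20
+    -->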
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that the same Request Handler class (SearchHandler) can be
+       registered multiple times with different names (and different
+       init parameters).
+    -->
+  <requestHandler name="/browse" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+
+       <!-- VelocityResponseWriter settings -->
+       <str name="wt">velocity</str>
+       <str name="v.template">browse</str>
+       <str name="v.layout">layout</str>
+       <str name="title">Solritas</str>
+
+       <!-- Query settings -->
+       <str name="defType">edismax</str>
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+          title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mm">100%</str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+
+       <str name="mlt.qf">
+         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+         title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
+       </str>
+       <str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
+       <int name="mlt.count">3</int>
+
+       <!-- Faceting defaults -->
+       <str name="facet">on</str>
+       <str name="facet.missing">true</str>
+       <str name="facet.field">cat</str>
+       <str name="facet.field">manu_exact</str>
+       <str name="facet.field">content_type</str>
+       <str name="facet.field">author_s</str>
+       <str name="facet.query">ipod</str>
+       <str name="facet.query">GB</str>
+       <str name="facet.mincount">1</str>
+       <str name="facet.pivot">cat,inStock</str>
+       <str name="facet.range.other">after</str>
+       <str name="facet.range">price</str>
+       <int name="f.price.facet.range.start">0</int>
+       <int name="f.price.facet.range.end">600</int>
+       <int name="f.price.facet.range.gap">50</int>
+       <str name="facet.range">popularity</str>
+       <int name="f.popularity.facet.range.start">0</int>
+       <int name="f.popularity.facet.range.end">10</int>
+       <int name="f.popularity.facet.range.gap">3</int>
+       <str name="facet.range">manufacturedate_dt</str>
+       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+       <str name="f.manufacturedate_dt.facet.range.other">before</str>
+       <str name="f.manufacturedate_dt.facet.range.other">after</str>
+
+       <!-- Highlighting defaults -->
+       <str name="hl">on</str>
+       <str name="hl.fl">content features title name</str>
+       <str name="hl.preserveMulti">true</str>
+       <str name="hl.encoder">html</str>
+       <str name="hl.simple.pre">&lt;b&gt;</str>
+       <str name="hl.simple.post">&lt;/b&gt;</str>
+       <str name="f.title.hl.fragsize">0</str>
+       <str name="f.title.hl.alternateField">title</str>
+       <str name="f.name.hl.fragsize">0</str>
+       <str name="f.name.hl.alternateField">name</str>
+       <str name="f.content.hl.snippets">3</str>
+       <str name="f.content.hl.fragsize">200</str>
+       <str name="f.content.hl.alternateField">content</str>
+       <str name="f.content.hl.maxAlternateFieldLength">750</str>
+
+       <!-- Spell checking defaults -->
+       <str name="spellcheck">on</str>
+       <str name="spellcheck.extendedResults">false</str>
+       <str name="spellcheck.count">5</str>
+       <str name="spellcheck.alternativeTermCount">2</str>
+       <str name="spellcheck.maxResultsForSuggest">5</str>
+       <str name="spellcheck.collate">true</str>
+       <str name="spellcheck.collateExtendedResults">true</str>
+       <str name="spellcheck.maxCollationTries">5</str>
+       <str name="spellcheck.maxCollations">3</str>
+     </lst>
+
+     <!-- append spellchecking to our list of components -->
+     <arr name="last-components">
+       <str>spellcheck</str>
+     </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse,update">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
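+
+  <!-- Illustration only, an assumed usage (host, port, and collection name
+       are placeholders): because initParams supplies df=text to the paths
+       above, a bare keyword query such as
+
+         http://localhost:8983/solr/<collection>/select?q=foo
+
+       searches the "text" field unless the request passes its own df.
+    -->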
+
+  <!-- The following are implicitly added
+  <requestHandler name="/update/json" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/json</str>
+       </lst>
+  </requestHandler>
+  <requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
+        <lst name="invariants">
+         <str name="stream.contentType">application/csv</str>
+       </lst>
+  </requestHandler>
+  -->
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler
+
+    -->
+  <requestHandler name="/update/extract"
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
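+
+  <!-- A sketch of invoking the extract handler above with curl; host, port,
+       collection, document id, and file name are assumptions, not part of
+       the stock config:
+
+         curl "http://localhost:8983/solr/<collection>/update/extract?literal.id=doc1&commit=true" \
+              -F "myfile=@example.pdf"
+    -->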
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+
+     -->
+
+   <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">text</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure to use; the default is the internal Levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents a query term can appear in and still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+        <float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+
+    <!-- a spellchecker that uses a different distance measure -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">jarowinkler</str>
+         <str name="field">spell</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="distanceMeasure">
+           org.apache.lucene.search.spell.JaroWinklerDistance
+         </str>
+       </lst>
+     -->
+
+    <!-- a spellchecker that uses an alternate comparator
+
+         comparatorClass can be one of:
+          1. score (default)
+          2. freq (Frequency first, then score)
+          3. A fully qualified class name
+      -->
+    <!--
+       <lst name="spellchecker">
+         <str name="name">freq</str>
+         <str name="field">lowerfilt</str>
+         <str name="classname">solr.DirectSolrSpellChecker</str>
+         <str name="comparatorClass">freq</str>
+      </lst>
+      -->
+
+    <!-- A spellchecker that reads the list of words from a file -->
+    <!--
+       <lst name="spellchecker">
+         <str name="classname">solr.FileBasedSpellChecker</str>
+         <str name="name">file</str>
+         <str name="sourceLocation">spellings.txt</str>
+         <str name="characterEncoding">UTF-8</str>
+         <str name="spellcheckIndexDir">spellcheckerFile</str>
+       </lst>
+      -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           Collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck.dictionary">wordbreak</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
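+
+  <!-- An illustrative request against the handler above (host, port,
+       collection, and the misspelled term are assumptions):
+
+         http://localhost:8983/solr/<collection>/spell?q=delll&spellcheck=true
+
+       Suggestions from the 'default' and 'wordbreak' dictionaries above
+       are combined in the response.
+    -->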
+
+  <!-- The SuggestComponent in Solr provides users with automatic suggestions for query terms.
+       You can use this to implement a powerful auto-suggest feature in your search application.
+       As with the rest of this solrconfig.xml file, the configuration of this component is purely
+       an example that applies specifically to this configset and example documents.
+
+       More information about this component and other configuration options are described in the
+       "Suggester" section of the reference guide available at
+       http://archive.apache.org/dist/lucene/solr/ref-guide
+    -->
+  <searchComponent name="suggest" class="solr.SuggestComponent">
+    <lst name="suggester">
+      <str name="name">mySuggester</str>
+      <str name="lookupImpl">FuzzyLookupFactory</str>
+      <str name="dictionaryImpl">DocumentDictionaryFactory</str>
+      <str name="field">cat</str>
+      <str name="weightField">price</str>
+      <str name="suggestAnalyzerFieldType">string</str>
+      <str name="buildOnStartup">false</str>
+    </lst>
+  </searchComponent>
+
+  <requestHandler name="/suggest" class="solr.SearchHandler"
+                  startup="lazy" >
+    <lst name="defaults">
+      <str name="suggest">true</str>
+      <str name="suggest.count">10</str>
+    </lst>
+    <arr name="components">
+      <str>suggest</str>
+    </arr>
+  </requestHandler>
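+
+  <!-- An illustrative suggester request (host, port, collection, and the
+       prefix are assumptions). suggest.build=true builds the 'mySuggester'
+       dictionary on first use, since buildOnStartup is false above:
+
+         http://localhost:8983/solr/<collection>/suggest?suggest.q=elec&suggest.dictionary=mySuggester&suggest.build=true
+    -->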
+
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely an example.
+
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
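+
+  <!-- Illustration only (host, port, and collection are assumptions): the
+       handler above returns term vector data alongside normal results,
+       e.g. with term frequencies enabled:
+
+         http://localhost:8983/solr/<collection>/tvrh?q=*:*&tv.tf=true
+    -->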
+
+  <!-- Clustering Component
+
+       You'll need to set the solr.clustering.enabled system property
+       when running Solr to enable clustering:
+       -Dsolr.clustering.enabled=true
+
+       https://lucene.apache.org/solr/guide/result-clustering.html
+    -->
+  <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" >
+    <!--
+    Declaration of "engines" (clustering algorithms).
+
+    The open source algorithms from Carrot2.org project:
+      * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+      * org.carrot2.clustering.stc.STCClusteringAlgorithm
+      * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+    See http://project.carrot2.org/algorithms.html for more information.
+
+    Commercial algorithm Lingo3G (needs to be installed separately):
+      * com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
+    -->
+
+    <lst name="engine">
+      <str name="name">lingo3g</str>
+      <bool name="optional">true</bool>
+      <str name="carrot.algorithm">com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">lingo</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+
+    <lst name="engine">
+      <str name="name">kmeans</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
+      <str name="carrot.resourcesDir">clustering/carrot2</str>
+    </lst>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the clustering component.
+       This is meant as an example.
+       In reality you will likely want to add the component to your
+       already specified request handlers.
+    -->
+  <requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <bool name="clustering.results">true</bool>
+      <!-- Field name with the logical "title" of a each document (optional) -->
+      <str name="carrot.title">name</str>
+      <!-- Field name with the logical "URL" of a each document (optional) -->
+      <str name="carrot.url">id</str>
+      <!-- Field name with the logical "content" of a each document (optional) -->
+      <str name="carrot.snippet">features</str>
+      <!-- Apply the highlighter to the title/content and use this for clustering. -->
+      <bool name="carrot.produceSummary">true</bool>
+      <!-- the maximum number of labels per cluster -->
+      <!--<int name="carrot.numDescriptions">5</int>-->
+      <!-- produce sub clusters -->
+      <bool name="carrot.outputSubClusters">false</bool>
+
+      <!-- Configure the remaining request handler parameters. -->
+      <str name="defType">edismax</str>
+      <str name="qf">
+        text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+      </str>
+      <str name="q.alt">*:*</str>
+      <str name="rows">100</str>
+      <str name="fl">*,score</str>
+    </lst>
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>
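+
+  <!-- Illustration only (host, port, and collection are assumptions): after
+       starting Solr with -Dsolr.clustering.enabled=true, clusters for the
+       top documents can be requested with
+
+         http://localhost:8983/solr/<collection>/clustering?q=*:*
+    -->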
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
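+
+  <!-- An illustrative terms request (host, port, collection, and field name
+       are assumptions), listing indexed terms and their document
+       frequencies for the "cat" field:
+
+         http://localhost:8983/solr/<collection>/terms?terms.fl=cat
+    -->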
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawgreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD (default), LINE, or SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing the Locale object, -->
+          <!-- which in turn is used to get an instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
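+
+  <!-- Illustration only (host, port, collection, and field name are
+       assumptions): highlighting is enabled per request, e.g.
+
+         http://localhost:8983/solr/<collection>/select?q=name:ipod&hl=true&hl.fl=name
+
+       which uses the default "gap" fragmenter and "html" formatter
+       configured above.
+    -->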
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    -->
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Language identification
+
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Script update processor
+
+    This example hooks in an update processor implemented using JavaScript.
+
+    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
+  -->
+  <!--
+    <updateRequestProcessorChain name="script">
+      <processor class="solr.StatelessScriptUpdateProcessorFactory">
+        <str name="script">update-script.js</str>
+        <lst name="params">
+          <str name="config_param">example config parameter</str>
+        </lst>
+      </processor>
+      <processor class="solr.RunUpdateProcessorFactory" />
+    </updateRequestProcessorChain>
+  -->
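+
+  <!-- Illustration only, an assumed usage (host, port, and collection name
+       are placeholders): a named chain such as "dedupe" above is selected
+       per request with the update.chain parameter, e.g.
+
+         curl "http://localhost:8983/solr/<collection>/update?update.chain=dedupe&commit=true" \
+              -H "Content-Type: application/json" -d '[{"name":"example doc"}]'
+    -->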
+
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For the purposes of the tutorial, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json", just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+    <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
+      <str name="template.base.dir">${velocity.template.base.dir:}</str>
+    </queryResponseWriter>
+
+
+  <!-- XSLT response writer transforms the XML output by any xslt file found
+       in Solr's conf/xslt directory.  Changes to xslt files are checked for
+       every xsltCacheLifetimeSeconds seconds.
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
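+
+  <!-- Illustration only (host, port, collection, and stylesheet name are
+       assumptions): the xslt writer above is selected with wt=xslt plus a
+       tr parameter naming a file under conf/xslt, e.g.
+
+         http://localhost:8983/solr/<collection>/select?q=*:*&wt=xslt&tr=example.xsl
+    -->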
+
+  <!-- Query Parsers
+
+       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+  <!--  LTR query parser
+
+        You will need to set the solr.ltr.enabled system property
+        when running Solr to enable LTR:
+          -Dsolr.ltr.enabled=true
+
+        https://lucene.apache.org/solr/guide/learning-to-rank.html
+
+        Query parser is used to rerank top docs with a provided model
+    -->
+  <queryParser enable="${solr.ltr.enabled:false}" name="ltr" class="org.apache.solr.ltr.search.LTRQParserPlugin"/>
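+
+  <!-- Illustration only (host, port, collection, model name, and efi value
+       are assumptions): with -Dsolr.ltr.enabled=true and a model uploaded
+       to the model store, the top docs can be reranked via the rq param:
+
+         http://localhost:8983/solr/<collection>/query?q=test&rq={!ltr model=myModel reRankDocs=100 efi.user_query=test}
+    -->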
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+
+     If you want the user to still be able to change it with _value:something_, use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+
+    <!--
+      The LTR transformer encodes the document features in the response: for each
+      document it adds the features as an extra field. The name of the field is the
+      name of the transformer enclosed in brackets (in this case [features]).
+      In order to get the feature vector you have to request that field
+      explicitly (e.g., fl="*,[features]")
+
+      You will need to set the solr.ltr.enabled system property
+      when running Solr to enable LTR:
+        -Dsolr.ltr.enabled=true
+
+      https://lucene.apache.org/solr/guide/learning-to-rank.html
+      -->
+    <transformer enable="${solr.ltr.enabled:false}" name="features" class="org.apache.solr.ltr.response.transform.LTRFeatureLoggerTransformerFactory">
+      <str name="fvCacheName">QUERY_DOC_FV</str>
+    </transformer>
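+
+    <!-- Illustration only (host, port, collection, and model name are
+         assumptions): the transformer above is requested through the fl
+         parameter, typically together with the ltr rerank query:
+
+           http://localhost:8983/solr/<collection>/query?q=test&rq={!ltr model=myModel}&fl=id,score,[features]
+      -->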
+
+</config>
diff --git a/metron-platform/metron-solr/src/test/resources/example_data/bro b/metron-platform/metron-solr/src/test/resources/example_data/bro
new file mode 100644
index 0000000..73d0e76
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/resources/example_data/bro
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{"adapter.threatinteladapter.end.ts":"1517499201357","bro_timestamp":"1517499194.7338","ip_dst_port":8080,"enrichmentsplitterbolt.splitter.end.ts":"1517499201202","enrichmentsplitterbolt.splitter.begin.ts":"1517499201200","adapter.hostfromjsonlistadapter.end.ts":"1517499201207","adapter.geoadapter.begin.ts":"1517499201209","uid":"CUrRne3iLIxXavQtci","trans_depth":143,"protocol":"http","original_string":"HTTP | id.orig_p:50451 method:GET request_body_len:0 id.resp_p:8080 uri:\/api\/v1\/clusters\/metron_cluster\/services\/KAFKA\/components\/KAFKA_BROKER?fields=metrics\/kafka\/server\/BrokerTopicMetrics\/AllTopicsBytesInPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/server\/BrokerTopicMetrics\/AllTopicsBytesOutPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/server\/BrokerTopicMetrics\/AllTopicsMessagesInPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/controller\/KafkaController\/ActiveControllerCount[1484165330,1484168930,15],metrics\/kafka\/controller\/ControllerStats\/LeaderElectionRateAndTimeMs\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/controller\/ControllerStats\/UncleanLeaderElectionsPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaFetcherManager\/Replica-MaxLag[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaManager\/PartitionCount[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaManager\/UnderReplicatedPartitions[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaManager\/LeaderCount[1484165330,1484168930,15]&format=null_padding&_=1484168930776 tags:[] uid:CUrRne3iLIxXavQtci referrer:http:\/\/node1:8080\/ trans_depth:143 host:node1 id.orig_h:192.168.66.1 response_body_len:0 user_agent:Mozilla\/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/55.0.2883.95 Safari\/537.36 ts:1517499194.7338 id.resp_h:192.168.66.121","ip_dst_addr":"192.168.66.121","threatinteljoinbolt.joiner.ts":"1517499201359","host":"node1","enrichmentjoinbolt.joiner.ts":"1517499201212","adapter.hostfromjsonlistadapter.begin.ts":"1517499201206","threatintelsplitterbolt.splitter.begin.ts":"1517499201215","ip_src_addr":"192.168.66.1","user_agent":"Mozilla\/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/55.0.2883.95 
Safari\/537.36","timestamp":1517499194733,"method":"GET","request_body_len":0,"uri":"\/api\/v1\/clusters\/metron_cluster\/services\/KAFKA\/components\/KAFKA_BROKER?fields=metrics\/kafka\/server\/BrokerTopicMetrics\/AllTopicsBytesInPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/server\/BrokerTopicMetrics\/AllTopicsBytesOutPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/server\/BrokerTopicMetrics\/AllTopicsMessagesInPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/controller\/KafkaController\/ActiveControllerCount[1484165330,1484168930,15],metrics\/kafka\/controller\/ControllerStats\/LeaderElectionRateAndTimeMs\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/controller\/ControllerStats\/UncleanLeaderElectionsPerSec\/1MinuteRate[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaFetcherManager\/Replica-MaxLag[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaManager\/PartitionCount[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaManager\/UnderReplicatedPartitions[1484165330,1484168930,15],metrics\/kafka\/server\/ReplicaManager\/LeaderCount[1484165330,1484168930,15]&format=null_padding&_=1484168930776","tags":[],"source.type":"bro","adapter.geoadapter.end.ts":"1517499201209","referrer":"http:\/\/node1:8080\/","threatintelsplitterbolt.splitter.end.ts":"1517499201215","adapter.threatinteladapter.begin.ts":"1517499201217","ip_src_port":50451,"guid":"b62fe444-82fb-46a4-8c4a-5cfc248bee41","response_body_len":0}
+{"adapter.threatinteladapter.end.ts":"1517499201385","bro_timestamp":"1517499194.511788","status_code":200,"ip_dst_port":80,"enrichmentsplitterbolt.splitter.end.ts":"1517499201203","enrichments.geo.ip_dst_addr.city":"Strasbourg","enrichments.geo.ip_dst_addr.latitude":"48.5839","enrichmentsplitterbolt.splitter.begin.ts":"1517499201203","adapter.hostfromjsonlistadapter.end.ts":"1517499201207","enrichments.geo.ip_dst_addr.country":"FR","enrichments.geo.ip_dst_addr.locID":"2973783","adapter.geoadapter.begin.ts":"1517499201209","enrichments.geo.ip_dst_addr.postalCode":"67100","uid":"CRGLdEasAJUDL8Tu4","resp_mime_types":["application\/x-shockwave-flash"],"trans_depth":1,"protocol":"http","original_string":"HTTP | id.orig_p:49185 status_code:200 method:GET request_body_len:0 id.resp_p:80 uri:\/ tags:[] uid:CRGLdEasAJUDL8Tu4 referrer:http:\/\/va872g.g90e1h.b8.642b63u.j985a2.v33e.37.pa269cc.e8mfzdgrf7g0.groupprograms.in\/?285a4d4e4e5a4d4d4649584c5d43064b4745 resp_mime_types:[\"application\\\/x-shockwave-flash\"] trans_depth:1 host:ubb67.3c147o.u806a4.w07d919.o5f.f1.b80w.r0faf9.e8mfzdgrf7g0.groupprograms.in status_msg:OK id.orig_h:192.168.138.158 response_body_len:8973 user_agent:Mozilla\/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident\/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0) ts:1517499194.511788 id.resp_h:62.75.195.236 resp_fuids:[\"FHMpUl2B1lUkpzZoQi\"]","ip_dst_addr":"62.75.195.236","threatinteljoinbolt.joiner.ts":"1517499201387","host":"ubb67.3c147o.u806a4.w07d919.o5f.f1.b80w.r0faf9.e8mfzdgrf7g0.groupprograms.in","enrichmentjoinbolt.joiner.ts":"1517499201213","adapter.hostfromjsonlistadapter.begin.ts":"1517499201207","threatintelsplitterbolt.splitter.begin.ts":"1517499201215","enrichments.geo.ip_dst_addr.longitude":"7.7455","ip_src_addr":"192.168.138.158","user_agent":"Mozilla\/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident\/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)","resp_fuids":["FHMpUl2B1lUkpzZoQi"],"timestamp":1517499194511,"method":"GET","request_body_len":0,"uri":"\/","tags":[],"source.type":"bro","adapter.geoadapter.end.ts":"1517499201210","referrer":"http:\/\/va872g.g90e1h.b8.642b63u.j985a2.v33e.37.pa269cc.e8mfzdgrf7g0.groupprograms.in\/?285a4d4e4e5a4d4d4649584c5d43064b4745","threatintelsplitterbolt.splitter.end.ts":"1517499201215","adapter.threatinteladapter.begin.ts":"1517499201357","ip_src_port":49185,"enrichments.geo.ip_dst_addr.location_point":"48.5839,7.7455","status_msg":"OK","guid":"04c670c2-417e-4fd5-aff6-3dd55847d3e2","response_body_len":8973}
+{"adapter.threatinteladapter.end.ts":"1517499201399","bro_timestamp":"1517499194.20478","status_code":404,"ip_dst_port":80,"enrichmentsplitterbolt.splitter.end.ts":"1517499201203","enrichments.geo.ip_dst_addr.city":"Phoenix","enrichments.geo.ip_dst_addr.latitude":"33.4499","enrichmentsplitterbolt.splitter.begin.ts":"1517499201203","adapter.hostfromjsonlistadapter.end.ts":"1517499201207","enrichments.geo.ip_dst_addr.country":"US","enrichments.geo.ip_dst_addr.locID":"5308655","adapter.geoadapter.begin.ts":"1517499201210","enrichments.geo.ip_dst_addr.postalCode":"85004","uid":"CgI9Lp32cTchxqp8Wk","resp_mime_types":["text\/html"],"trans_depth":1,"protocol":"http","original_string":"HTTP | id.orig_p:49199 status_code:404 method:POST request_body_len:96 id.resp_p:80 orig_mime_types:[\"text\\\/plain\"] uri:\/wp-content\/themes\/twentyfifteen\/img5.php?l=8r1gf1b2t1kuq42 tags:[] uid:CgI9Lp32cTchxqp8Wk resp_mime_types:[\"text\\\/html\"] trans_depth:1 orig_fuids:[\"FDpZNy3tiCh1cjvs19\"] host:runlove.us status_msg:Not Found id.orig_h:192.168.138.158 response_body_len:357 user_agent:Mozilla\/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident\/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0) ts:1517499194.20478 id.resp_h:204.152.254.221 resp_fuids:[\"FCCDfF1umBiOBkbAl3\"]","ip_dst_addr":"204.152.254.221","threatinteljoinbolt.joiner.ts":"1517499201401","enrichments.geo.ip_dst_addr.dmaCode":"753","host":"runlove.us","enrichmentjoinbolt.joiner.ts":"1517499201273","adapter.hostfromjsonlistadapter.begin.ts":"1517499201207","threatintelsplitterbolt.splitter.begin.ts":"1517499201276","enrichments.geo.ip_dst_addr.longitude":"-112.0712","ip_src_addr":"192.168.138.158","user_agent":"Mozilla\/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident\/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)","resp_fuids":["FCCDfF1umBiOBkbAl3"],"timestamp":1517499194204,"method":"POST","request_body_len":96,"orig_mime_types":["text\/plain"],"uri":"\/wp-content\/themes\/twentyfifteen\/img5.php?l=8r1gf1b2t1kuq42","tags":[],"source.type":"bro","adapter.geoadapter.end.ts":"1517499201270","threatintelsplitterbolt.splitter.end.ts":"1517499201276","adapter.threatinteladapter.begin.ts":"1517499201385","orig_fuids":["FDpZNy3tiCh1cjvs19"],"ip_src_port":49199,"enrichments.geo.ip_dst_addr.location_point":"33.4499,-112.0712","status_msg":"Not Found","guid":"e78f4fbd-1728-4f5d-814a-588998653cc5","response_body_len":357}
+{"adapter.threatinteladapter.end.ts":"1517499201399","bro_timestamp":"1517499194.548579","status_code":200,"ip_dst_port":80,"enrichmentsplitterbolt.splitter.end.ts":"1517499201203","enrichments.geo.ip_dst_addr.city":"Strasbourg","enrichments.geo.ip_dst_addr.latitude":"48.5839","enrichmentsplitterbolt.splitter.begin.ts":"1517499201203","adapter.hostfromjsonlistadapter.end.ts":"1517499201207","enrichments.geo.ip_dst_addr.country":"FR","enrichments.geo.ip_dst_addr.locID":"2973783","adapter.geoadapter.begin.ts":"1517499201270","enrichments.geo.ip_dst_addr.postalCode":"67100","uid":"CMoJLQHEghS3LbRW5","trans_depth":1,"protocol":"http","original_string":"HTTP | id.orig_p:49190 status_code:200 method:GET request_body_len:0 id.resp_p:80 uri:\/?b2566564b3ba1a38e61c83957a7dbcd5 tags:[] uid:CMoJLQHEghS3LbRW5 trans_depth:1 host:62.75.195.236 status_msg:OK id.orig_h:192.168.138.158 response_body_len:0 user_agent:Mozilla\/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident\/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0) ts:1517499194.548579 id.resp_h:62.75.195.236","ip_dst_addr":"62.75.195.236","threatinteljoinbolt.joiner.ts":"1517499201401","host":"62.75.195.236","enrichmentjoinbolt.joiner.ts":"1517499201273","adapter.hostfromjsonlistadapter.begin.ts":"1517499201207","threatintelsplitterbolt.splitter.begin.ts":"1517499201276","enrichments.geo.ip_dst_addr.longitude":"7.7455","ip_src_addr":"192.168.138.158","user_agent":"Mozilla\/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident\/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)","timestamp":1517499194548,"method":"GET","request_body_len":0,"uri":"\/?b2566564b3ba1a38e61c83957a7dbcd5","tags":[],"source.type":"bro","adapter.geoadapter.end.ts":"1517499201270","threatintelsplitterbolt.splitter.end.ts":"1517499201276","adapter.threatinteladapter.begin.ts":"1517499201399","ip_src_port":49190,"enrichments.geo.ip_dst_addr.location_point":"48.5839,7.7455","status_msg":"OK","guid":"8fbfb4df-07f4-48cf-aa0b-6dd491d765d4","response_body_len":0}
+{"adapter.threatinteladapter.end.ts":"1517499201456","qclass_name":"qclass-32769","bro_timestamp":"1517499194.746276","qtype_name":"PTR","ip_dst_port":5353,"enrichmentsplitterbolt.splitter.end.ts":"1517499201204","qtype":12,"rejected":false,"enrichmentsplitterbolt.splitter.begin.ts":"1517499201204","adapter.hostfromjsonlistadapter.end.ts":"1517499201207","trans_id":0,"adapter.geoadapter.begin.ts":"1517499201270","uid":"Cqfoel1A3zgfxBLO58","protocol":"dns","original_string":"DNS | AA:false qclass_name:qclass-32769 id.orig_p:5353 qtype_name:PTR qtype:12 rejected:false id.resp_p:5353 query:_googlecast._tcp.local trans_id:0 TC:false RA:false uid:Cqfoel1A3zgfxBLO58 RD:false proto:udp id.orig_h:192.168.66.1 Z:0 qclass:32769 ts:1517499194.746276 id.resp_h:224.0.0.251","ip_dst_addr":"224.0.0.251","threatinteljoinbolt.joiner.ts":"1517499201459","enrichmentjoinbolt.joiner.ts":"1517499201274","adapter.hostfromjsonlistadapter.begin.ts":"1517499201207","threatintelsplitterbolt.splitter.begin.ts":"1517499201276","Z":0,"ip_src_addr":"192.168.66.1","qclass":32769,"timestamp":1517499194746,"AA":false,"query":"_googlecast._tcp.local","TC":false,"RA":false,"source.type":"bro","adapter.geoadapter.end.ts":"1517499201270","RD":false,"threatintelsplitterbolt.splitter.end.ts":"1517499201276","adapter.threatinteladapter.begin.ts":"1517499201399","ip_src_port":5353,"proto":"udp","guid":"77f3743d-b931-4022-bdbb-cf22e1d45af3"}
+{"adapter.threatinteladapter.end.ts":"1528192727455","bro_timestamp":"1402307733.473","enrichments.geo.ip_src_addr.longitude":"-118.4041","enrichmentsplitterbolt.splitter.end.ts":"1528192727437","enrichments.geo.ip_dst_addr.city":"Richardson","enrichments.geo.ip_dst_addr.country":"US","enrichments.geo.ip_dst_addr.locID":"4722625","enrichments.geo.ip_src_addr.city":"Los Angeles","resp_mime_types":["text\/html","text\/xml"],"protocol":"http","original_string":"HTTP | id.orig_p:58808 status_code:200 method:GET request_body_len:0 id.resp_p:80 uri:\/ tags:[\"a\",\"b\",\"c\"] uid:CTo78A11g7CYbbOHvj resp_mime_types:[\"text\\\/html\",\"text\\\/xml\"] trans_depth:1 host:www.cisco.com status_msg:OK id.orig_h:192.249.113.37 response_body_len:25523 user_agent:curl\/7.22.0 (x86_64-pc-linux-gnu) libcurl\/7.22.0 OpenSSL\/1.0.1 zlib\/1.2.3.4 libidn\/1.23 librtmp\/2.3 ts:1402307733.473 id.resp_h:72.163.4.161 resp_fuids:[\"FJDyMC15lxUn5ngPfd\",\"GJDyMC15lxUn5ngPfe\"]","enrichments.geo.ip_dst_addr.dmaCode":"623","host":"www.cisco.com","enrichmentjoinbolt.joiner.ts":"1528192727444","adapter.hostfromjsonlistadapter.begin.ts":"1528192727439","enrichments.geo.ip_src_addr.dmaCode":"803","method":"GET","tags":["a","b","c"],"adapter.geoadapter.end.ts":"1528192727442","adapter.threatinteladapter.begin.ts":"1528192727455","enrichments.geo.ip_dst_addr.location_point":"32.9513,-96.7154","guid":"68731e82-6a23-4d5c-97f4-9701490a99dc","response_body_len":25523,"status_code":200,"ip_dst_port":80,"enrichments.geo.ip_src_addr.location_point":"33.9571,-118.4041","enrichments.geo.ip_dst_addr.latitude":"32.9513","enrichmentsplitterbolt.splitter.begin.ts":"1528192727437","adapter.hostfromjsonlistadapter.end.ts":"1528192727439","adapter.geoadapter.begin.ts":"1528192727442","enrichments.geo.ip_dst_addr.postalCode":"75081","enrichments.geo.ip_src_addr.postalCode":"90045","uid":"CTo78A11g7CYbbOHvj","trans_depth":1,"ip_dst_addr":"72.163.4.161","enrichments.geo.ip_src_addr.latitude":"33.9571","threatinteljoinbolt.joiner.ts":"1528192727458","threatintelsplitterbolt.splitter.begin.ts":"1528192727446","enrichments.geo.ip_src_addr.locID":"5368361","enrichments.geo.ip_dst_addr.longitude":"-96.7154","ip_src_addr":"192.249.113.37","user_agent":"curl\/7.22.0 (x86_64-pc-linux-gnu) libcurl\/7.22.0 OpenSSL\/1.0.1 zlib\/1.2.3.4 libidn\/1.23 librtmp\/2.3","enrichments.geo.ip_src_addr.country":"US","resp_fuids":["FJDyMC15lxUn5ngPfd","GJDyMC15lxUn5ngPfe"],"timestamp":1402307733473,"request_body_len":0,"uri":"\/","source.type":"bro","threatintelsplitterbolt.splitter.end.ts":"1528192727446","ip_src_port":58808,"status_msg":"OK"}
+{"TTLs":[3600.0,289.0,14.0],"adapter.threatinteladapter.end.ts":"1528192727455","qclass_name":"C_INTERNET","bro_timestamp":"1402308259.609","qtype_name":"AAAA","ip_dst_port":53,"enrichmentsplitterbolt.splitter.end.ts":"1528192727437","qtype":28,"rejected":false,"enrichments.geo.ip_dst_addr.city":"Almere Stad","enrichments.geo.ip_dst_addr.latitude":"52.3881","answers":["www.cisco.com.akadns.net","origin-www.cisco.com","2001:420:1201:2::a"],"enrichmentsplitterbolt.splitter.begin.ts":"1528192727437","adapter.hostfromjsonlistadapter.end.ts":"1528192727439","enrichments.geo.ip_dst_addr.country":"NL","enrichments.geo.ip_dst_addr.locID":"2759879","trans_id":62418,"adapter.geoadapter.begin.ts":"1528192727442","enrichments.geo.ip_dst_addr.postalCode":"1317","uid":"CuJT272SKaJSuqO0Ia","protocol":"dns","original_string":"DNS | AA:true TTLs:[3600.0,289.0,14.0] qclass_name:C_INTERNET id.orig_p:33976 qtype_name:AAAA qtype:28 rejected:false id.resp_p:53 query:www.cisco.com answers:[\"www.cisco.com.akadns.net\",\"origin-www.cisco.com\",\"2001:420:1201:2::a\"] trans_id:62418 rcode:0 rcode_name:NOERROR TC:false RA:true uid:CuJT272SKaJSuqO0Ia RD:true proto:udp id.orig_h:10.122.196.204 Z:0 qclass:1 ts:1402308259.609 id.resp_h:144.254.71.184","ip_dst_addr":"144.254.71.184","threatinteljoinbolt.joiner.ts":"1528192727458","enrichmentjoinbolt.joiner.ts":"1528192727445","adapter.hostfromjsonlistadapter.begin.ts":"1528192727439","threatintelsplitterbolt.splitter.begin.ts":"1528192727446","Z":0,"enrichments.geo.ip_dst_addr.longitude":"5.2354","ip_src_addr":"10.122.196.204","qclass":1,"timestamp":1402308259609,"AA":true,"query":"www.cisco.com","rcode":0,"rcode_name":"NOERROR","TC":false,"RA":true,"source.type":"bro","adapter.geoadapter.end.ts":"1528192727442","RD":true,"threatintelsplitterbolt.splitter.end.ts":"1528192727446","adapter.threatinteladapter.begin.ts":"1528192727455","ip_src_port":33976,"proto":"udp","enrichments.geo.ip_dst_addr.location_point":"52.3881,5.2354","guid":"d320cb1c-e4dc-4b1d-9650-75bcf2c9e371"}
+{"adapter.threatinteladapter.end.ts":"1528192727455","bro_timestamp":"1216706983.387664","timedout":true,"enrichments.geo.ip_src_addr.longitude":"-118.244","enrichmentsplitterbolt.splitter.end.ts":"1528192727438","enrichments.geo.ip_src_addr.location_point":"34.0544,-118.244","enrichmentsplitterbolt.splitter.begin.ts":"1528192727438","adapter.hostfromjsonlistadapter.end.ts":"1528192727440","source":"HTTP","adapter.geoadapter.begin.ts":"1528192727442","duration":30.701792,"protocol":"files","original_string":"FILES | timedout:true rx_hosts:[\"192.168.15.4\",\"192.168.15.5\"] source:HTTP is_orig:false tx_hosts:[\"216.113.185.92\",\"216.113.185.93\"] overflow_bytes:0 duration:30.701792 depth:0 analyzers:[\"MD5\",\"SHA1\"] fuid:FnEYba9VPOcC41c1 conn_uids:[\"CLWqoN1IA9MB8Ru9i3\",\"DLWqoN1IA9MB8Ru9i4\"] seen_bytes:0 missing_bytes:3384 ts:1216706983.387664","ip_dst_addr":"192.168.15.4","analyzers":["MD5","SHA1"],"enrichments.geo.ip_src_addr.latitude":"34.0544","threatinteljoinbolt.joiner.ts":"1528192727458","enrichmentjoinbolt.joiner.ts":"1528192727445","adapter.hostfromjsonlistadapter.begin.ts":"1528192727440","threatintelsplitterbolt.splitter.begin.ts":"1528192727446","fuid":"FnEYba9VPOcC41c1","seen_bytes":0,"missing_bytes":3384,"ip_src_addr":"216.113.185.92","enrichments.geo.ip_src_addr.country":"US","timestamp":1216706983387,"is_orig":false,"overflow_bytes":0,"source.type":"bro","adapter.geoadapter.end.ts":"1528192727442","depth":0,"threatintelsplitterbolt.splitter.end.ts":"1528192727446","adapter.threatinteladapter.begin.ts":"1528192727455","guid":"558bb655-3867-439b-b26d-13aa77d1b3ec","conn_uids":["CLWqoN1IA9MB8Ru9i3","DLWqoN1IA9MB8Ru9i4"]}
+{"adapter.threatinteladapter.end.ts":"1528192727455","bro_timestamp":"1440447880.931272","resp_pkts":1,"ip_dst_port":1812,"enrichmentsplitterbolt.splitter.end.ts":"1528192727439","enrichmentsplitterbolt.splitter.begin.ts":"1528192727439","adapter.hostfromjsonlistadapter.end.ts":"1528192727441","adapter.geoadapter.begin.ts":"1528192727442","duration":1.001459,"uid":"CWxtRHnBTbldHnmGh","protocol":"conn","original_string":"CONN | id.orig_p:52178 resp_pkts:1 resp_ip_bytes:48 orig_bytes:75 id.resp_p:1812 orig_ip_bytes:103 orig_pkts:1 missed_bytes:0 history:Dd tunnel_parents:[\"a\",\"b\",\"c\"] duration:1.001459 uid:CWxtRHnBTbldHnmGh resp_bytes:20 service:radius conn_state:SF proto:udp id.orig_h:127.0.0.1 ts:1440447880.931272 id.resp_h:127.0.0.1","ip_dst_addr":"127.0.0.1","threatinteljoinbolt.joiner.ts":"1528192727458","conn_state":"SF","enrichmentjoinbolt.joiner.ts":"1528192727445","adapter.hostfromjsonlistadapter.begin.ts":"1528192727441","threatintelsplitterbolt.splitter.begin.ts":"1528192727446","ip_src_addr":"127.0.0.1","timestamp":1440447880931,"resp_ip_bytes":48,"orig_bytes":75,"orig_ip_bytes":103,"orig_pkts":1,"missed_bytes":0,"history":"Dd","tunnel_parents":["a","b","c"],"source.type":"bro","adapter.geoadapter.end.ts":"1528192727442","resp_bytes":20,"threatintelsplitterbolt.splitter.end.ts":"1528192727446","adapter.threatinteladapter.begin.ts":"1528192727455","ip_src_port":52178,"service":"radius","proto":"udp","guid":"d599c0a8-46f5-44d5-a504-409790d7468a"}
+{"adapter.threatinteladapter.end.ts":"1528192727458","bro_timestamp":"1258568036.57884","ip_dst_port":25,"enrichmentsplitterbolt.splitter.end.ts":"1528192727442","enrichmentsplitterbolt.splitter.begin.ts":"1528192727441","adapter.hostfromjsonlistadapter.end.ts":"1528192727444","adapter.geoadapter.begin.ts":"1528192727444","uid":"ChR6254RrWbrxiGsd7","path":["192.168.1.1","192.168.1.105"],"trans_depth":1,"protocol":"smtp","original_string":"SMTP | id.orig_p:49353 id.resp_p:25 helo:M57Terry uid:ChR6254RrWbrxiGsd7 path:[\"192.168.1.1\",\"192.168.1.105\"] trans_depth:1 is_webmail:false last_reply:220 2.0.0 Ready to start TLS id.orig_h:192.168.1.105 tls:true fuids:[\"a\",\"b\",\"c\"] ts:1258568036.57884 id.resp_h:192.168.1.1","ip_dst_addr":"192.168.1.1","is_webmail":false,"threatinteljoinbolt.joiner.ts":"1528192727460","enrichmentjoinbolt.joiner.ts":"1528192727447","adapter.hostfromjsonlistadapter.begin.ts":"1528192727444","threatintelsplitterbolt.splitter.begin.ts":"1528192727455","fuids":["a","b","c"],"ip_src_addr":"192.168.1.105","timestamp":1258568036578,"source.type":"bro","helo":"M57Terry","adapter.geoadapter.end.ts":"1528192727444","threatintelsplitterbolt.splitter.end.ts":"1528192727455","adapter.threatinteladapter.begin.ts":"1528192727457","ip_src_port":49353,"last_reply":"220 2.0.0 Ready to start TLS","guid":"c1ca10a2-615b-4038-be57-5c9790743477","tls":true}
+{"adapter.threatinteladapter.end.ts":"1528192727458","server_name":"login.live.com","bro_timestamp":"1216706999.444925","ip_dst_port":443,"enrichmentsplitterbolt.splitter.end.ts":"1528192727442","enrichments.geo.ip_dst_addr.city":"Redmond","subject":"CN=login.live.com,OU=MSN-Passport,O=Microsoft Corporation,street=One Microsoft Way,L=Redmond,ST=Washington,postalCode=98052,C=US,serialNumber=600413485,businessCategory=V1.0\\, Clause 5.(b),1.3.6.1.4.1.311.60.2.1.2=#130A57617368696E67746F6E,1.3.6.1.4.1.311.60.2.1.3=#13025553","enrichments.geo.ip_dst_addr.latitude":"47.6801","cert_chain_fuids":["FkYBO41LPAXxh44KFk","FPrzYN1SuBqHflXZId","FZ71xF13r5XVSam1z1"],"enrichmentsplitterbolt.splitter.begin.ts":"1528192727442","adapter.hostfromjsonlistadapter.end.ts":"1528192727444","enrichments.geo.ip_dst_addr.country":"US","enrichments.geo.ip_dst_addr.locID":"5808079","adapter.geoadapter.begin.ts":"1528192727444","issuer":"CN=VeriSign Class 3 Extended Validation SSL CA,OU=Terms of use at https:\/\/www.verisign.com\/rpa (c)06,OU=VeriSign Trust Network,O=VeriSign\\, Inc.,C=US","enrichments.geo.ip_dst_addr.postalCode":"98052","uid":"CVrS2IBW8gukBClA8","protocol":"ssl","original_string":"SSL | cipher:TLS_RSA_WITH_RC4_128_MD5 established:true server_name:login.live.com id.orig_p:36532 client_cert_chain_fuids:[\"FkYBO41LPAXxh44KFk\",\"FPrzYN1SuBqHflXZId\",\"FZ71xF13r5XVSam1z1\"] subject:CN=login.live.com,OU=MSN-Passport,O=Microsoft Corporation,street=One Microsoft Way,L=Redmond,ST=Washington,postalCode=98052,C=US,serialNumber=600413485,businessCategory=V1.0\\, Clause 5.(b),1.3.6.1.4.1.311.60.2.1.2=#130A57617368696E67746F6E,1.3.6.1.4.1.311.60.2.1.3=#13025553 id.resp_p:443 cert_chain_fuids:[\"FkYBO41LPAXxh44KFk\",\"FPrzYN1SuBqHflXZId\",\"FZ71xF13r5XVSam1z1\"] version:TLSv10 issuer:CN=VeriSign Class 3 Extended Validation SSL CA,OU=Terms of use at https:\/\/www.verisign.com\/rpa (c)06,OU=VeriSign Trust Network,O=VeriSign\\, Inc.,C=US uid:CVrS2IBW8gukBClA8 id.orig_h:192.168.15.4 validation_status:unable to get local issuer certificate resumed:false ts:1216706999.444925 id.resp_h:65.54.186.47","ip_dst_addr":"65.54.186.47","threatinteljoinbolt.joiner.ts":"1528192727460","enrichments.geo.ip_dst_addr.dmaCode":"819","enrichmentjoinbolt.joiner.ts":"1528192727447","adapter.hostfromjsonlistadapter.begin.ts":"1528192727444","threatintelsplitterbolt.splitter.begin.ts":"1528192727455","enrichments.geo.ip_dst_addr.longitude":"-122.1206","ip_src_addr":"192.168.15.4","timestamp":1216706999444,"cipher":"TLS_RSA_WITH_RC4_128_MD5","established":true,"client_cert_chain_fuids":["FkYBO41LPAXxh44KFk","FPrzYN1SuBqHflXZId","FZ71xF13r5XVSam1z1"],"version":"TLSv10","source.type":"bro","adapter.geoadapter.end.ts":"1528192727444","threatintelsplitterbolt.splitter.end.ts":"1528192727455","adapter.threatinteladapter.begin.ts":"1528192727458","ip_src_port":36532,"enrichments.geo.ip_dst_addr.location_point":"47.6801,-122.1206","guid":"0c5b0898-dbcc-4ac3-a56c-44ade0774e22","validation_status":"unable to get local issuer certificate","resumed":false}
+{"msg":"SSL certificate validation failed with (unable to get local issuer certificate)","suppress_for":3600.0,"adapter.threatinteladapter.end.ts":"1528192727459","note":"SSL::Invalid_Server_Cert","sub":"CN=www.google.com,O=Google Inc,L=Mountain View,ST=California,C=US","bro_timestamp":"1216706377.196728","dst":"74.125.19.104","ip_dst_port":443,"enrichmentsplitterbolt.splitter.end.ts":"1528192727443","enrichments.geo.ip_dst_addr.city":"Morganton","enrichments.geo.ip_dst_addr.latitude":"35.7454","dropped":false,"enrichmentsplitterbolt.splitter.begin.ts":"1528192727443","adapter.hostfromjsonlistadapter.end.ts":"1528192727445","enrichments.geo.ip_dst_addr.country":"US","enrichments.geo.ip_dst_addr.locID":"4480219","adapter.geoadapter.begin.ts":"1528192727445","enrichments.geo.ip_dst_addr.postalCode":"28680","uid":"CNHQmp1mNiZHdAf5Ce","protocol":"notice","original_string":"NOTICE | msg:SSL certificate validation failed with (unable to get local issuer certificate) suppress_for:3600.0 note:SSL::Invalid_Server_Cert sub:CN=www.google.com,O=Google Inc,L=Mountain View,ST=California,C=US id.orig_p:35736 dst:74.125.19.104 src:192.168.15.4 id.resp_p:443 dropped:false peer_descr:bro p:443 uid:CNHQmp1mNiZHdAf5Ce proto:tcp id.orig_h:192.168.15.4 actions:[\"Notice::ACTION_LOG\",\"Notice::ACTION_ALARM\"] ts:1216706377.196728 id.resp_h:74.125.19.104","ip_dst_addr":"74.125.19.104","threatinteljoinbolt.joiner.ts":"1528192727461","enrichments.geo.ip_dst_addr.dmaCode":"517","enrichmentjoinbolt.joiner.ts":"1528192727454","adapter.hostfromjsonlistadapter.begin.ts":"1528192727445","threatintelsplitterbolt.splitter.begin.ts":"1528192727456","enrichments.geo.ip_dst_addr.longitude":"-81.6848","ip_src_addr":"192.168.15.4","timestamp":1216706377196,"src":"192.168.15.4","peer_descr":"bro","source.type":"bro","p":443,"adapter.geoadapter.end.ts":"1528192727445","threatintelsplitterbolt.splitter.end.ts":"1528192727456","adapter.threatinteladapter.begin.ts":"1528192727459","ip_src_port":35736,"proto":"tcp","enrichments.geo.ip_dst_addr.location_point":"35.7454,-81.6848","guid":"79162baa-4798-4a5f-aae5-5c225a6a2bad","actions":["Notice::ACTION_LOG","Notice::ACTION_ALARM"]}
+{"adapter.threatinteladapter.end.ts":"1528192727460","bro_timestamp":"1216698600.338338","ip_dst_port":10000,"enrichmentsplitterbolt.splitter.end.ts":"1528192727444","enrichments.geo.ip_dst_addr.city":"Holmdel","enrichments.geo.ip_dst_addr.latitude":"40.3754","enrichmentsplitterbolt.splitter.begin.ts":"1528192727444","adapter.hostfromjsonlistadapter.end.ts":"1528192727446","enrichments.geo.ip_dst_addr.country":"US","enrichments.geo.ip_dst_addr.locID":"5099193","adapter.geoadapter.begin.ts":"1528192727446","response_path":["SIP\/2.0\/UDP 192.168.1.64:10000","SIP\/2.0\/UDP 192.168.1.64:10000","SIP\/2.0\/UDP 192.168.1.64:10000","SIP\/2.0\/UDP 192.168.1.64:10000"],"enrichments.geo.ip_dst_addr.postalCode":"07733","uid":"Cl2G2m3bdeE8F9I9ei","trans_depth":0,"protocol":"sip","original_string":"SIP | id.orig_p:1033 method:REGISTER request_body_len:0 id.resp_p:10000 response_path:[\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\",\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\",\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\",\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\"] uri:sip:t.voncp.com:10000 call_id:7757a70e218b95730dd2daeaac7d20b1@192.168.1.64 uid:Cl2G2m3bdeE8F9I9ei trans_depth:0 request_from:\"16178766111\" <sip:16178766111@t.voncp.com:10000> request_path:[\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\",\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\",\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\",\"SIP\\\/2.0\\\/UDP 192.168.1.64:10000\"] id.orig_h:192.168.1.64 request_to:\"16178766111\" <sip:16178766111@t.voncp.com:10000> seq:1761527957 REGISTER user_agent:VDV21 001DD92E4F61 2.8.1_1.4.7 LwooEk3GCD\/bcm001DD92E4F61.xml ts:1216698600.338338 id.resp_h:69.59.232.120","ip_dst_addr":"69.59.232.120","threatinteljoinbolt.joiner.ts":"1528192727463","enrichments.geo.ip_dst_addr.dmaCode":"501","enrichmentjoinbolt.joiner.ts":"1528192727455","adapter.hostfromjsonlistadapter.begin.ts":"1528192727446","threatintelsplitterbolt.splitter.begin.ts":"1528192727458","enrichments.geo.ip_dst_addr.longitude":"-74.1712","request_to":"\"16178766111\" <sip:16178766111@t.voncp.com:10000>","ip_src_addr":"192.168.1.64","seq":"1761527957 REGISTER","user_agent":"VDV21 001DD92E4F61 2.8.1_1.4.7 LwooEk3GCD\/bcm001DD92E4F61.xml","timestamp":1216698600338,"method":"REGISTER","request_body_len":0,"uri":"sip:t.voncp.com:10000","call_id":"7757a70e218b95730dd2daeaac7d20b1@192.168.1.64","source.type":"bro","adapter.geoadapter.end.ts":"1528192727446","request_from":"\"16178766111\" <sip:16178766111@t.voncp.com:10000>","threatintelsplitterbolt.splitter.end.ts":"1528192727458","adapter.threatinteladapter.begin.ts":"1528192727460","ip_src_port":1033,"enrichments.geo.ip_dst_addr.location_point":"40.3754,-74.1712","request_path":["SIP\/2.0\/UDP 192.168.1.64:10000","SIP\/2.0\/UDP 192.168.1.64:10000","SIP\/2.0\/UDP 192.168.1.64:10000","SIP\/2.0\/UDP 192.168.1.64:10000"],"guid":"403f7e81-12d9-4a0c-a846-fa11b81108fe"}
diff --git a/metron-platform/metron-solr/src/test/resources/example_data/error b/metron-platform/metron-solr/src/test/resources/example_data/error
new file mode 100644
index 0000000..58802be
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/resources/example_data/error
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{"guid" : "0000000","exception":"java.lang.IllegalStateException: Grok parser Error: Grok statement produced a null message. Original message was: dummy and the parsed message was: {} . Check the pattern at: \/patterns\/yaf on dummy","failed_sensor_type":"yaf","stack":"java.lang.IllegalStateException: Grok parser Error: Grok statement produced a null message. Original message was: dummy and the parsed message was: {} . Check the pattern at: \/patterns\/yaf on dummy\n\tat org.apache.metron.parsers.GrokParser.parse(GrokParser.java:164)\n\tat org.apache.metron.parsers.interfaces.MessageParser.parseOptional(MessageParser.java:45)\n\tat org.apache.metron.parsers.bolt.ParserBolt.execute(ParserBolt.java:175)\n\tat org.apache.storm.daemon.executor$fn__6573$tuple_action_fn__6575.invoke(executor.clj:734)\n\tat org.apache.storm.daemon.executor$mk_task_receiver$fn__6494.invoke(executor.clj:466)\n\tat org.apache.storm.disruptor$clojure_handler$reify__6007.onEvent(disruptor.clj:40)\n\tat org.apache.storm.utils.DisruptorQueue.consumeBatchToCursor(DisruptorQueue.java:451)\n\tat org.apache.storm.utils.DisruptorQueue.consumeBatchWhenAvailable(DisruptorQueue.java:430)\n\tat org.apache.storm.disruptor$consume_batch_when_available.invoke(disruptor.clj:73)\n\tat org.apache.storm.daemon.executor$fn__6573$fn__6586$fn__6639.invoke(executor.clj:853)\n\tat org.apache.storm.util$async_loop$fn__554.invoke(util.clj:484)\n\tat clojure.lang.AFn.run(AFn.java:22)\n\tat java.lang.Thread.run(Thread.java:745)\nCaused by: java.lang.RuntimeException: Grok statement produced a null message. Original message was: dummy and the parsed message was: {} . Check the pattern at: \/patterns\/yaf\n\tat org.apache.metron.parsers.GrokParser.parse(GrokParser.java:144)\n\t... 12 more\n","hostname":"node1","raw_message":"dummy","error_hash":"b5a2c96250612366ea272ffac6d9744aaf4b45aacd96aa7cfcb931ee3b558259","error_type":"parser_error","message":"Grok parser Error: Grok statement produced a null message. Original message was: dummy and the parsed message was: {} . Check the pattern at: \/patterns\/yaf on dummy","source.type":"error","timestamp":1517606359312}
diff --git a/metron-platform/metron-solr/src/test/resources/example_data/snort b/metron-platform/metron-solr/src/test/resources/example_data/snort
new file mode 100644
index 0000000..eff1ba8
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/resources/example_data/snort
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{"msg":"'snort test alert'","adapter.threatinteladapter.end.ts":"1517499195495","sig_rev":"0","ip_dst_port":"50183","enrichmentsplitterbolt.splitter.end.ts":"1517499192333","ethsrc":"08:00:27:E8:B0:7A","threat.triage.rules.0.comment":null,"tcpseq":"0x8DF4FA2F","threat.triage.score":10.0,"dgmlen":"52","enrichmentsplitterbolt.splitter.begin.ts":"1517499192195","adapter.hostfromjsonlistadapter.end.ts":"1517499192400","adapter.geoadapter.begin.ts":"1517499192446","tcpwindow":"0x1F5","threat.triage.rules.0.score":10,"tcpack":"0x8368306E","protocol":"TCP","ip_dst_addr":"192.168.66.1","original_string":"02\/01\/18-15:33:07.000000 ,1,999158,0,\"'snort test alert'\",TCP,192.168.66.121,8080,192.168.66.1,50183,08:00:27:E8:B0:7A,0A:00:27:00:00:00,0x42,***A****,0x8DF4FA2F,0x8368306E,,0x1F5,64,0,62260,52,53248,,,,","threatinteljoinbolt.joiner.ts":"1517499195528","enrichmentjoinbolt.joiner.ts":"1517499192965","threat.triage.rules.0.reason":null,"tos":"0","adapter.hostfromjsonlistadapter.begin.ts":"1517499192400","threatintelsplitterbolt.splitter.begin.ts":"1517499193330","id":"62260","ip_src_addr":"192.168.66.121","timestamp":1517499187000,"ethdst":"0A:00:27:00:00:00","threat.triage.rules.0.name":null,"is_alert":"true","ttl":"64","source.type":"snort","adapter.geoadapter.end.ts":"1517499192723","ethlen":"0x42","iplen":"53248","threatintelsplitterbolt.splitter.end.ts":"1517499193359","adapter.threatinteladapter.begin.ts":"1517499193366","ip_src_port":"8080","tcpflags":"***A****","guid":"b486ac73-6c5f-425c-92c3-5f2542b53c35","sig_id":"999158","sig_generator":"1"}
+{"msg":"'snort test alert'","adapter.threatinteladapter.end.ts":"1517499195797","enrichmentsplitterbolt.splitter.end.ts":"1517499192359","enrichments.geo.ip_dst_addr.city":"Strasbourg","threat.triage.rules.0.comment":null,"dgmlen":"353","enrichments.geo.ip_dst_addr.country":"FR","enrichments.geo.ip_dst_addr.locID":"2973783","tcpack":"0xB640F4","protocol":"TCP","original_string":"02\/01\/18-15:33:07.000000 ,1,999158,0,\"'snort test alert'\",TCP,192.168.138.158,49192,62.75.195.236,80,00:00:00:00:00:00,00:00:00:00:00:00,0x16F,***AP***,0xD57E2000,0xB640F4,,0xFAF0,128,0,2416,353,99332,,,,","enrichmentjoinbolt.joiner.ts":"1517499193236","adapter.hostfromjsonlistadapter.begin.ts":"1517499192452","id":"2416","adapter.geoadapter.end.ts":"1517499193234","ethlen":"0x16F","adapter.threatinteladapter.begin.ts":"1517499195496","enrichments.geo.ip_dst_addr.location_point":"48.5839,7.7455","tcpflags":"***AP***","guid":"27a11b7a-9ed2-4a49-b177-04acc30b69c5","sig_rev":"0","ip_dst_port":"80","ethsrc":"00:00:00:00:00:00","enrichments.geo.ip_dst_addr.latitude":"48.5839","tcpseq":"0xD57E2000","threat.triage.score":10.0,"enrichmentsplitterbolt.splitter.begin.ts":"1517499192359","adapter.hostfromjsonlistadapter.end.ts":"1517499192452","adapter.geoadapter.begin.ts":"1517499192723","tcpwindow":"0xFAF0","enrichments.geo.ip_dst_addr.postalCode":"67100","threat.triage.rules.0.score":10,"ip_dst_addr":"62.75.195.236","threatinteljoinbolt.joiner.ts":"1517499195801","threat.triage.rules.0.reason":null,"tos":"0","threatintelsplitterbolt.splitter.begin.ts":"1517499193359","enrichments.geo.ip_dst_addr.longitude":"7.7455","ip_src_addr":"192.168.138.158","timestamp":1517499187000,"ethdst":"00:00:00:00:00:00","threat.triage.rules.0.name":null,"is_alert":"true","ttl":"128","source.type":"snort","iplen":"99332","threatintelsplitterbolt.splitter.end.ts":"1517499193359","ip_src_port":"49192","sig_id":"999158","sig_generator":"1"}
+{"msg":"'snort test alert'","adapter.threatinteladapter.end.ts":"1517499196016","sig_rev":"0","ip_dst_port":"8080","enrichmentsplitterbolt.splitter.end.ts":"1517499192360","ethsrc":"0A:00:27:00:00:00","threat.triage.rules.0.comment":null,"tcpseq":"0xE6B38B18","threat.triage.score":10.0,"dgmlen":"52","enrichmentsplitterbolt.splitter.begin.ts":"1517499192360","adapter.hostfromjsonlistadapter.end.ts":"1517499192452","adapter.geoadapter.begin.ts":"1517499193234","tcpwindow":"0xFF2","threat.triage.rules.0.score":10,"tcpack":"0x79C2FA21","protocol":"TCP","ip_dst_addr":"192.168.66.121","original_string":"02\/01\/18-15:33:07.000000 ,1,999158,0,\"'snort test alert'\",TCP,192.168.66.1,50186,192.168.66.121,8080,0A:00:27:00:00:00,08:00:27:E8:B0:7A,0x42,***A****,0xE6B38B18,0x79C2FA21,,0xFF2,64,0,31478,52,53248,,,,","threatinteljoinbolt.joiner.ts":"1517499196019","enrichmentjoinbolt.joiner.ts":"1517499193238","threat.triage.rules.0.reason":null,"tos":"0","adapter.hostfromjsonlistadapter.begin.ts":"1517499192452","threatintelsplitterbolt.splitter.begin.ts":"1517499193359","id":"31478","ip_src_addr":"192.168.66.1","timestamp":1517499187000,"ethdst":"08:00:27:E8:B0:7A","threat.triage.rules.0.name":null,"is_alert":"true","ttl":"64","source.type":"snort","adapter.geoadapter.end.ts":"1517499193236","ethlen":"0x42","iplen":"53248","threatintelsplitterbolt.splitter.end.ts":"1517499193360","adapter.threatinteladapter.begin.ts":"1517499195797","ip_src_port":"50186","tcpflags":"***A****","guid":"50f8de4d-d3ef-4f31-b337-5ea67493ebe5","sig_id":"999158","sig_generator":"1"}
+{"msg":"'snort test alert'","adapter.threatinteladapter.end.ts":"1517499196016","enrichmentsplitterbolt.splitter.end.ts":"1517499192400","enrichments.geo.ip_dst_addr.city":"Strasbourg","threat.triage.rules.0.comment":null,"dgmlen":"40","enrichments.geo.ip_dst_addr.country":"FR","enrichments.geo.ip_dst_addr.locID":"2973783","tcpack":"0x7371702D","protocol":"TCP","original_string":"02\/01\/18-15:33:07.000000 ,1,999158,0,\"'snort test alert'\",TCP,192.168.138.158,49186,62.75.195.236,80,00:00:00:00:00:00,00:00:00:00:00:00,0x3C,***A****,0x516C475D,0x7371702D,,0xFAF0,128,0,2257,40,40960,,,,","enrichmentjoinbolt.joiner.ts":"1517499193239","adapter.hostfromjsonlistadapter.begin.ts":"1517499192452","id":"2257","adapter.geoadapter.end.ts":"1517499193236","ethlen":"0x3C","adapter.threatinteladapter.begin.ts":"1517499196016","enrichments.geo.ip_dst_addr.location_point":"48.5839,7.7455","tcpflags":"***A****","guid":"054ff2bb-4d29-4cfc-b225-fef7488b96a6","sig_rev":"0","ip_dst_port":"80","ethsrc":"00:00:00:00:00:00","enrichments.geo.ip_dst_addr.latitude":"48.5839","tcpseq":"0x516C475D","threat.triage.score":10.0,"enrichmentsplitterbolt.splitter.begin.ts":"1517499192369","adapter.hostfromjsonlistadapter.end.ts":"1517499192452","adapter.geoadapter.begin.ts":"1517499193236","tcpwindow":"0xFAF0","enrichments.geo.ip_dst_addr.postalCode":"67100","threat.triage.rules.0.score":10,"ip_dst_addr":"62.75.195.236","threatinteljoinbolt.joiner.ts":"1517499196020","threat.triage.rules.0.reason":null,"tos":"0","threatintelsplitterbolt.splitter.begin.ts":"1517499193360","enrichments.geo.ip_dst_addr.longitude":"7.7455","ip_src_addr":"192.168.138.158","timestamp":1517499187000,"ethdst":"00:00:00:00:00:00","threat.triage.rules.0.name":null,"is_alert":"true","ttl":"128","source.type":"snort","iplen":"40960","threatintelsplitterbolt.splitter.end.ts":"1517499193360","ip_src_port":"49186","sig_id":"999158","sig_generator":"1"}
+{"msg":"'snort test alert'","adapter.threatinteladapter.end.ts":"1517499196062","enrichments.geo.ip_src_addr.longitude":"7.7455","enrichmentsplitterbolt.splitter.end.ts":"1517499192448","threat.triage.rules.0.comment":null,"dgmlen":"1407","enrichments.geo.ip_src_addr.city":"Strasbourg","tcpack":"0x9DFB1927","protocol":"TCP","original_string":"02\/01\/18-15:33:07.000000 ,1,999158,0,\"'snort test alert'\",TCP,62.75.195.236,80,192.168.138.158,49189,00:00:00:00:00:00,00:00:00:00:00:00,0x58D,***AP***,0xF1BC1268,0x9DFB1927,,0xFAF0,128,0,1722,1407,130068,,,,","enrichmentjoinbolt.joiner.ts":"1517499193239","adapter.hostfromjsonlistadapter.begin.ts":"1517499192452","id":"1722","adapter.geoadapter.end.ts":"1517499193238","ethlen":"0x58D","adapter.threatinteladapter.begin.ts":"1517499196016","tcpflags":"***AP***","guid":"65366689-c232-46bf-a3ae-ad72ab560a70","sig_rev":"0","ip_dst_port":"49189","enrichments.geo.ip_src_addr.location_point":"48.5839,7.7455","ethsrc":"00:00:00:00:00:00","tcpseq":"0xF1BC1268","threat.triage.score":10.0,"enrichmentsplitterbolt.splitter.begin.ts":"1517499192448","adapter.hostfromjsonlistadapter.end.ts":"1517499192452","adapter.geoadapter.begin.ts":"1517499193236","tcpwindow":"0xFAF0","enrichments.geo.ip_src_addr.postalCode":"67100","threat.triage.rules.0.score":10,"ip_dst_addr":"192.168.138.158","enrichments.geo.ip_src_addr.latitude":"48.5839","threatinteljoinbolt.joiner.ts":"1517499196065","threat.triage.rules.0.reason":null,"tos":"0","threatintelsplitterbolt.splitter.begin.ts":"1517499193360","enrichments.geo.ip_src_addr.locID":"2973783","ip_src_addr":"62.75.195.236","enrichments.geo.ip_src_addr.country":"FR","timestamp":1517499187000,"ethdst":"00:00:00:00:00:00","threat.triage.rules.0.name":null,"is_alert":"true","ttl":"128","source.type":"snort","iplen":"130068","threatintelsplitterbolt.splitter.end.ts":"1517499193360","ip_src_port":"80","sig_id":"999158","sig_generator":"1"}
diff --git a/metron-platform/metron-solr/src/test/resources/example_data/yaf b/metron-platform/metron-solr/src/test/resources/example_data/yaf
new file mode 100644
index 0000000..2a42251
--- /dev/null
+++ b/metron-platform/metron-solr/src/test/resources/example_data/yaf
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{"adapter.threatinteladapter.end.ts":"1517605468528","iflags":"A","ip_dst_port":80,"uflags":0,"enrichmentsplitterbolt.splitter.end.ts":"1517605468509","isn":"9dfb1927","enrichments.geo.ip_dst_addr.city":"Strasbourg","enrichments.geo.ip_dst_addr.latitude":"48.5839","enrichmentsplitterbolt.splitter.begin.ts":"1517605468509","adapter.hostfromjsonlistadapter.end.ts":"1517605468513","enrichments.geo.ip_dst_addr.country":"FR","enrichments.geo.ip_dst_addr.locID":"2973783","adapter.geoadapter.begin.ts":"1517605468513","enrichments.geo.ip_dst_addr.postalCode":"67100","duration":"0.000","protocol":"TCP","rpkt":0,"ip_dst_addr":"62.75.195.236","original_string":"2018-02-02 20:58:59.000|2018-02-02 20:58:59.000|   0.000|   0.000|  6|                         192.168.138.158|49189|                           62.75.195.236|   80|       A|       0|       0|       0|9dfb1927|00000000|000|000|       1|      40|       0|       0|    0|idle ","threatinteljoinbolt.joiner.ts":"1517605468537","pkt":1,"enrichmentjoinbolt.joiner.ts":"1517605468517","ruflags":0,"adapter.hostfromjsonlistadapter.begin.ts":"1517605468513","threatintelsplitterbolt.splitter.begin.ts":"1517605468524","roct":0,"tag":0,"enrichments.geo.ip_dst_addr.longitude":"7.7455","ip_src_addr":"192.168.138.158","rtag":0,"timestamp":1517605139000,"app":0,"oct":40,"end_reason":"idle ","risn":0,"end_time":1517605139000,"source.type":"yaf","adapter.geoadapter.end.ts":"1517605468513","start_time":1517605139000,"riflags":0,"rtt":"0.000","threatintelsplitterbolt.splitter.end.ts":"1517605468524","adapter.threatinteladapter.begin.ts":"1517605468527","ip_src_port":49189,"enrichments.geo.ip_dst_addr.location_point":"48.5839,7.7455","guid":"2db8680f-b08a-41cd-bd06-b3bbbf319435"}
+{"adapter.threatinteladapter.end.ts":"1517605468537","iflags":"AP","ip_dst_port":80,"uflags":0,"enrichmentsplitterbolt.splitter.end.ts":"1517605468511","isn":"63626c24","enrichments.geo.ip_dst_addr.latitude":"55.7386","enrichmentsplitterbolt.splitter.begin.ts":"1517605468510","adapter.hostfromjsonlistadapter.end.ts":"1517605468514","enrichments.geo.ip_dst_addr.country":"RU","adapter.geoadapter.begin.ts":"1517605468514","duration":"0.000","protocol":"TCP","rpkt":0,"ip_dst_addr":"95.163.121.204","original_string":"2018-02-02 20:58:59.000|2018-02-02 20:58:59.000|   0.000|   0.000|  6|                         192.168.138.158|49210|                          95.163.121.204|   80|      AP|       0|       0|       0|63626c24|00000000|000|000|       1|     475|       0|       0|    0|idle ","threatinteljoinbolt.joiner.ts":"1517605468539","pkt":1,"enrichmentjoinbolt.joiner.ts":"1517605468518","ruflags":0,"adapter.hostfromjsonlistadapter.begin.ts":"1517605468514","threatintelsplitterbolt.splitter.begin.ts":"1517605468528","roct":0,"tag":0,"enrichments.geo.ip_dst_addr.longitude":"37.6068","ip_src_addr":"192.168.138.158","rtag":0,"timestamp":1517605139000,"app":0,"oct":475,"end_reason":"idle ","risn":0,"end_time":1517605139000,"source.type":"yaf","adapter.geoadapter.end.ts":"1517605468515","start_time":1517605139000,"riflags":0,"rtt":"0.000","threatintelsplitterbolt.splitter.end.ts":"1517605468528","adapter.threatinteladapter.begin.ts":"1517605468537","ip_src_port":49210,"enrichments.geo.ip_dst_addr.location_point":"55.7386,37.6068","guid":"1a250282-1683-44e3-a455-0bc7b0ee576c"}
+{"adapter.threatinteladapter.end.ts":"1517605468722","iflags":"A","ip_dst_port":50451,"uflags":0,"enrichmentsplitterbolt.splitter.end.ts":"1517605468537","isn":"7782f40c","enrichmentsplitterbolt.splitter.begin.ts":"1517605468537","adapter.hostfromjsonlistadapter.end.ts":"1517605468539","adapter.geoadapter.begin.ts":"1517605468539","duration":"0.000","protocol":"TCP","rpkt":0,"ip_dst_addr":"192.168.66.1","original_string":"2018-02-02 20:58:59.000|2018-02-02 20:58:59.000|   0.000|   0.000|  6|                          192.168.66.121| 8080|                            192.168.66.1|50451|       A|       0|       0|       0|7782f40c|00000000|000|000|       1|    2948|       0|       0|    0|idle ","threatinteljoinbolt.joiner.ts":"1517605468727","pkt":1,"enrichmentjoinbolt.joiner.ts":"1517605468544","ruflags":0,"adapter.hostfromjsonlistadapter.begin.ts":"1517605468539","threatintelsplitterbolt.splitter.begin.ts":"1517605468546","roct":0,"tag":0,"ip_src_addr":"192.168.66.121","rtag":0,"timestamp":1517605139000,"app":0,"oct":2948,"end_reason":"idle ","risn":0,"end_time":1517605139000,"source.type":"yaf","adapter.geoadapter.end.ts":"1517605468539","start_time":1517605139000,"riflags":0,"rtt":"0.000","threatintelsplitterbolt.splitter.end.ts":"1517605468546","adapter.threatinteladapter.begin.ts":"1517605468551","ip_src_port":8080,"guid":"283754ec-c3c1-4a4a-97a5-6835bb00e2b2"}
+{"adapter.threatinteladapter.end.ts":"1517605468796","iflags":"AP","enrichments.geo.ip_src_addr.longitude":"7.7455","ip_dst_port":49186,"uflags":0,"enrichmentsplitterbolt.splitter.end.ts":"1517605468554","isn":73726688,"enrichments.geo.ip_src_addr.location_point":"48.5839,7.7455","enrichmentsplitterbolt.splitter.begin.ts":"1517605468554","adapter.hostfromjsonlistadapter.end.ts":"1517605468556","adapter.geoadapter.begin.ts":"1517605468557","enrichments.geo.ip_src_addr.postalCode":"67100","duration":"0.000","enrichments.geo.ip_src_addr.city":"Strasbourg","protocol":"TCP","rpkt":0,"ip_dst_addr":"192.168.138.158","original_string":"2018-02-02 20:58:59.000|2018-02-02 20:58:59.000|   0.000|   0.000|  6|                           62.75.195.236|   80|                         192.168.138.158|49186|      AP|       0|       0|       0|73726688|00000000|000|000|       1|    1407|       0|       0|    0|idle ","enrichments.geo.ip_src_addr.latitude":"48.5839","threatinteljoinbolt.joiner.ts":"1517605468798","pkt":1,"enrichmentjoinbolt.joiner.ts":"1517605468599","ruflags":0,"adapter.hostfromjsonlistadapter.begin.ts":"1517605468556","threatintelsplitterbolt.splitter.begin.ts":"1517605468601","enrichments.geo.ip_src_addr.locID":"2973783","roct":0,"tag":0,"ip_src_addr":"62.75.195.236","rtag":0,"enrichments.geo.ip_src_addr.country":"FR","timestamp":1517605139000,"app":0,"oct":1407,"end_reason":"idle ","risn":0,"end_time":1517605139000,"source.type":"yaf","adapter.geoadapter.end.ts":"1517605468595","start_time":1517605139000,"riflags":0,"rtt":"0.000","threatintelsplitterbolt.splitter.end.ts":"1517605468601","adapter.threatinteladapter.begin.ts":"1517605468722","ip_src_port":80,"guid":"992817c2-8960-4a5d-a9cc-0252f4d1256c"}
+{"adapter.threatinteladapter.end.ts":"1517605468796","iflags":"AP","ip_dst_port":50183,"uflags":0,"enrichmentsplitterbolt.splitter.end.ts":"1517605468556","isn":"8df560a1","enrichmentsplitterbolt.splitter.begin.ts":"1517605468556","adapter.hostfromjsonlistadapter.end.ts":"1517605468559","adapter.geoadapter.begin.ts":"1517605468595","duration":"0.000","protocol":"TCP","rpkt":0,"ip_dst_addr":"192.168.66.1","original_string":"2018-02-02 20:58:59.000|2018-02-02 20:58:59.000|   0.000|   0.000|  6|                          192.168.66.121| 8080|                            192.168.66.1|50183|      AP|       0|       0|       0|8df560a1|00000000|000|000|       1|     187|       0|       0|    0|idle ","threatinteljoinbolt.joiner.ts":"1517605468798","pkt":1,"enrichmentjoinbolt.joiner.ts":"1517605468600","ruflags":0,"adapter.hostfromjsonlistadapter.begin.ts":"1517605468559","threatintelsplitterbolt.splitter.begin.ts":"1517605468601","roct":0,"tag":0,"ip_src_addr":"192.168.66.121","rtag":0,"timestamp":1517605139000,"app":0,"oct":187,"end_reason":"idle ","risn":0,"end_time":1517605139000,"source.type":"yaf","adapter.geoadapter.end.ts":"1517605468595","start_time":1517605139000,"riflags":0,"rtt":"0.000","threatintelsplitterbolt.splitter.end.ts":"1517605468601","adapter.threatinteladapter.begin.ts":"1517605468796","ip_src_port":8080,"guid":"061a2601-e268-4492-ab75-0e2aba434f6e"}
diff --git a/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/error/MetronErrorJSONMatcher.java b/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/error/MetronErrorJSONMatcher.java
index ad24283..b3120f8 100644
--- a/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/error/MetronErrorJSONMatcher.java
+++ b/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/error/MetronErrorJSONMatcher.java
@@ -37,6 +37,8 @@
     expected.remove("timestamp");
     actual.remove("stack");
     expected.remove("stack");
+    actual.remove("guid");
+    expected.remove("guid");
     return actual.equals(expected);
   }
 }
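(Note: guid, like timestamp and stack, is generated fresh for every message, so it must be stripped from both sides before the structural comparison or otherwise-identical errors would never match. For context, a sketch of the comparison this hunk produces — the Map-based signature is an assumption, not the matcher's actual one:)

```java
import java.util.Map;

public class ErrorMatchSketch {
  // Strip run-dependent fields from both sides, then compare what remains.
  static boolean errorEquals(Map<String, Object> actual, Map<String, Object> expected) {
    for (String volatileField : new String[] {"timestamp", "stack", "guid"}) {
      actual.remove(volatileField);    // values differ on every run
      expected.remove(volatileField);
    }
    return actual.equals(expected);    // structural equality on the rest
  }
}
```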
diff --git a/metron-platform/pom.xml b/metron-platform/pom.xml
index a99dbc7..048eabb 100644
--- a/metron-platform/pom.xml
+++ b/metron-platform/pom.xml
@@ -82,13 +82,13 @@
 		<dependency>
 			<groupId>org.powermock</groupId>
 			<artifactId>powermock-module-junit4</artifactId>
-			<version>1.6.6</version>
+			<version>${global_powermock_version}</version>
 			<scope>test</scope>
 		</dependency>
 		<dependency>
 			<groupId>org.powermock</groupId>
 			<artifactId>powermock-api-mockito</artifactId>
-			<version>1.6.6</version>
+			<version>${global_powermock_version}</version>
 			<scope>test</scope>
 		</dependency>
 		<dependency>
diff --git a/pom.xml b/pom.xml
index 2734de4..1947b37 100644
--- a/pom.xml
+++ b/pom.xml
@@ -108,8 +108,9 @@
         <global_slf4j_version>1.7.7</global_slf4j_version>
         <global_opencsv_version>3.7</global_opencsv_version>
         <global_java_version>1.8</global_java_version>
-        <global_solr_version>5.2.1</global_solr_version>
+        <global_solr_version>6.6.2</global_solr_version>
         <global_mockito_version>1.10.19</global_mockito_version>
+        <global_powermock_version>1.7.0</global_powermock_version>
         <global_shade_version>2.4.3</global_shade_version>
         <global_jackson_version>2.7.4</global_jackson_version>
         <global_errorprone_core_version>2.0.14</global_errorprone_core_version>
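(Note: the two PowerMock artifacts in metron-platform previously pinned 1.6.6 independently; routing both through the new global_powermock_version property keeps them in lock-step at 1.7.0 alongside Mockito 1.10.19. A minimal sketch of the PowerMockRunner/mockStatic pattern those coordinates support — the test class and its IdSource helper are hypothetical, not code from this patch:)

```java
import static org.junit.Assert.assertEquals;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

@RunWith(PowerMockRunner.class)
@PrepareForTest(PowerMockVersionSketchTest.IdSource.class)
public class PowerMockVersionSketchTest {

  // Hypothetical static-only helper standing in for the legacy code
  // PowerMock is typically used against in the test modules.
  public static class IdSource {
    public static String nextId() {
      return java.util.UUID.randomUUID().toString();
    }
  }

  @Test
  public void staticMethodCanBeStubbed() {
    PowerMockito.mockStatic(IdSource.class);
    PowerMockito.when(IdSource.nextId()).thenReturn("fixed-id");
    assertEquals("fixed-id", IdSource.nextId());
  }
}
```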