DRILL-7851: Restore original and untruncated license text
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c6dbe24..5d0d6c6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -80,7 +80,7 @@
       # Install libraries required for protobuf generation
       - name: Install dependencies
         run: |
-          sudo apt-get install -y libboost-all-dev libzookeeper-mt-dev libsasl2-dev cmake libcppunit-dev checkinstall && \
+          sudo apt update -y && sudo apt install -y libboost-all-dev libzookeeper-mt-dev libsasl2-dev cmake libcppunit-dev checkinstall && \
           pushd .. && \
           if [ -f $HOME/protobuf/protobuf_3.11.1* ]; then \
             sudo dpkg -i $HOME/protobuf/protobuf_3.11.1*; \
diff --git a/common/pom.xml b/common/pom.xml
index 3f103b0..e7c02a2 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -29,7 +29,7 @@
 
   <artifactId>drill-common</artifactId>
   <packaging>jar</packaging>
-  <name>Common (Logical Plan, Base expressions)</name>
+  <name>Drill : Common</name>
 
   <dependencies>
     <dependency>
diff --git a/contrib/data/pom.xml b/contrib/data/pom.xml
index c84473a..d1fcafb 100644
--- a/contrib/data/pom.xml
+++ b/contrib/data/pom.xml
@@ -28,7 +28,7 @@
 
   <groupId>org.apache.drill.contrib.data</groupId>
   <artifactId>drill-contrib-data-parent</artifactId>
-  <name>contrib/data/Parent Pom</name>
+  <name>Drill : Contrib : Data : </name>
   <packaging>pom</packaging>
 
   <dependencies>
diff --git a/contrib/data/tpch-sample-data/pom.xml b/contrib/data/tpch-sample-data/pom.xml
index d7c6d8e..4f6ef12 100644
--- a/contrib/data/tpch-sample-data/pom.xml
+++ b/contrib/data/tpch-sample-data/pom.xml
@@ -27,7 +27,7 @@
   </parent>
 
   <artifactId>tpch-sample-data</artifactId>
-  <name>contrib/data/tpch-sample-data</name>
+  <name>Drill : Contrib : Data : TPCH Sample</name>
   <packaging>jar</packaging>
 
   <dependencies>
diff --git a/contrib/format-esri/pom.xml b/contrib/format-esri/pom.xml
index 20d6d95..88a6688 100644
--- a/contrib/format-esri/pom.xml
+++ b/contrib/format-esri/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-format-esri</artifactId>
-  <name>contrib/format-esri</name>
+  <name>Drill : Contrib : Format : Esri</name>
 
   <dependencies>
     <dependency>
diff --git a/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpFormatPlugin.java b/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpFormatPlugin.java
index 01dda6c..20bb704 100644
--- a/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpFormatPlugin.java
+++ b/contrib/format-esri/src/main/java/org/apache/drill/exec/store/esri/ShpFormatPlugin.java
@@ -26,7 +26,6 @@
 
 import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileReaderFactory;
 import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
@@ -34,14 +33,8 @@
 import org.apache.drill.exec.store.esri.ShpBatchReader.ShpReaderConfig;
 import org.apache.hadoop.conf.Configuration;
 
-import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 public class ShpFormatPlugin extends EasyFormatPlugin<ShpFormatConfig> {
 
-  private static final Logger logger = LoggerFactory.getLogger(ShpFormatPlugin.class);
-
   public static final String PLUGIN_NAME = "shp";
 
   public static class ShpReaderFactory extends FileReaderFactory {
@@ -79,18 +72,17 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, ShpFormatConfig pluginConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = false;
-    config.blockSplittable = false;
-    config.compressible = false;
-    config.supportsProjectPushdown = true;
-    config.extensions = Lists.newArrayList(pluginConfig.getExtensions());
-    config.fsConf = fsConf;
-    config.defaultName = PLUGIN_NAME;
-    config.readerOperatorType = CoreOperatorType.SHP_SUB_SCAN_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(false)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(PLUGIN_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 }
diff --git a/contrib/format-excel/pom.xml b/contrib/format-excel/pom.xml
index 39572a9..28cb9de 100644
--- a/contrib/format-excel/pom.xml
+++ b/contrib/format-excel/pom.xml
@@ -28,10 +28,10 @@
   </parent>
 
   <artifactId>drill-format-excel</artifactId>
-  <name>contrib/format-excel</name>
+  <name>Drill : Contrib : Format : Excel</name>
 
   <properties>
-    <poi.version>4.1.2</poi.version>
+    <poi.version>5.0.0</poi.version>
   </properties>
   <dependencies>
     <dependency>
@@ -67,7 +67,7 @@
     <dependency>
       <groupId>com.github.pjfanning</groupId>
       <artifactId>excel-streaming-reader</artifactId>
-      <version>2.3.5</version>
+      <version>2.4.0</version>
     </dependency>
   </dependencies>
   <build>
diff --git a/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java b/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java
index 1df4071..82901f1 100644
--- a/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java
+++ b/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelBatchReader.java
@@ -459,7 +459,15 @@
     } else if (cellType == CellType.NUMERIC && DateUtil.isCellDateFormatted(cell)) {
       // Case if the column is a date or time
       addColumnToArray(rowWriter, excelFieldNames.get(colPosition), MinorType.TIMESTAMP, false);
-    } else if (cellType == CellType.NUMERIC || cellType == CellType.FORMULA || cellType == CellType.BLANK || cellType == CellType._NONE) {
+    } else if (cellType == CellType.FORMULA) {
+      // Cells with formulae can return either strings or numbers.
+      CellType formulaCellType = cell.getCachedFormulaResultType();
+      if (formulaCellType == CellType.STRING) {
+        addColumnToArray(rowWriter, excelFieldNames.get(colPosition), MinorType.VARCHAR, false);
+      } else {
+        addColumnToArray(rowWriter, excelFieldNames.get(colPosition), MinorType.FLOAT8, false);
+      }
+    } else if (cellType == CellType.NUMERIC || cellType == CellType.BLANK || cellType == CellType._NONE) {
       // Case if the column is numeric
       addColumnToArray(rowWriter, excelFieldNames.get(colPosition), MinorType.FLOAT8, false);
     } else {
diff --git a/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelFormatPlugin.java b/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelFormatPlugin.java
index 0d8d52d..58cae94 100644
--- a/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelFormatPlugin.java
+++ b/contrib/format-excel/src/main/java/org/apache/drill/exec/store/excel/ExcelFormatPlugin.java
@@ -18,7 +18,6 @@
 
 package org.apache.drill.exec.store.excel;
 
-import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.common.types.TypeProtos;
@@ -28,7 +27,6 @@
 import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
 
 import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
@@ -66,29 +64,28 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, ExcelFormatConfig pluginConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = false;
-    config.blockSplittable = false;
-    config.compressible = true;
-    config.supportsProjectPushdown = true;
-    config.extensions = pluginConfig.getExtensions();
-    config.fsConf = fsConf;
-    config.defaultName = DEFAULT_NAME;
-    config.readerOperatorType = UserBitShared.CoreOperatorType.EXCEL_SUB_SCAN_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(DEFAULT_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   @Override
   public ManagedReader<? extends FileSchemaNegotiator> newBatchReader(
-    EasySubScan scan, OptionManager options) throws ExecutionSetupException {
+    EasySubScan scan, OptionManager options) {
     return new ExcelBatchReader(formatConfig.getReaderConfig(this), scan.getMaxRecords());
   }
 
   @Override
-  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) throws ExecutionSetupException {
+  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) {
     FileScanBuilder builder = new FileScanBuilder();
     ExcelReaderConfig readerConfig = new ExcelReaderConfig(this);
 
diff --git a/contrib/format-excel/src/test/java/org/apache/drill/exec/store/excel/TestExcelFormat.java b/contrib/format-excel/src/test/java/org/apache/drill/exec/store/excel/TestExcelFormat.java
index 934f78c..b79108b 100644
--- a/contrib/format-excel/src/test/java/org/apache/drill/exec/store/excel/TestExcelFormat.java
+++ b/contrib/format-excel/src/test/java/org/apache/drill/exec/store/excel/TestExcelFormat.java
@@ -423,6 +423,47 @@
   }
 
   @Test
+  public void testTextFormula() throws Exception {
+    String sql = "SELECT * FROM cp.`excel/text-formula.xlsx`";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("Grade", MinorType.VARCHAR)
+      .addNullable("Gender", MinorType.VARCHAR)
+      .addNullable("Combined", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+      .addRow("Seventh Grade", "Girls", "Seventh Grade Girls")
+      .addRow("Sixth Grade", "Girls", "Sixth Grade Girls")
+      .addRow("Fourth Grade", "Girls", "Fourth Grade Girls")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testNumericFormula() throws Exception {
+    String sql = "SELECT * FROM cp.`excel/numeric-formula.xlsx`";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("col1", MinorType.FLOAT8)
+      .addNullable("col2", MinorType.FLOAT8)
+      .addNullable("calc", MinorType.FLOAT8)
+      .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+      .addRow(2.0, 8.0, 256.0)
+      .addRow(4.0, 6.0, 4096.0)
+      .addRow(6.0, 4.0, 1296.0)
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
   public void testLimitPushdown() throws Exception {
     String sql = "SELECT id, first_name, order_count FROM cp.`excel/test_data.xlsx` LIMIT 5";
 
diff --git a/contrib/format-excel/src/test/resources/excel/numeric-formula.xlsx b/contrib/format-excel/src/test/resources/excel/numeric-formula.xlsx
new file mode 100644
index 0000000..6d65a55
--- /dev/null
+++ b/contrib/format-excel/src/test/resources/excel/numeric-formula.xlsx
Binary files differ
diff --git a/contrib/format-excel/src/test/resources/excel/text-formula.xlsx b/contrib/format-excel/src/test/resources/excel/text-formula.xlsx
new file mode 100644
index 0000000..9cce25e
--- /dev/null
+++ b/contrib/format-excel/src/test/resources/excel/text-formula.xlsx
Binary files differ
diff --git a/contrib/format-hdf5/pom.xml b/contrib/format-hdf5/pom.xml
index 1b61297..7ef9cdc 100644
--- a/contrib/format-hdf5/pom.xml
+++ b/contrib/format-hdf5/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-format-hdf5</artifactId>
-  <name>contrib/format-hdf5</name>
+  <name>Drill : Contrib : Format : HDF5</name>
   
   <dependencies>
     <dependency>
diff --git a/contrib/format-hdf5/src/main/java/org/apache/drill/exec/store/hdf5/HDF5FormatPlugin.java b/contrib/format-hdf5/src/main/java/org/apache/drill/exec/store/hdf5/HDF5FormatPlugin.java
index 3e9480f..07e0763 100644
--- a/contrib/format-hdf5/src/main/java/org/apache/drill/exec/store/hdf5/HDF5FormatPlugin.java
+++ b/contrib/format-hdf5/src/main/java/org/apache/drill/exec/store/hdf5/HDF5FormatPlugin.java
@@ -19,7 +19,6 @@
 package org.apache.drill.exec.store.hdf5;
 
 import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.common.types.TypeProtos;
 import org.apache.drill.common.types.Types;
@@ -28,7 +27,6 @@
 import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileScanBuilder;
 
 import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasySubScan;
@@ -55,23 +53,22 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, HDF5FormatConfig pluginConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = false;
-    config.blockSplittable = false;
-    config.compressible = true;
-    config.supportsProjectPushdown = true;
-    config.extensions = pluginConfig.getExtensions();
-    config.fsConf = fsConf;
-    config.defaultName = DEFAULT_NAME;
-    config.readerOperatorType = UserBitShared.CoreOperatorType.HDF5_SUB_SCAN_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(DEFAULT_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   @Override
-  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) throws ExecutionSetupException {
+  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) {
     FileScanBuilder builder = new FileScanBuilder();
 
     builder.setReaderFactory(new HDF5ReaderFactory(new HDF5BatchReader.HDF5ReaderConfig(this, formatConfig), scan.getMaxRecords()));
diff --git a/contrib/format-httpd/README.md b/contrib/format-httpd/README.md
new file mode 100644
index 0000000..87f02d8
--- /dev/null
+++ b/contrib/format-httpd/README.md
@@ -0,0 +1,122 @@
+# Web Server Log Format Plugin (HTTPD)
+This plugin enables Drill to read and query httpd (Apache Web Server) and nginx access logs natively. This plugin uses the work by [Niels Basjes](https://github.com/nielsbasjes),
+which is available here: https://github.com/nielsbasjes/logparser.
+
+## Configuration
+There are several fields which you can specify in order for Drill to read web server logs. In general the defaults should be fine; the fields are:
+* **`logFormat`**:  The log format string is the format string found in your web server configuration. If you have multiple logFormats then you can add all of them in this
+ single parameter separated by a newline (`\n`). The parser will automatically select the first matching format.
+ Note that the well known formats `common`, `combined`, `combinedio`, `referer` and `agent` are also accepted as logFormat.
+ Be aware of leading and trailing spaces on a line when configuring this!
+* **`timestampFormat`**:  The format of time stamps in your log files. This setting is optional and is almost never needed.
+* **`extensions`**:  The file extension of your web server logs.  Defaults to `httpd`.
+* **`maxErrors`**:  Sets the plugin error tolerance. When set to any value less than `0`, Drill will ignore all errors. If unspecified then maxErrors is 0 which will cause the query to fail on the first error.
+* **`flattenWildcards`**: The parser extracts a few variables into Drill maps. When set to `true`, these maps are flattened into individual columns (see Flattening Maps below). Defaults to `false`.
+* **`parseUserAgent`**: When set to true the [Yauaa useragent analyzer](https://yauaa.basjes.nl) will be applied to the UserAgent field if present. Defaults to `false` because of the extra startup and memory overhead.
+* **`logParserRemapping`**: This makes it possible to parse deeper into the logline in custom situations. See documentation below for further info.
+
+In common situations the config will look something like this (with two log formats separated by a newline `\n`):
+```json
+"httpd" : {
+  "type" : "httpd",
+  "logFormat" : "%h %l %u %t \"%r\" %s %b \"%{Referer}i\" \"%{User-agent}i\" %V\ncombined",
+  "maxErrors" : 0,
+  "flattenWildcards" : true,
+  "parseUserAgent" : true
+}
+```
+
+## Data Model
+The fields which Drill will return from HTTPD access logs should be fairly self-explanatory and should all be mapped to the correct data types.  For instance, `TIMESTAMP` fields are
+ all Drill `TIMESTAMP`s, and so forth.
+
+### Nested Columns
+The HTTPD parser can produce a few columns of nested data. For instance, the various `query_string` columns are parsed into Drill maps so that if you want to look for a specific
+ field, you can do so.
+
+ Drill allows you to directly access map fields with the format:
+ ```
+<table>.<map>.<field>
+```
+ Note that in order to access a map, you must assign an alias to your table, as shown below:
+ ```sql
+SELECT mylogs.`request_firstline_uri_query_$`.`username` AS username
+FROM dfs.test.`logfile.httpd` AS mylogs
+
+```
+In this example, we assign an alias of `mylogs` to the table; the column name is `request_firstline_uri_query_$`, and the individual field within that map is `username`.
+This particular example enables you to analyze items in query strings.
+
+### Flattening Maps
+In the event that you have a map field that you would like broken into columns rather than getting the nested fields, you can set the `flattenWildcards` option to `true` and
+Drill will create columns for these fields.  For example, if you have a URI query parameter called `username` and the `flattenWildcards` option is enabled, Drill will create a
+field called `request_firstline_uri_query_username`.
+
+**Note that underscores in the field name are replaced with double underscores.**
+
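+As an illustrative sketch (assuming a config with `flattenWildcards` set to `true`, and reusing the `username` query parameter and the file path from the examples above), the flattened column can then be queried directly:
+```sql
+-- Hypothetical example: the file path and the `username` query parameter are taken from the examples above.
+SELECT `request_firstline_uri_query_username` AS username,
+       COUNT(*) AS hits
+FROM dfs.test.`logfile.httpd`
+GROUP BY `request_firstline_uri_query_username`
+```
+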
+## Useful Functions
+ If you are using Drill to analyze web access logs, there are a few other useful functions which you should know about:
+
+ * `parse_url(<url>)`: This function accepts a URL as an argument and returns a map of the URL's protocol, authority, host, and path.
+ * `parse_query(<query_string>)`: This function accepts a query string and returns a key/value pairing of the variables submitted in the request.
+ * `parse_user_agent(<user agent>)`, `parse_user_agent( <useragent field>, <desired field> )`: The function parse_user_agent() takes a user agent string as an argument and
+  returns a map of the available fields. Note that not every field will be present in every user agent string; a usage sketch follows this list.
+  [Complete Docs Here](https://github.com/apache/drill/tree/master/contrib/udfs#user-agent-functions)
+
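+For example, a minimal sketch applying `parse_user_agent()` to the user agent column (assuming a log format that produces the `request_user-agent` column used in the remapping query below):
+```sql
+-- The `request_user-agent` column and the file path are assumed from the other examples in this document.
+SELECT parse_user_agent(`request_user-agent`) AS ua_map
+FROM dfs.test.`logfile.httpd` AS mylogs
+```
+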
+## LogParser type remapping
+**Advanced feature**
+The underlying [logparser](https://github.com/nielsbasjes/logparser) supports something called type remapping.
+Essentially it means that an extracted value which would normally be treated as an unparsable STRING can now be 'cast' to something
+that can be further cut into relevant pieces.
+
+The parameter string is a `;` separated list of mappings.
+Each mapping is a `:` separated list of
+- the name of the underlying logparser field (which is different from the Drill column name),
+- the underlying `type`, which is used to determine which additional Dissectors can be applied,
+- optionally the `cast` (one of `STRING`, `LONG`, `DOUBLE`), which may impact the type of the Drill column.
+
+Examples:
+- If you have a query parameter in the URL called `ua` which is really the UserAgent string and you would like to parse it, you can add
+`request.firstline.uri.query.ua:HTTP.USERAGENT`
+- If you have a query parameter in the URL called `timestamp` which is really a numerical timestamp (epoch milliseconds), you can add
+`request.firstline.uri.query.timestamp:TIME.EPOCH:LONG`.
+The additional `LONG` causes the returned value to be a long, which tells Drill that the `TIME.EPOCH` is to be interpreted as a `TIMESTAMP` column.
+
+Combining all of this, you can write a query that does something like this:
+```sql
+SELECT
+          `request_receive_time_epoch`
+        , `request_user-agent`
+        , `request_user-agent_device__name`
+        , `request_user-agent_agent__name__version__major`
+        , `request_firstline_uri_query_timestamp`
+        , `request_firstline_uri_query_ua`
+        , `request_firstline_uri_query_ua_device__name`
+        , `request_firstline_uri_query_ua_agent__name__version__major`
+FROM       table(
+             cp.`httpd/typeremap.log`
+                 (
+                   type => 'httpd',
+                   logFormat => 'combined\n%h %l %u %t \"%r\" %>s %b',
+                   flattenWildcards => true,
+                   parseUserAgent => true,
+                   logParserRemapping => '
+                       request.firstline.uri.query.ua        :HTTP.USERAGENT;
+                       request.firstline.uri.query.timestamp :TIME.EPOCH    : LONG'
+                 )
+           )
+```
+
+## Implicit Columns
+Data queried by this plugin will return two implicit columns:
+
+* **`_raw`**: This returns the raw, unparsed log line
+* **`_matched`**:  Returns `true` or `false` depending on whether the line matched the config string.
+
+Thus, if you wanted to see which lines in your log file were not matching the config, you could use the following query:
+
+```sql
+SELECT _raw
+FROM <data>
+WHERE _matched = false
+```
\ No newline at end of file
diff --git a/contrib/format-httpd/pom.xml b/contrib/format-httpd/pom.xml
new file mode 100644
index 0000000..7238725
--- /dev/null
+++ b/contrib/format-httpd/pom.xml
@@ -0,0 +1,108 @@
+<?xml version="1.0"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>drill-contrib-parent</artifactId>
+    <groupId>org.apache.drill.contrib</groupId>
+    <version>1.19.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>drill-format-httpd</artifactId>
+  <name>Drill : Contrib : Format : Httpd/Nginx Access Log</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>nl.basjes.parse.httpdlog</groupId>
+      <artifactId>httpdlog-parser</artifactId>
+      <version>${httpdlog-parser.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-codec</groupId>
+          <artifactId>commons-codec</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>nl.basjes.parse.useragent</groupId>
+      <artifactId>yauaa-logparser</artifactId>
+      <version>${yauaa.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>nl.basjes.parse.httpdlog</groupId>
+          <artifactId>httpdlog-parser</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <!-- Test dependencies -->
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill</groupId>
+      <artifactId>drill-common</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>copy-java-sources</id>
+            <phase>process-sources</phase>
+            <goals>
+              <goal>copy-resources</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${basedir}/target/classes/org/apache/drill/exec/store/httpd
+              </outputDirectory>
+              <resources>
+                <resource>
+                  <directory>src/main/java/org/apache/drill/exec/store/httpd</directory>
+                  <filtering>true</filtering>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogBatchReader.java b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogBatchReader.java
new file mode 100644
index 0000000..275132a
--- /dev/null
+++ b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogBatchReader.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.httpd;
+
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.MetadataUtils;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.shaded.guava.com.google.common.base.Charsets;
+import org.apache.hadoop.mapred.FileSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+
+public class HttpdLogBatchReader implements ManagedReader<FileSchemaNegotiator> {
+
+  private static final Logger logger = LoggerFactory.getLogger(HttpdLogBatchReader.class);
+  public static final String RAW_LINE_COL_NAME = "_raw";
+  public static final String MATCHED_COL_NAME = "_matched";
+  private final HttpdLogFormatConfig formatConfig;
+  private final int maxRecords;
+  private final EasySubScan scan;
+  private HttpdParser parser;
+  private FileSplit split;
+  private InputStream fsStream;
+  private RowSetLoader rowWriter;
+  private BufferedReader reader;
+  private int lineNumber;
+  private CustomErrorContext errorContext;
+  private ScalarWriter rawLineWriter;
+  private ScalarWriter matchedWriter;
+  private int errorCount;
+
+
+  public HttpdLogBatchReader(HttpdLogFormatConfig formatConfig, int maxRecords, EasySubScan scan) {
+    this.formatConfig = formatConfig;
+    this.maxRecords = maxRecords;
+    this.scan = scan;
+  }
+
+  @Override
+  public boolean open(FileSchemaNegotiator negotiator) {
+    // Open the input stream to the log file
+    openFile(negotiator);
+    errorContext = negotiator.parentErrorContext();
+    try {
+      parser = new HttpdParser(
+              formatConfig.getLogFormat(),
+              formatConfig.getTimestampFormat(),
+              formatConfig.getFlattenWildcards(),
+              formatConfig.getParseUserAgent(),
+              formatConfig.getLogParserRemapping(),
+              scan);
+      negotiator.tableSchema(parser.setupParser(), false);
+    } catch (Exception e) {
+      throw UserException.dataReadError(e)
+        .message("Error opening HTTPD file: " + e.getMessage())
+        .addContext(errorContext)
+        .build(logger);
+    }
+
+    ResultSetLoader loader = negotiator.build();
+    rowWriter = loader.writer();
+    parser.addFieldsToParser(rowWriter);
+    rawLineWriter = addImplicitColumn(RAW_LINE_COL_NAME, MinorType.VARCHAR);
+    matchedWriter = addImplicitColumn(MATCHED_COL_NAME, MinorType.BIT);
+    return true;
+  }
+
+  @Override
+  public boolean next() {
+    while (!rowWriter.isFull()) {
+      if (!nextLine(rowWriter)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  private boolean nextLine(RowSetLoader rowWriter) {
+    String line;
+
+    // Check if the limit has been reached
+    if (rowWriter.limitReached(maxRecords)) {
+      return false;
+    }
+
+    try {
+      line = reader.readLine();
+      if (line == null) {
+        return false;
+      } else if (line.isEmpty()) {
+        return true;
+      }
+    } catch (Exception e) {
+      throw UserException.dataReadError(e)
+        .message("Error reading HTTPD file at line number %d", lineNumber)
+        .addContext(e.getMessage())
+        .addContext(errorContext)
+        .build(logger);
+    }
+    // Start the row
+    rowWriter.start();
+
+    try {
+      parser.parse(line);
+      matchedWriter.setBoolean(true);
+    } catch (Exception e) {
+      errorCount++;
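+      // The parse failed: count the error. Once the configured maxErrors is reached the query fails;
+      // otherwise the line is simply flagged as unmatched below.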
+      if (errorCount >= formatConfig.getMaxErrors()) {
+        throw UserException.dataReadError()
+          .message("Error reading HTTPD file at line number %d", lineNumber)
+          .addContext(e.getMessage())
+          .addContext(errorContext)
+          .build(logger);
+      } else {
+        matchedWriter.setBoolean(false);
+      }
+    }
+
+    // Write raw line
+    rawLineWriter.setString(line);
+
+    // Finish the row
+    rowWriter.save();
+    lineNumber++;
+
+    return true;
+  }
+
+  @Override
+  public void close() {
+    if (fsStream == null) {
+      return;
+    }
+    try {
+      fsStream.close();
+    } catch (IOException e) {
+      logger.warn("Error when closing HTTPD file: {} {}", split.getPath().toString(), e.getMessage());
+    }
+    fsStream = null;
+  }
+
+  private void openFile(FileSchemaNegotiator negotiator) {
+    split = negotiator.split();
+    try {
+      fsStream = negotiator.fileSystem().openPossiblyCompressedStream(split.getPath());
+    } catch (Exception e) {
+      throw UserException
+        .dataReadError(e)
+        .message("Failed to open input file: %s", split.getPath().toString())
+        .addContext(e.getMessage())
+        .build(logger);
+    }
+    reader = new BufferedReader(new InputStreamReader(fsStream, Charsets.UTF_8));
+  }
+
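+  // Adds a nullable implicit column (e.g. _raw or _matched) that is excluded from wildcard (SELECT *) projection.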
+  private ScalarWriter addImplicitColumn(String colName, MinorType type) {
+    ColumnMetadata colSchema = MetadataUtils.newScalar(colName, type, TypeProtos.DataMode.OPTIONAL);
+    colSchema.setBooleanProperty(ColumnMetadata.EXCLUDE_FROM_WILDCARD, true);
+    int index = rowWriter.addColumn(colSchema);
+
+    return rowWriter.scalar(index);
+  }
+}
diff --git a/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatConfig.java b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatConfig.java
new file mode 100644
index 0000000..aa40e95
--- /dev/null
+++ b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatConfig.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.httpd;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.drill.common.PlanStringBuilder;
+import org.apache.drill.common.logical.FormatPluginConfig;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+@JsonTypeName(HttpdLogFormatPlugin.DEFAULT_NAME)
+@JsonInclude(JsonInclude.Include.NON_DEFAULT)
+public class HttpdLogFormatConfig implements FormatPluginConfig {
+
+  public static final String DEFAULT_TS_FORMAT = "dd/MMM/yyyy:HH:mm:ss ZZ";
+  public final String logFormat;
+  public final String timestampFormat;
+  public final List<String> extensions;
+  public final int maxErrors;
+  public final boolean flattenWildcards;
+  public final boolean parseUserAgent;
+  public final String logParserRemapping;
+
+  @JsonCreator
+  public HttpdLogFormatConfig(
+      @JsonProperty("extensions") List<String> extensions,
+      @JsonProperty("logFormat") String logFormat,
+      @JsonProperty("timestampFormat") String timestampFormat,
+      @JsonProperty("maxErrors") int maxErrors,
+      @JsonProperty("flattenWildcards") boolean flattenWildcards,
+      @JsonProperty("parseUserAgent") boolean parseUserAgent,
+      @JsonProperty("logParserRemapping") String logParserRemapping
+  ) {
+
+    this.extensions = extensions == null
+      ? Collections.singletonList("httpd")
+      : ImmutableList.copyOf(extensions);
+    this.logFormat = logFormat;
+    this.timestampFormat = timestampFormat;
+    this.maxErrors = maxErrors;
+    this.flattenWildcards = flattenWildcards;
+    this.parseUserAgent = parseUserAgent;
+    this.logParserRemapping = logParserRemapping;
+  }
+
+  /**
+   * @return the log formatting string. This string is the config string from
+   *         httpd.conf or similar config file.
+   */
+  public String getLogFormat() {
+    return logFormat;
+  }
+
+  /**
+   * @return the timestampFormat
+   */
+  public String getTimestampFormat() {
+    return timestampFormat;
+  }
+
+  public List<String> getExtensions() {
+    return extensions;
+  }
+
+  public int getMaxErrors() {
+    return maxErrors;
+  }
+
+  public boolean getFlattenWildcards () {
+    return flattenWildcards;
+  }
+
+  public boolean getParseUserAgent() {
+    return parseUserAgent;
+  }
+
+  public String getLogParserRemapping() {
+    return logParserRemapping;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(
+            logFormat,
+            timestampFormat,
+            maxErrors,
+            flattenWildcards,
+            parseUserAgent,
+            logParserRemapping);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    HttpdLogFormatConfig other = (HttpdLogFormatConfig) obj;
+    return Objects.equals(logFormat, other.logFormat)
+      && Objects.equals(timestampFormat, other.timestampFormat)
+      && Objects.equals(maxErrors, other.maxErrors)
+      && Objects.equals(flattenWildcards, other.flattenWildcards)
+      && Objects.equals(parseUserAgent, other.parseUserAgent)
+      && Objects.equals(logParserRemapping, other.logParserRemapping);
+  }
+
+  @Override
+  public String toString() {
+    return new PlanStringBuilder(this)
+        .field("log format", logFormat)
+        .field("timestamp format", timestampFormat)
+        .field("max errors", maxErrors)
+        .field("flattenWildcards", flattenWildcards)
+        .field("parseUserAgent", parseUserAgent)
+        .field("logParserRemapping", logParserRemapping)
+        .toString();
+  }
+}
diff --git a/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java
new file mode 100644
index 0000000..c3120d1
--- /dev/null
+++ b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.httpd;
+
+import org.apache.drill.common.logical.StoragePluginConfig;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileReaderFactory;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.hadoop.conf.Configuration;
+
+public class HttpdLogFormatPlugin extends EasyFormatPlugin<HttpdLogFormatConfig> {
+
+  protected static final String DEFAULT_NAME = "httpd";
+
+  public static final String OPERATOR_TYPE = "HTPPD_LOG_SUB_SCAN";
+
+  private static class HttpLogReaderFactory extends FileReaderFactory {
+
+    private final HttpdLogFormatConfig config;
+    private final int maxRecords;
+    private final EasySubScan scan;
+
+    private HttpLogReaderFactory(HttpdLogFormatConfig config, int maxRecords, EasySubScan scan) {
+      this.config = config;
+      this.maxRecords = maxRecords;
+      this.scan = scan;
+    }
+
+    @Override
+    public ManagedReader<? extends FileScanFramework.FileSchemaNegotiator> newReader() {
+      return new HttpdLogBatchReader(config, maxRecords, scan);
+    }
+  }
+
+  public HttpdLogFormatPlugin(final String name,
+                              final DrillbitContext context,
+                              final Configuration fsConf,
+                              final StoragePluginConfig storageConfig,
+                              final HttpdLogFormatConfig formatConfig) {
+
+    super(name, easyConfig(fsConf, formatConfig), context, storageConfig, formatConfig);
+  }
+
+  private static EasyFormatConfig easyConfig(Configuration fsConf, HttpdLogFormatConfig pluginConfig) {
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(DEFAULT_NAME)
+        .readerOperatorType(OPERATOR_TYPE)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
+  }
+
+  @Override
+  public ManagedReader<? extends FileSchemaNegotiator> newBatchReader(
+    EasySubScan scan, OptionManager options) {
+    return new HttpdLogBatchReader(formatConfig, scan.getMaxRecords(), scan);
+  }
+
+  @Override
+  protected FileScanFramework.FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) {
+    FileScanFramework.FileScanBuilder builder = new FileScanFramework.FileScanBuilder();
+    builder.setReaderFactory(new HttpLogReaderFactory(formatConfig, scan.getMaxRecords(), scan));
+
+    initScanBuilder(builder, scan);
+    builder.nullType(Types.optional(TypeProtos.MinorType.VARCHAR));
+    return builder;
+  }
+}
diff --git a/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java
new file mode 100644
index 0000000..c30b468
--- /dev/null
+++ b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java
@@ -0,0 +1,483 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.httpd;
+
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.MetadataUtils;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.drill.shaded.guava.com.google.common.collect.Maps;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import nl.basjes.parse.core.Casts;
+import nl.basjes.parse.core.Parser;
+import org.joda.time.Instant;
+import org.joda.time.LocalDate;
+import org.joda.time.LocalTime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+public class HttpdLogRecord {
+
+  private static final Logger logger = LoggerFactory.getLogger(HttpdLogRecord.class);
+
+  private final Map<String, ScalarWriter> strings = Maps.newHashMap();
+  private final Map<String, ScalarWriter> longs = Maps.newHashMap();
+  private final Map<String, ScalarWriter> doubles = Maps.newHashMap();
+  private final Map<String, ScalarWriter> dates = Maps.newHashMap();
+  private final Map<String, ScalarWriter> times = Maps.newHashMap();
+  private final Map<String, ScalarWriter> timestamps = new HashMap<>();
+  private final Map<String, TupleWriter> wildcards = Maps.newHashMap();
+  private final Map<String, String> cleanExtensions = Maps.newHashMap();
+  private final Map<String, TupleWriter> startedWildcards = Maps.newHashMap();
+  private final Map<String, TupleWriter> wildcardWriters = Maps.newHashMap();
+  private final SimpleDateFormat dateFormatter;
+  private RowSetLoader rootRowWriter;
+  private final boolean flattenWildcards;
+
+  public HttpdLogRecord(String timeFormat, boolean flattenWildcards) {
+    if (timeFormat == null) {
+      timeFormat = HttpdLogFormatConfig.DEFAULT_TS_FORMAT;
+    }
+    this.dateFormatter = new SimpleDateFormat(timeFormat);
+    this.flattenWildcards = flattenWildcards;
+  }
+
+  /**
+   * Call this method after a record has been parsed. This finishes the lifecycle of any maps that were written and
+   * clears the entries so that the next record can be written.
+   */
+  public void finishRecord() {
+    wildcardWriters.clear();
+    startedWildcards.clear();
+  }
+
+  /**
+   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
+   * called when the value of a log field is a String data type.
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void set(String field, String value) {
+    if (value != null) {
+      final ScalarWriter w = strings.get(field);
+      if (w != null) {
+        logger.debug("Parsed field: {}, as string: {}", field, value);
+        w.setString(value);
+      } else {
+        logger.warn("No 'string' writer found for field: {}", field);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
+   * called when the value of a log field is a Long data type.
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void set(String field, Long value) {
+    if (value != null) {
+      final ScalarWriter w = longs.get(field);
+      if (w != null) {
+        logger.debug("Parsed field: {}, as long: {}", field, value);
+        w.setLong(value);
+      } else {
+        logger.warn("No 'long' writer found for field: {}", field);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
+   * called when the value of a log field is a Date data type.
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void setDate(String field, String value) {
+    if (value != null) {
+      final ScalarWriter w = dates.get(field);
+      if (w != null) {
+        logger.debug("Parsed field: {}, as date: {}", field, value);
+        w.setDate(new LocalDate(value));
+      } else {
+        logger.warn("No 'date' writer found for field: {}", field);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
+   * called when the value of a log field is a Time data type.
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void setTime(String field, String value) {
+    if (value != null) {
+      final ScalarWriter w = times.get(field);
+      if (w != null) {
+        logger.debug("Parsed field: {}, as time: {}", field, value);
+        w.setTime(new LocalTime(value));
+      } else {
+        logger.warn("No 'time' writer found for field: {}", field);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
+   * called when the value of a log field is a timestamp data type.
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void setTimestampFromEpoch(String field, Long value) {
+    if (value != null) {
+      final ScalarWriter w = timestamps.get(field);
+      if (w != null) {
+        logger.debug("Parsed field: {}, as timestamp: {}", field, value);
+        w.setTimestamp(new Instant(value));
+      } else {
+        logger.warn("No 'timestamp' writer found for field: {}", field);
+      }
+    }
+  }
+
+
+  /**
+   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
+   * called when the value of a log field is a timestamp data type.
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void setTimestamp(String field, String value) {
+    if (value != null) {
+      //Convert the date string into a long
+      long ts = 0;
+      try {
+        Date d = this.dateFormatter.parse(value);
+        ts = d.getTime();
+      } catch (Exception e) {
+        //If the date formatter does not successfully create a date, the timestamp will fall back to zero
+        //Do not throw exception
+      }
+      final ScalarWriter tw = timestamps.get(field);
+      if (tw != null) {
+        logger.debug("Parsed field: {}, as timestamp: {}", field, value);
+        tw.setTimestamp(new Instant(ts));
+      } else {
+        logger.warn("No 'timestamp' writer found for field: {}", field);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
+   * called when the value of a log field is a Double data type.
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void set(String field, Double value) {
+    if (value != null) {
+      final ScalarWriter w = doubles.get(field);
+      if (w != null) {
+        logger.debug("Parsed field: {}, as double: {}", field, value);
+        w.setDouble(value);
+      } else {
+        logger.warn("No 'double' writer found for field: {}", field);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. When the parser processes a field like
+   * HTTP.URI:request.firstline.uri.query.*, where the star is an arbitrary field found by the parser, this method
+   * will be invoked. <br>
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void setWildcard(String field, String value) {
+    if (value != null) {
+      String cleanedField = HttpdUtils.getFieldNameFromMap(field);
+      if (flattenWildcards) {
+        String drillFieldName = HttpdUtils.drillFormattedFieldName(field);
+        ScalarWriter writer = getColWriter(rootRowWriter, drillFieldName, MinorType.VARCHAR);
+        writer.setString(value);
+      } else {
+        final TupleWriter mapWriter = getWildcardWriter(field);
+        logger.debug("Parsed wildcard field: {}, as String: {}", field, value);
+        writeStringColumn(mapWriter, cleanedField, value);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. When the parser processes a field like
+   * HTTP.URI:request.firstline.uri.query.*, where the star is an arbitrary field found by the parser, this method
+   * will be invoked. <br>
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void setWildcard(String field, Long value) {
+    if (value != null) {
+      String cleanedField = HttpdUtils.getFieldNameFromMap(field);
+
+      if (flattenWildcards) {
+        String drillFieldName = HttpdUtils.drillFormattedFieldName(field);
+        ScalarWriter writer = getColWriter(rootRowWriter, drillFieldName, MinorType.BIGINT);
+        writer.setLong(value);
+      } else {
+        final TupleWriter mapWriter = getWildcardWriter(field);
+        logger.debug("Parsed wildcard field: {}, as long: {}", field, value);
+        writeLongColumn(mapWriter, cleanedField, value);
+      }
+    }
+  }
+
+  /**
+   * This method is referenced and called via reflection. When the parser processes a field like
+   * HTTP.URI:request.firstline.uri.query.*, where the star is an arbitrary field found by the parser, this method
+   * will be invoked. <br>
+   *
+   * @param field name of field
+   * @param value value of field
+   */
+  @SuppressWarnings("unused")
+  public void setWildcard(String field, Double value) {
+    if (value != null) {
+      String cleanedField = HttpdUtils.getFieldNameFromMap(field);
+
+      if (flattenWildcards) {
+        String drillFieldName = HttpdUtils.drillFormattedFieldName(field);
+        ScalarWriter writer = getColWriter(rootRowWriter, drillFieldName, MinorType.FLOAT8);
+        writer.setDouble(value);
+      } else {
+        final TupleWriter mapWriter = getWildcardWriter(field);
+        logger.debug("Parsed wildcard field: {}, as double: {}", field, value);
+        writeFloatColumn(mapWriter, cleanedField, value);
+      }
+    }
+  }
+
+  /**
+   * For a configuration like HTTP.URI:request.firstline.uri.query.*, a writer was created with the name
+   * HTTP.URI:request.firstline.uri.query, so we traverse the list of wildcard writers to see which one is the root of
+   * the name of the field passed in, e.g. HTTP.URI:request.firstline.uri.query.old. That is the writer entry that is needed.
+   *
+   * @param field like HTTP.URI:request.firstline.uri.query.old where 'old' is one of many different parameter names.
+   * @return the writer to be used for this field.
+   */
+  private TupleWriter getWildcardWriter(String field) {
+
+    TupleWriter writer = startedWildcards.get(field);
+    if (writer == null) {
+      for (Map.Entry<String, TupleWriter> entry : wildcards.entrySet()) {
+        String root = entry.getKey();
+        if (field.startsWith(root)) {
+          writer = entry.getValue();
+          /*
+           * In order to save some time, store the cleaned version of the field extension. It is possible it will have
+           * unsafe characters in it.
+           */
+          if (!cleanExtensions.containsKey(field)) {
+            String extension = field.substring(root.length() + 1);
+            String cleanExtension = HttpdUtils.drillFormattedFieldName(extension);
+            cleanExtensions.put(field, cleanExtension);
+            logger.debug("Added extension: field='{}' with cleanExtension='{}'", field, cleanExtension);
+          }
+
+          /*
+           * We already have the writer, but if it has already been added to the started list, do NOT start it
+           * again.
+           */
+          if (!wildcardWriters.containsKey(root)) {
+            /*
+             * Start and store this root map writer for later retrieval.
+             */
+            logger.debug("Starting new wildcard field writer: {}", field);
+            startedWildcards.put(field, writer);
+            wildcardWriters.put(root, writer);
+          }
+          /*
+           * Break out of the for loop when we find a root writer that matches the field.
+           */
+          break;
+        }
+      }
+    }
+
+    return writer;
+  }
+
+  public Map<String, ScalarWriter> getStrings() {
+    return strings;
+  }
+
+  public Map<String, ScalarWriter> getLongs() {
+    return longs;
+  }
+
+  public Map<String, ScalarWriter> getDoubles() {
+    return doubles;
+  }
+
+  public Map<String, ScalarWriter> getTimestamps() {
+    return timestamps;
+  }
+
+  /**
+   * This record will be used with a single parser. For each field that is to be parsed, this method registers a
+   * setter that the parser will call via reflection. It also builds the data writers that hold the data being
+   * parsed.
+   *
+   * @param parser The initialized HttpdParser
+   * @param rowWriter An initialized RowSetLoader object
+   * @param columnCasts The logparser casts used to get the right data from the parser
+   * @param parserFieldName The field name which is generated by the Httpd Parser.  These are not "Drill safe"
+   * @param drillFieldName The Drill safe field name
+   * @param mappedColumns A list of columns mapped to their correct Drill data type
+   * @throws NoSuchMethodException Thrown in the event that the parser does not have a correct setter method
+   */
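+  // Illustrative example: a parserFieldName of "TIME.STAMP:request.receive.time" with drillFieldName
+  // "request_receive_time" registers the setTimestamp(String, String) target, while an ordinary string field such as
+  // "HTTP.URI:request.referer" (drillFieldName "request_referer") falls through to the generic set(String, String)
+  // target.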
+  public void addField(final Parser<HttpdLogRecord> parser,
+                       final RowSetLoader rowWriter,
+                       final Map<String, Casts> columnCasts,
+                       final String parserFieldName,
+                       final String drillFieldName,
+                       Map<String, MinorType> mappedColumns) throws NoSuchMethodException {
+    final boolean hasWildcard = parserFieldName.endsWith(HttpdParser.PARSER_WILDCARD);
+
+    final Casts type = columnCasts.getOrDefault(drillFieldName, Casts.STRING);
+
+    logger.debug("Field name: {}", parserFieldName);
+    rootRowWriter = rowWriter;
+    /*
+     * This is a dynamic way to map the setter for each specified field type,
+     * e.g. a TIME.EPOCH may map to a LONG while a referrer may map to a STRING.
+     */
+    if (hasWildcard) {
+      final String cleanName = parserFieldName.substring(0, parserFieldName.length() - HttpdParser.PARSER_WILDCARD.length());
+      logger.debug("Adding WILDCARD parse target: {} as {}, with field name: {}", parserFieldName, cleanName, drillFieldName);
+      parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, String.class), parserFieldName);
+      parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, Double.class), parserFieldName);
+      parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, Long.class), parserFieldName);
+      wildcards.put(cleanName, getMapWriter(drillFieldName, rowWriter));
+    } else if (type.equals(Casts.DOUBLE) || mappedColumns.get(drillFieldName) == MinorType.FLOAT8) {
+      parser.addParseTarget(this.getClass().getMethod("set", String.class, Double.class), parserFieldName);
+      doubles.put(parserFieldName, rowWriter.scalar(drillFieldName));
+    } else if (type.equals(Casts.LONG) || mappedColumns.get(drillFieldName) == MinorType.BIGINT) {
+      parser.addParseTarget(this.getClass().getMethod("set", String.class, Long.class), parserFieldName);
+      longs.put(parserFieldName, rowWriter.scalar(drillFieldName));
+    } else {
+      if (parserFieldName.startsWith("TIME.STAMP:")) {
+        parser.addParseTarget(this.getClass().getMethod("setTimestamp", String.class, String.class), parserFieldName);
+        timestamps.put(parserFieldName, rowWriter.scalar(drillFieldName));
+      } else if (parserFieldName.startsWith("TIME.EPOCH:")) {
+        parser.addParseTarget(this.getClass().getMethod("setTimestampFromEpoch", String.class, Long.class), parserFieldName);
+        timestamps.put(parserFieldName, rowWriter.scalar(drillFieldName));
+      } else if (parserFieldName.startsWith("TIME.DATE")) {
+        parser.addParseTarget(this.getClass().getMethod("setDate", String.class, String.class), parserFieldName);
+        dates.put(parserFieldName, rowWriter.scalar(drillFieldName));
+      } else if (parserFieldName.startsWith("TIME.TIME")) {
+        parser.addParseTarget(this.getClass().getMethod("setTime", String.class, String.class), parserFieldName);
+        times.put(parserFieldName, rowWriter.scalar(drillFieldName));
+      } else {
+        parser.addParseTarget(this.getClass().getMethod("set", String.class, String.class), parserFieldName);
+        strings.put(parserFieldName, rowWriter.scalar(drillFieldName));
+      }
+    }
+  }
+
+  private TupleWriter getMapWriter(String mapName, RowSetLoader rowWriter) {
+    int index = rowWriter.tupleSchema().index(mapName);
+    if (index == -1) {
+      index = rowWriter.addColumn(SchemaBuilder.columnSchema(mapName, TypeProtos.MinorType.MAP, TypeProtos.DataMode.REQUIRED));
+    }
+    return rowWriter.tuple(index);
+  }
+
+  /**
+   * Helper function to write a 1D long column
+   *
+   * @param rowWriter The row to which the data will be written
+   * @param name The column name
+   * @param value The value to be written
+   */
+  private void writeLongColumn(TupleWriter rowWriter, String name, long value) {
+    ScalarWriter colWriter = getColWriter(rowWriter, name, MinorType.BIGINT);
+    colWriter.setLong(value);
+  }
+
+  /**
+   * Helper function to write a 1D String column
+   *
+   * @param rowWriter The row to which the data will be written
+   * @param name The column name
+   * @param value The value to be written
+   */
+  private void writeStringColumn(TupleWriter rowWriter, String name, String value) {
+    ScalarWriter colWriter = getColWriter(rowWriter, name, MinorType.VARCHAR);
+    colWriter.setString(value);
+  }
+
+  /**
+   * Helper function to write a 1D double precision (FLOAT8) column
+   *
+   * @param rowWriter The row to which the data will be written
+   * @param name The column name
+   * @param value The value to be written
+   */
+  private void writeFloatColumn(TupleWriter rowWriter, String name, double value) {
+    ScalarWriter colWriter = getColWriter(rowWriter, name, MinorType.FLOAT8);
+    colWriter.setDouble(value);
+  }
+
+  private ScalarWriter getColWriter(TupleWriter tupleWriter, String fieldName, TypeProtos.MinorType type) {
+    int index = tupleWriter.tupleSchema().index(fieldName);
+    if (index == -1) {
+      ColumnMetadata colSchema = MetadataUtils.newScalar(fieldName, type, TypeProtos.DataMode.OPTIONAL);
+      index = tupleWriter.addColumn(colSchema);
+    }
+    return tupleWriter.scalar(index);
+  }
+}
diff --git a/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java
new file mode 100644
index 0000000..c31c5ad
--- /dev/null
+++ b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.httpd;
+
+import nl.basjes.parse.useragent.analyze.InvalidParserConfigurationException;
+import nl.basjes.parse.useragent.dissector.UserAgentDissector;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
+import nl.basjes.parse.core.Casts;
+import nl.basjes.parse.core.Parser;
+import nl.basjes.parse.core.exceptions.DissectionFailure;
+import nl.basjes.parse.core.exceptions.InvalidDissectorException;
+import nl.basjes.parse.core.exceptions.MissingDissectorsException;
+import nl.basjes.parse.httpdlog.HttpdLoglineParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static nl.basjes.parse.core.Casts.DOUBLE;
+import static nl.basjes.parse.core.Casts.DOUBLE_ONLY;
+import static nl.basjes.parse.core.Casts.LONG;
+import static nl.basjes.parse.core.Casts.LONG_ONLY;
+import static nl.basjes.parse.core.Casts.STRING;
+import static nl.basjes.parse.core.Casts.STRING_ONLY;
+
+public class HttpdParser {
+
+  private static final Logger logger = LoggerFactory.getLogger(HttpdParser.class);
+
+  public static final String PARSER_WILDCARD = ".*";
+  private final Parser<HttpdLogRecord> parser;
+  private final List<SchemaPath> requestedColumns;
+  private final Map<String, MinorType> mappedColumns;
+  private final Map<String, Casts> columnCasts;
+  private final HttpdLogRecord record;
+  private final String logFormat;
+  private final boolean parseUserAgent;
+  private final String logParserRemapping;
+  private Map<String, String> requestedPaths;
+
+  public HttpdParser(
+          final String logFormat,
+          final String timestampFormat,
+          final boolean flattenWildcards,
+          final boolean parseUserAgent,
+          final String logParserRemapping,
+          final EasySubScan scan) {
+
+    Preconditions.checkArgument(logFormat != null && !logFormat.trim().isEmpty(), "logFormat cannot be null or empty");
+
+    this.logFormat = logFormat;
+    this.parseUserAgent = parseUserAgent;
+    this.record = new HttpdLogRecord(timestampFormat, flattenWildcards);
+
+    this.logParserRemapping = logParserRemapping;
+
+    this.parser = new HttpdLoglineParser<>(HttpdLogRecord.class, this.logFormat, timestampFormat);
+    applyRemapping(parser);
+    /*
+     * The log parser can also parse the user agent and extract additional fields from it.
+     * Unfortunately, doing so negatively affects the startup speed of the parser, even if it is not used.
+     * So it is only enabled when the requested columns actually need it.
+     */
+    if (parseUserAgent) {
+      parser.addDissector(new UserAgentDissector());
+    }
+
+    this.requestedColumns = scan.getColumns();
+
+    if (timestampFormat != null && !timestampFormat.trim().isEmpty()) {
+      logger.info("Custom timestamp format has been specified. This is an informational note only as custom timestamps is rather unusual.");
+    }
+    if (logFormat.contains("\n")) {
+      logger.info("Specified logformat is a multiline log format: {}", logFormat);
+    }
+
+    mappedColumns = new TreeMap<>();
+    columnCasts = new TreeMap<>();
+  }
+
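+  /*
+   * The remapping string is a ';'-separated list of "field:newType" or "field:newType:CAST" entries, where CAST is
+   * one of STRING, LONG or DOUBLE and defaults to STRING when omitted. A purely hypothetical example:
+   * "request.firstline.uri.query.ua:HTTP.USERAGENT;request.firstline.uri.query.ts:TIME.EPOCH:LONG".
+   */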
+  private void applyRemapping(Parser<?> parser) {
+    if (logParserRemapping == null || logParserRemapping.isEmpty()) {
+      return;
+    }
+
+    for (String rawEntry: logParserRemapping.split(";")) {
+      String entry = rawEntry.replaceAll("\n","").replaceAll(" ","").trim();
+      if (entry.isEmpty()) {
+        continue;
+      }
+
+      String[] parts = entry.split(":");
+      String field = parts[0];
+      String newType = parts[1];
+      String castString = parts.length == 3 ? parts[2] : "STRING";
+
+      switch (castString) {
+        case "STRING":
+          parser.addTypeRemapping(field, newType, STRING_ONLY);
+          break;
+        case "LONG":
+          parser.addTypeRemapping(field, newType, LONG_ONLY);
+          break;
+        case "DOUBLE":
+          parser.addTypeRemapping(field, newType, DOUBLE_ONLY);
+          break;
+        default:
+          throw new InvalidParserConfigurationException("Invalid type remapping cast was specified");
+      }
+    }
+  }
+
+  /**
+   * We do not expose the underlying parser or the record which is used to manage the writers.
+   *
+   * @param line log line to tear apart.
+   * @throws DissectionFailure if there is a generic dissector failure
+   * @throws InvalidDissectorException if the dissector is not valid
+   * @throws MissingDissectorsException if the dissector is missing
+   */
+  public void parse(final String line) throws DissectionFailure, InvalidDissectorException, MissingDissectorsException {
+    parser.parse(record, line);
+    record.finishRecord();
+  }
+
+  public TupleMetadata setupParser()
+          throws NoSuchMethodException, MissingDissectorsException, InvalidDissectorException {
+
+    SchemaBuilder builder = new SchemaBuilder();
+
+    /*
+     * If the user has selected specific fields, we use them to configure the parser, since that is the most
+     * efficient way to parse the log.
+     */
+    List<String> allParserPaths = parser.getPossiblePaths();
+    allParserPaths.sort(String::compareTo);
+
+    /*
+     * Use all possible paths that the parser has determined from the specified log format.
+     */
+
+    // Create a mapping table to each allParserPaths field from their corresponding Drill column name.
+    requestedPaths = new TreeMap<>(); // Treemap to have a stable ordering!
+    for (final String parserPath : allParserPaths) {
+      requestedPaths.put(HttpdUtils.drillFormattedFieldName(parserPath), parserPath);
+    }
+
+    /*
+     * By adding the parse targets to the dummy instance we activate every path, which lets us find out which
+     * paths cast to which native data types. Once this information has been gathered, the dummy is discarded
+     * because it represents the slowest possible parsing path for the specified format.
+     */
+    Parser<Object> dummy = new HttpdLoglineParser<>(Object.class, logFormat);
+    applyRemapping(dummy);
+
+    if (parseUserAgent) {
+      dummy.addDissector(new UserAgentDissector());
+    }
+
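+    // String.indexOf(String) appears to be used only as an arbitrary, side-effect-free parse target here; registering
+    // a target for every path is what makes dummy.getCasts() below return the cast information.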
+    dummy.addParseTarget(String.class.getMethod("indexOf", String.class), allParserPaths);
+
+    /*
+     * If the column is not requested explicitly, remove it from the requested path list.
+     */
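+    // e.g. for a query like "SELECT request_referer FROM ...", every path other than the one mapped to
+    // "request_referer" is removed, so the parser is configured for just that single field.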
+    if (!isStarQuery() &&
+        !isMetadataQuery() &&
+        !isOnlyImplicitColumns()) {
+      List<String> keysToRemove = new ArrayList<>();
+      for (final String key : requestedPaths.keySet()) {
+        if (!isRequested(key)) {
+          keysToRemove.add(key);
+        }
+      }
+      keysToRemove.forEach(requestedPaths::remove);
+    }
+
+    EnumSet<Casts> allCasts;
+    for (final Map.Entry<String, String> entry : requestedPaths.entrySet()) {
+      allCasts = dummy.getCasts(entry.getValue());
+
+      // Select the cast we want to receive from the parser
+      Casts dataType = STRING;
+      if (allCasts.contains(DOUBLE)) {
+        dataType = DOUBLE;
+      } else if (allCasts.contains(LONG)) {
+        dataType = LONG;
+      }
+
+      columnCasts.put(entry.getKey(), dataType);
+
+      switch (dataType) {
+        case STRING:
+          if (entry.getValue().startsWith("TIME.STAMP:")) {
+            builder.addNullable(entry.getKey(), MinorType.TIMESTAMP);
+            mappedColumns.put(entry.getKey(), MinorType.TIMESTAMP);
+          } else if (entry.getValue().startsWith("TIME.DATE:")) {
+            builder.addNullable(entry.getKey(), MinorType.DATE);
+            mappedColumns.put(entry.getKey(), MinorType.DATE);
+          } else if (entry.getValue().startsWith("TIME.TIME:")) {
+            builder.addNullable(entry.getKey(), MinorType.TIME);
+            mappedColumns.put(entry.getKey(), MinorType.TIME);
+          } else if (HttpdUtils.isWildcard(entry.getValue())) {
+            builder.addMap(entry.getValue());
+            mappedColumns.put(entry.getKey(), MinorType.MAP);
+          } else {
+            builder.addNullable(entry.getKey(), TypeProtos.MinorType.VARCHAR);
+            mappedColumns.put(entry.getKey(), MinorType.VARCHAR);
+          }
+          break;
+        case LONG:
+          if (entry.getValue().startsWith("TIME.EPOCH:")) {
+            builder.addNullable(entry.getKey(), MinorType.TIMESTAMP);
+            mappedColumns.put(entry.getKey(), MinorType.TIMESTAMP);
+          } else {
+            builder.addNullable(entry.getKey(), TypeProtos.MinorType.BIGINT);
+            mappedColumns.put(entry.getKey(), MinorType.BIGINT);
+          }
+          break;
+        case DOUBLE:
+          builder.addNullable(entry.getKey(), TypeProtos.MinorType.FLOAT8);
+          mappedColumns.put(entry.getKey(), MinorType.FLOAT8);
+          break;
+        default:
+          logger.error("HTTPD Unsupported data type {} for field {}", dataType.toString(), entry.getKey());
+          break;
+      }
+    }
+    return builder.build();
+  }
+
+  public void addFieldsToParser(RowSetLoader rowWriter) {
+    for (final Map.Entry<String, String> entry : requestedPaths.entrySet()) {
+      try {
+        record.addField(parser, rowWriter, columnCasts, entry.getValue(), entry.getKey(), mappedColumns);
+      } catch (NoSuchMethodException e) {
+        logger.error("Error adding fields to parser.");
+      }
+    }
+    logger.debug("Added Fields to Parser");
+  }
+
+  public boolean isStarQuery() {
+    return requestedColumns.size() == 1 && requestedColumns.get(0).isDynamicStar();
+  }
+
+  public boolean isMetadataQuery() {
+    return requestedColumns.isEmpty();
+  }
+
+  public boolean isRequested(String colName) {
+    for (SchemaPath path : requestedColumns) {
+      if (path.isDynamicStar()) {
+        return true;
+      } else if (path.nameEquals(colName)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /*
+  This is for the edge case where a query only contains the implicit fields.
+   */
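+  // e.g. a query such as "SELECT _raw FROM ..." requests no parser-generated columns at all; in that case the
+  // column pruning in setupParser() is skipped and the query is treated like a star query.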
+  public boolean isOnlyImplicitColumns() {
+
+    // If there are more than two columns, this isn't an issue.
+    if (requestedColumns.size() > 2) {
+      return false;
+    }
+
+    if (requestedColumns.size() == 1) {
+      return requestedColumns.get(0).nameEquals(HttpdLogBatchReader.RAW_LINE_COL_NAME) ||
+        requestedColumns.get(0).nameEquals(HttpdLogBatchReader.MATCHED_COL_NAME);
+    } else {
+      return (requestedColumns.get(0).nameEquals(HttpdLogBatchReader.RAW_LINE_COL_NAME) ||
+        requestedColumns.get(0).nameEquals(HttpdLogBatchReader.MATCHED_COL_NAME)) &&
+        (requestedColumns.get(1).nameEquals(HttpdLogBatchReader.RAW_LINE_COL_NAME) ||
+        requestedColumns.get(1).nameEquals(HttpdLogBatchReader.MATCHED_COL_NAME));
+    }
+  }
+}
diff --git a/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdUtils.java b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdUtils.java
new file mode 100644
index 0000000..bb8d28e
--- /dev/null
+++ b/contrib/format-httpd/src/main/java/org/apache/drill/exec/store/httpd/HttpdUtils.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.httpd;
+
+public class HttpdUtils {
+
+  public static final String PARSER_WILDCARD = ".*";
+  public static final String SAFE_WILDCARD = "_$";
+  public static final String SAFE_SEPARATOR = "_";
+
+  /**
+   * Drill cannot deal with field names containing dots, such as request.referer. For the sake of simplicity we
+   * ensure the field name is cleansed; the resulting output field will look like request_referer.<br>
+   * Additionally, wildcards will be replaced with _$.
+   *
+   * @param parserFieldName name to be cleansed.
+   * @return The field name formatted for Drill
+   */
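+  // Example: "HTTP.URI:request.firstline.original.uri.query.came_from" becomes
+  // "request_firstline_original_uri_query_came__from" (underscores doubled, dots replaced by single underscores,
+  // and a trailing ".*" replaced by "_$").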
+  public static String drillFormattedFieldName(String parserFieldName) {
+    if (parserFieldName.contains(":")) {
+      String[] fieldPart = parserFieldName.split(":");
+      return fieldPart[1].replaceAll("_", "__").replace(PARSER_WILDCARD, SAFE_WILDCARD).replaceAll("\\.", SAFE_SEPARATOR);
+    } else {
+      return parserFieldName.replaceAll("_", "__").replace(PARSER_WILDCARD, SAFE_WILDCARD).replaceAll("\\.", SAFE_SEPARATOR);
+    }
+  }
+
+  /**
+   * Returns true if the field is a wildcard (i.e. a map) field, false otherwise.
+   * @param fieldName The target field name
+   * @return True if the field is a wildcard, false if not
+   */
+  public static boolean isWildcard(String fieldName) {
+    return fieldName.endsWith(PARSER_WILDCARD);
+  }
+
+  /**
+   * The HTTPD parser formats fields using the format HTTP.URI:request.firstline.uri.query.
+   * For maps, we only want the last part of this, so this function returns the last bit of the
+   * field name.
+   * @param mapField The unformatted field name
+   * @return The last part of the field name
+   */
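+  // Example: "HTTP.URI:request.firstline.uri.query.came_from" yields "came_from".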
+  public static String getFieldNameFromMap(String mapField) {
+    return mapField.substring(mapField.lastIndexOf('.') + 1);
+  }
+
+}
diff --git a/contrib/format-httpd/src/main/resources/bootstrap-format-plugins.json b/contrib/format-httpd/src/main/resources/bootstrap-format-plugins.json
new file mode 100644
index 0000000..654c228
--- /dev/null
+++ b/contrib/format-httpd/src/main/resources/bootstrap-format-plugins.json
@@ -0,0 +1,37 @@
+{
+  "storage":{
+    "dfs": {
+      "type": "file",
+      "formats": {
+        "httpd" : {
+          "type" : "httpd",
+          "logFormat" : "common\ncombined",
+          "maxErrors": 0,
+          "flattenWildcards": false
+        }
+      }
+    },
+    "cp": {
+      "type": "file",
+      "formats": {
+        "httpd" : {
+          "type" : "httpd",
+          "logFormat" : "common\ncombined",
+          "maxErrors": 0,
+          "flattenWildcards": false
+        }
+      }
+    },
+    "s3": {
+      "type": "file",
+      "formats": {
+        "httpd" : {
+          "type" : "httpd",
+          "logFormat" : "common\ncombined",
+          "maxErrors": 0,
+          "flattenWildcards": false
+        }
+      }
+    }
+  }
+}
diff --git a/contrib/format-httpd/src/main/resources/drill-module.conf b/contrib/format-httpd/src/main/resources/drill-module.conf
new file mode 100644
index 0000000..6236c50
--- /dev/null
+++ b/contrib/format-httpd/src/main/resources/drill-module.conf
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#  This file tells Drill to consider this module when class path scanning.
+#  This file can also include any supplementary configuration information.
+#  This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md for more information.
+
+drill.classpath.scanning.packages += "org.apache.drill.exec.store.httpd"
diff --git a/contrib/format-httpd/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReader.java b/contrib/format-httpd/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReader.java
new file mode 100644
index 0000000..f240a82
--- /dev/null
+++ b/contrib/format-httpd/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReader.java
@@ -0,0 +1,464 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.httpd;
+
+import org.apache.drill.categories.RowSetTests;
+import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.rowSet.RowSetComparison;
+import org.apache.drill.test.rowSet.RowSetUtilities;
+import org.joda.time.LocalDate;
+import org.joda.time.LocalTime;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import java.nio.file.Paths;
+import java.util.stream.Collectors;
+
+import static org.apache.drill.test.QueryTestUtil.generateCompressedFile;
+import static org.junit.Assert.assertEquals;
+import static org.apache.drill.test.rowSet.RowSetUtilities.mapArray;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+
+@Category(RowSetTests.class)
+public class TestHTTPDLogReader extends ClusterTest {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    // Needed for compressed file unit test
+    dirTestWatcher.copyResourceToRoot(Paths.get("httpd/"));
+  }
+
+  @Test
+  public void testDateField() throws RpcException {
+    String sql = "SELECT `request_receive_time` FROM cp.`httpd/hackers-access-small.httpd` LIMIT 5";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("request_receive_time", MinorType.TIMESTAMP)
+      .build();
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(1445742685000L)
+      .addRow(1445742686000L)
+      .addRow(1445742687000L)
+      .addRow(1445743471000L)
+      .addRow(1445743472000L)
+      .build();
+
+    RowSetUtilities.verify(expected, results);
+  }
+
+  @Test
+  public void testDateEpochField() throws RpcException {
+    String sql = "SELECT `request_receive_time`, `request_receive_time_epoch` FROM cp.`httpd/hackers-access-small.httpd` LIMIT 5";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("request_receive_time", MinorType.TIMESTAMP)
+      .addNullable("request_receive_time_epoch", MinorType.TIMESTAMP)
+      .build();
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(1445742685000L, 1445742685000L)
+      .addRow(1445742686000L, 1445742686000L)
+      .addRow(1445742687000L, 1445742687000L )
+      .addRow(1445743471000L, 1445743471000L)
+      .addRow(1445743472000L, 1445743472000L)
+      .build();
+
+    RowSetUtilities.verify(expected, results);
+  }
+
+  @Test
+  public void testCount() throws Exception {
+    String sql = "SELECT COUNT(*) FROM cp.`httpd/hackers-access-small.httpd`";
+    long result = client.queryBuilder().sql(sql).singletonLong();
+    assertEquals(10L, result);
+  }
+
+  @Test
+  public void testSerDe() throws Exception {
+    String sql = "SELECT COUNT(*) AS cnt FROM cp.`httpd/hackers-access-small.httpd`";
+    String plan = queryBuilder().sql(sql).explainJson();
+    long cnt = queryBuilder().physical(plan).singletonLong();
+    assertEquals("Counts should match",10L, cnt);
+  }
+
+  @Test
+  public void testFlattenMap() throws Exception {
+    String sql = "SELECT request_firstline_original_uri_query_came__from " +
+      "FROM  table(cp.`httpd/hackers-access-small.httpd` (type => 'httpd', logFormat => '%h %l %u %t \\\"%r\\\" %s %b \\\"%{Referer}i\\\" " +
+      "\\\"%{User-agent}i\\\"', " +
+      "flattenWildcards => true)) WHERE `request_firstline_original_uri_query_came__from` IS NOT NULL";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("request_firstline_original_uri_query_came__from", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("http://howto.basjes.nl/join_form")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+
+  @Test
+  public void testLimitPushdown() throws Exception {
+    String sql = "SELECT * FROM cp.`httpd/hackers-access-small.httpd` LIMIT 5";
+
+    queryBuilder()
+      .sql(sql)
+      .planMatcher()
+      .include("Limit", "maxRecords=5")
+      .match();
+  }
+
+  @Test
+  public void testMapField() throws Exception {
+    String sql = "SELECT data.`request_firstline_original_uri_query_$`.aqb AS aqb, data.`request_firstline_original_uri_query_$`.t AS data_time " +
+      "FROM cp.`httpd/example1.httpd` AS data";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("aqb", MinorType.VARCHAR)
+      .addNullable("data_time", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("1", "19/5/2012 23:51:27 2 -120")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testSingleExplicitColumn() throws Exception {
+    String sql = "SELECT request_referer FROM cp.`httpd/hackers-access-small.httpd`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("request_referer", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("http://howto.basjes.nl/")
+      .addRow("http://howto.basjes.nl/")
+      .addRow("http://howto.basjes.nl/join_form")
+      .addRow("http://howto.basjes.nl/")
+      .addRow("http://howto.basjes.nl/join_form")
+      .addRow("http://howto.basjes.nl/join_form")
+      .addRow("http://howto.basjes.nl/")
+      .addRow("http://howto.basjes.nl/login_form")
+      .addRow("http://howto.basjes.nl/")
+      .addRow("http://howto.basjes.nl/")
+      .build();
+
+    assertEquals(10, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+
+  @Test
+  public void testImplicitColumn() throws Exception {
+    String sql = "SELECT _raw FROM cp.`httpd/hackers-access-small.httpd`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("_raw", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("195.154.46.135 - - [25/Oct/2015:04:11:25 +0100] \"GET /linux/doing-pxe-without-dhcp-control HTTP/1.1\" 200 24323 \"http://howto.basjes.nl/\" \"Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0\"")
+      .addRow("23.95.237.180 - - [25/Oct/2015:04:11:26 +0100] \"GET /join_form HTTP/1.0\" 200 11114 \"http://howto.basjes.nl/\" \"Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0\"")
+      .addRow("23.95.237.180 - - [25/Oct/2015:04:11:27 +0100] \"POST /join_form HTTP/1.1\" 302 9093 \"http://howto.basjes.nl/join_form\" \"Mozilla/5.0 (Windows NT 5.1; rv:35.0) " +
+        "Gecko/20100101 Firefox/35.0\"")
+      .addRow("158.222.5.157 - - [25/Oct/2015:04:24:31 +0100] \"GET /join_form HTTP/1.0\" 200 11114 \"http://howto.basjes.nl/\" \"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0 AlexaToolbar/alxf-2.21\"")
+      .addRow("158.222.5.157 - - [25/Oct/2015:04:24:32 +0100] \"POST /join_form HTTP/1.1\" 302 9093 \"http://howto.basjes.nl/join_form\" \"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0 AlexaToolbar/alxf-2.21\"")
+      .addRow("158.222.5.157 - - [25/Oct/2015:04:24:37 +0100] \"GET /acl_users/credentials_cookie_auth/require_login?came_from=http%3A//howto.basjes.nl/join_form HTTP/1.1\" 200 10716 \"http://howto.basjes.nl/join_form\" \"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0 AlexaToolbar/alxf-2.21\"")
+      .addRow("158.222.5.157 - - [25/Oct/2015:04:24:39 +0100] \"GET /login_form HTTP/1.1\" 200 10543 \"http://howto.basjes.nl/\" \"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0 AlexaToolbar/alxf-2.21\"")
+      .addRow("158.222.5.157 - - [25/Oct/2015:04:24:41 +0100] \"POST /login_form HTTP/1.1\" 200 16810 \"http://howto.basjes.nl/login_form\" \"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0 AlexaToolbar/alxf-2.21\"")
+      .addRow("5.39.5.5 - - [25/Oct/2015:04:32:22 +0100] \"GET /join_form HTTP/1.1\" 200 11114 \"http://howto.basjes.nl/\" \"Mozilla/5.0 (Windows NT 5.1; rv:34.0) Gecko/20100101 Firefox/34.0\"")
+      .addRow("180.180.64.16 - - [25/Oct/2015:04:34:37 +0100] \"GET /linux/doing-pxe-without-dhcp-control HTTP/1.1\" 200 24323 \"http://howto.basjes.nl/\" \"Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0\"")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testExplicitSomeQuery() throws Exception {
+    String sql = "SELECT request_referer_ref, request_receive_time_last_time, request_firstline_uri_protocol FROM cp.`httpd/hackers-access-small.httpd`";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("request_referer_ref", MinorType.VARCHAR)
+      .addNullable("request_receive_time_last_time", MinorType.TIME)
+      .addNullable("request_firstline_uri_protocol", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+      .addRow(null, new LocalTime("04:11:25"), null)
+      .addRow(null, new LocalTime("04:11:26"), null)
+      .addRow(null, new LocalTime("04:11:27"), null)
+      .addRow(null, new LocalTime("04:24:31"), null)
+      .addRow(null, new LocalTime("04:24:32"), null)
+      .addRow(null, new LocalTime("04:24:37"), null)
+      .addRow(null, new LocalTime("04:24:39"), null)
+      .addRow(null, new LocalTime("04:24:41"), null)
+      .addRow(null, new LocalTime("04:32:22"), null)
+      .addRow(null, new LocalTime("04:34:37"), null)
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+
+  @Test
+  public void testExplicitSomeQueryWithCompressedFile() throws Exception {
+    generateCompressedFile("httpd/hackers-access-small.httpd", "zip", "httpd/hackers-access-small.httpd.zip" );
+
+    String sql = "SELECT request_referer_ref, request_receive_time_last_time, request_firstline_uri_protocol FROM dfs.`httpd/hackers-access-small.httpd.zip`";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("request_referer_ref", MinorType.VARCHAR)
+      .addNullable("request_receive_time_last_time", MinorType.TIME)
+      .addNullable("request_firstline_uri_protocol", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+      .addRow(null, new LocalTime("04:11:25"), null)
+      .addRow(null, new LocalTime("04:11:26"), null)
+      .addRow(null, new LocalTime("04:11:27"), null)
+      .addRow(null, new LocalTime("04:24:31"), null)
+      .addRow(null, new LocalTime("04:24:32"), null)
+      .addRow(null, new LocalTime("04:24:37"), null)
+      .addRow(null, new LocalTime("04:24:39"), null)
+      .addRow(null, new LocalTime("04:24:41"), null)
+      .addRow(null, new LocalTime("04:32:22"), null)
+      .addRow(null, new LocalTime("04:34:37"), null)
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  private TupleMetadata expectedAllFieldsSchema() {
+    return new SchemaBuilder()
+            .addNullable("connection_client_host", MinorType.VARCHAR)
+            .addNullable("connection_client_host_last", MinorType.VARCHAR)
+            .addNullable("connection_client_logname", MinorType.BIGINT)
+            .addNullable("connection_client_logname_last", MinorType.BIGINT)
+            .addNullable("connection_client_user", MinorType.VARCHAR)
+            .addNullable("connection_client_user_last", MinorType.VARCHAR)
+            .addNullable("request_firstline", MinorType.VARCHAR)
+            .addNullable("request_firstline_method", MinorType.VARCHAR)
+            .addNullable("request_firstline_original", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_method", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_protocol", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_protocol_version", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_uri", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_uri_host", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_uri_path", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_uri_port", MinorType.BIGINT)
+            .addNullable("request_firstline_original_uri_protocol", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_uri_query", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_uri_ref", MinorType.VARCHAR)
+            .addNullable("request_firstline_original_uri_userinfo", MinorType.VARCHAR)
+            .addNullable("request_firstline_protocol", MinorType.VARCHAR)
+            .addNullable("request_firstline_protocol_version", MinorType.VARCHAR)
+            .addNullable("request_firstline_uri", MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_host", MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_path", MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_port", MinorType.BIGINT)
+            .addNullable("request_firstline_uri_protocol", MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_query", MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_ref", MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_userinfo", MinorType.VARCHAR)
+            .addNullable("request_receive_time", MinorType.TIMESTAMP)
+            .addNullable("request_receive_time_date", MinorType.DATE)
+            .addNullable("request_receive_time_date__utc", MinorType.DATE)
+            .addNullable("request_receive_time_day", MinorType.BIGINT)
+            .addNullable("request_receive_time_day__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_epoch", MinorType.TIMESTAMP)
+            .addNullable("request_receive_time_hour", MinorType.BIGINT)
+            .addNullable("request_receive_time_hour__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last", MinorType.TIMESTAMP)
+            .addNullable("request_receive_time_last_date", MinorType.DATE)
+            .addNullable("request_receive_time_last_date__utc", MinorType.DATE)
+            .addNullable("request_receive_time_last_day", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_day__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_epoch", MinorType.TIMESTAMP)
+            .addNullable("request_receive_time_last_hour", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_hour__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_microsecond", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_microsecond__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_millisecond", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_millisecond__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_minute", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_minute__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_month", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_month__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_monthname", MinorType.VARCHAR)
+            .addNullable("request_receive_time_last_monthname__utc", MinorType.VARCHAR)
+            .addNullable("request_receive_time_last_nanosecond", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_nanosecond__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_second", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_second__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_time", MinorType.TIME)
+            .addNullable("request_receive_time_last_time__utc", MinorType.TIME)
+            .addNullable("request_receive_time_last_timezone", MinorType.VARCHAR)
+            .addNullable("request_receive_time_last_weekofweekyear", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_weekofweekyear__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_weekyear", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_weekyear__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_year", MinorType.BIGINT)
+            .addNullable("request_receive_time_last_year__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_microsecond", MinorType.BIGINT)
+            .addNullable("request_receive_time_microsecond__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_millisecond", MinorType.BIGINT)
+            .addNullable("request_receive_time_millisecond__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_minute", MinorType.BIGINT)
+            .addNullable("request_receive_time_minute__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_month", MinorType.BIGINT)
+            .addNullable("request_receive_time_month__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_monthname", MinorType.VARCHAR)
+            .addNullable("request_receive_time_monthname__utc", MinorType.VARCHAR)
+            .addNullable("request_receive_time_nanosecond", MinorType.BIGINT)
+            .addNullable("request_receive_time_nanosecond__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_second", MinorType.BIGINT)
+            .addNullable("request_receive_time_second__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_time", MinorType.TIME)
+            .addNullable("request_receive_time_time__utc", MinorType.TIME)
+            .addNullable("request_receive_time_timezone", MinorType.VARCHAR)
+            .addNullable("request_receive_time_weekofweekyear", MinorType.BIGINT)
+            .addNullable("request_receive_time_weekofweekyear__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_weekyear", MinorType.BIGINT)
+            .addNullable("request_receive_time_weekyear__utc", MinorType.BIGINT)
+            .addNullable("request_receive_time_year", MinorType.BIGINT)
+            .addNullable("request_receive_time_year__utc", MinorType.BIGINT)
+            .addNullable("request_referer", MinorType.VARCHAR)
+            .addNullable("request_referer_host", MinorType.VARCHAR)
+            .addNullable("request_referer_last", MinorType.VARCHAR)
+            .addNullable("request_referer_last_host", MinorType.VARCHAR)
+            .addNullable("request_referer_last_path", MinorType.VARCHAR)
+            .addNullable("request_referer_last_port", MinorType.BIGINT)
+            .addNullable("request_referer_last_protocol", MinorType.VARCHAR)
+            .addNullable("request_referer_last_query", MinorType.VARCHAR)
+            .addNullable("request_referer_last_ref", MinorType.VARCHAR)
+            .addNullable("request_referer_last_userinfo", MinorType.VARCHAR)
+            .addNullable("request_referer_path", MinorType.VARCHAR)
+            .addNullable("request_referer_port", MinorType.BIGINT)
+            .addNullable("request_referer_protocol", MinorType.VARCHAR)
+            .addNullable("request_referer_query", MinorType.VARCHAR)
+            .addNullable("request_referer_ref", MinorType.VARCHAR)
+            .addNullable("request_referer_userinfo", MinorType.VARCHAR)
+            .addNullable("request_status_last", MinorType.VARCHAR)
+            .addNullable("request_user-agent", MinorType.VARCHAR)
+            .addNullable("request_user-agent_last", MinorType.VARCHAR)
+            .addNullable("response_body_bytes", MinorType.BIGINT)
+            .addNullable("response_body_bytes_last", MinorType.BIGINT)
+            .addNullable("response_body_bytesclf", MinorType.BIGINT)
+            .add("request_firstline_original_uri_query_$", MinorType.MAP)
+            .add("request_firstline_uri_query_$", MinorType.MAP)
+            .add("request_referer_last_query_$", MinorType.MAP)
+            .add("request_referer_query_$", MinorType.MAP)
+            .build();
+  }
+
+  private RowSet expectedAllFieldsRowSet(TupleMetadata expectedSchema) {
+    return client
+            .rowSetBuilder(expectedSchema)
+            .addRow("195.154.46.135", "195.154.46.135", null, null, null, null,
+                    "GET /linux/doing-pxe-without-dhcp-control HTTP/1.1", "GET",
+                    "GET /linux/doing-pxe-without-dhcp-control HTTP/1.1", "GET",
+                    "HTTP/1.1", "1.1", "/linux/doing-pxe-without-dhcp-control", null, "/linux/doing-pxe-without-dhcp-control", null, null, null, null, null,
+                    "HTTP/1.1", "1.1", "/linux/doing-pxe-without-dhcp-control", null, "/linux/doing-pxe-without-dhcp-control", null, null, null, null, null,
+                    1445742685000L, new LocalDate("2015-10-25"), new LocalDate("2015-10-25"), 25, 25, 1445742685000L, 4, 3,
+                    1445742685000L, new LocalDate("2015-10-25"), new LocalDate("2015-10-25"), 25, 25, 1445742685000L, 4, 3,
+                    0, 0, 0, 0, 11, 11, 10, 10, "October", "October", 0, 0, 25, 25, new LocalTime("04:11:25"), new LocalTime("03:11:25"), "+01:00", 43, 43, 2015, 2015, 2015, 2015,
+                    0, 0, 0, 0, 11, 11, 10, 10, "October", "October", 0, 0, 25, 25, new LocalTime("04:11:25"), new LocalTime("03:11:25"), "+01:00", 43, 43, 2015, 2015, 2015, 2015,
+                    "http://howto.basjes.nl/", "howto.basjes.nl",
+                    "http://howto.basjes.nl/", "howto.basjes.nl",
+                    "/", null, "http", null, null, null,
+                    "/", null, "http", null, null, null,
+                    "200",
+                    "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0",
+                    "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0",
+                    24323, 24323, 24323, mapArray(), mapArray(), mapArray(), mapArray())
+            .build();
+  }
+
+  @Test
+  public void testStarRowSet() throws Exception {
+    String sql = "SELECT * FROM cp.`httpd/hackers-access-really-small.httpd`";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = expectedAllFieldsSchema();
+
+    RowSet expectedRowSet = expectedAllFieldsRowSet(expectedSchema);
+    new RowSetComparison(expectedRowSet).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testExplicitAllFields() throws Exception {
+    TupleMetadata expectedSchema = expectedAllFieldsSchema();
+
+    // To avoid typos we generate the SQL from the schema.
+    String sql = "SELECT `" +
+            expectedSchema
+                    .toFieldList()
+                    .stream()
+                    .map(MaterializedField::getName)
+                    .collect(Collectors.joining("`, `")) +
+            "` FROM cp.`httpd/hackers-access-really-small.httpd`";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    RowSet expectedRowSet = expectedAllFieldsRowSet(expectedSchema);
+    new RowSetComparison(expectedRowSet).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testInvalidFormat() throws Exception {
+    String sql = "SELECT * FROM cp.`httpd/dfs-bootstrap.httpd`";
+    try {
+      run(sql);
+      fail();
+    } catch (DrillRuntimeException e) {
+      assertTrue(e.getMessage().contains("Error reading HTTPD file "));
+    }
+  }
+}
diff --git a/contrib/format-httpd/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReaderUserAgent.java b/contrib/format-httpd/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReaderUserAgent.java
new file mode 100644
index 0000000..8b23efb
--- /dev/null
+++ b/contrib/format-httpd/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReaderUserAgent.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.httpd;
+
+import org.apache.drill.categories.RowSetTests;
+import org.apache.drill.common.logical.FormatPluginConfig;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.rpc.RpcException;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.rowSet.RowSetUtilities;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+@Category(RowSetTests.class)
+public class TestHTTPDLogReaderUserAgent extends ClusterTest {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    // Needed for compressed file unit test
+    dirTestWatcher.copyResourceToRoot(Paths.get("httpd/"));
+
+    defineHttpdPlugin();
+  }
+
+  private static void defineHttpdPlugin() {
+    Map<String, FormatPluginConfig> formats = new HashMap<>();
+    formats.put("multiformat", new HttpdLogFormatConfig(
+            Collections.singletonList("access_log"),
+            "combined" + '\n' +
+            "common" + '\n' +
+            "%h %l %u %t \"%r\" %s %b \"%{Referer}i\"" + '\n' +
+            "%h %l %u %t \"%r\" %s %b \"%{Referer}i\" \"%{User-agent}i\"" + '\n' +
+            "%%%h %a %A %l %u %t \"%r\" %>s %b %p \"%q\" \"%!200,304,302{Referer}i\" %D " +
+            "\"%200{User-agent}i\" \"%{Cookie}i\" \"%{Set-Cookie}o\" \"%{If-None-Match}i\" \"%{Etag}o\"" + '\n',
+            null,
+            0,
+            true,
+            true,
+            null));
+
+    // Define a temporary plugin for the "cp" storage plugin.
+    cluster.defineFormats("cp", formats);
+  }
+
+  @Test
+  public void testMultiFormatUserAgent() throws RpcException {
+    String sql =
+            "SELECT                                                       " +
+            "          `request_receive_time_epoch`,                      " +
+            "          `request_user-agent`,                              " +
+            "          `request_user-agent_device__name`,                 " +
+            "          `request_user-agent_agent__name__version__major`   " +
+            "FROM   cp.`httpd/multiformat.access_log`                     ";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+            .addNullable("request_receive_time_epoch",                      MinorType.TIMESTAMP)
+            .addNullable("request_user-agent",                              MinorType.VARCHAR)
+            .addNullable("request_user-agent_device__name",                 MinorType.VARCHAR)
+            .addNullable("request_user-agent_agent__name__version__major",  MinorType.VARCHAR)
+            .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+            .addRow(1_356_994_180_000L, "Mozilla/5.0 (X11; Linux i686 on x86_64; rv:11.0) Gecko/20100101 Firefox/11.0", "Linux Desktop", "Firefox 11")
+            .addRow(1_356_994_181_000L, "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Apple Macintosh", "Chrome 66")
+            .addRow(1_388_530_181_000L, null, null, null) // This line in the input does not have the useragent field at all.
+            .build();
+
+    RowSetUtilities.verify(expected, results);
+  }
+
+  @Test
+  public void testUserAgentEnabled() throws Exception {
+    String sql =
+            "SELECT                                                               " +
+                    "          `request_receive_time_epoch`,                      " +
+                    "          `request_user-agent`,                              " +
+                    "          `request_user-agent_device__name`,                 " +
+                    "          `request_user-agent_agent__name__version__major`   " +
+                    "FROM       table(                                            " +
+                    "             cp.`httpd/typeremap.log`                        " +
+                    "                 (                                           " +
+                    "                   type => 'httpd',                          " +
+                    "                   logFormat => 'common\ncombined\n%h %l %u %t \"%r\" %>s %b %{RequestId}o\n',\n" +
+                    "                   flattenWildcards => true,                 " +
+                    "                   parseUserAgent => true                    " +
+                    "                 )                                           " +
+                    "           )                                                 ";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+            .addNullable("request_receive_time_epoch",                      MinorType.TIMESTAMP)
+            .addNullable("request_user-agent",                              MinorType.VARCHAR)
+            .addNullable("request_user-agent_device__name",                 MinorType.VARCHAR)
+            .addNullable("request_user-agent_agent__name__version__major",  MinorType.VARCHAR)
+            .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+            .addRow(1_388_530_181_000L,
+                    "Mozilla/5.0 (compatible; Googlebot/2.1; Yauaa Bot/42.123; +https://yauaa.basjes.nl)", "Basjes Googlebot Imitator", "Yauaa Bot 42")
+            .build();
+
+    RowSetUtilities.verify(expected, results);
+  }
+
+  @Test
+  public void testUserAgentDisabled() throws Exception {
+    String sql =
+            "SELECT                                                               " +
+            "          `request_receive_time_epoch`,                      " +
+            "          `request_user-agent`,                              " +
+            "          `request_user-agent_device__name`,                 " +
+            "          `request_user-agent_agent__name__version__major`   " +
+            "FROM       table(                                            " +
+            "             cp.`httpd/typeremap.log`                        " +
+            "                 (                                           " +
+            "                   type => 'httpd',                          " +
+            "                   logFormat => 'common\ncombined\n%h %l %u %t \"%r\" %>s %b %{RequestId}o\n',\n" +
+            "                   flattenWildcards => true                  " +
+            "                 )                                           " +
+            "           )                                                 " +
+            "LIMIT 1                                                      ";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+            .addNullable("request_receive_time_epoch",                      MinorType.TIMESTAMP)
+            .addNullable("request_user-agent",                              MinorType.VARCHAR)
+            .addNullable("request_user-agent_device__name",                 MinorType.VARCHAR)
+            .addNullable("request_user-agent_agent__name__version__major",  MinorType.VARCHAR)
+            .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+            .addRow(1_388_530_181_000L,
+                    "Mozilla/5.0 (compatible; Googlebot/2.1; Yauaa Bot/42.123; +https://yauaa.basjes.nl)", null, null)
+            .build();
+
+    RowSetUtilities.verify(expected, results);
+  }
+
+  @Test
+  public void testUserAgentAndTypeRemapping() throws Exception {
+    String sql =
+            "SELECT                                                                           \n" +
+            "          `request_receive_time_epoch`                                           \n" +
+            "        , `request_user-agent`                                                   \n" +
+            "        , `request_user-agent_device__name`                                      \n" +
+            "        , `request_user-agent_agent__name__version__major`                       \n" +
+            "        , `request_firstline_uri_query_timestamp`                                \n" +
+            "        , `request_firstline_uri_query_ua`                                       \n" +
+            "        , `request_firstline_uri_query_ua_device__name`                          \n" +
+            "        , `request_firstline_uri_query_ua_agent__name__version__major`           \n" +
+            "        , `response_header_requestid_epoch`                                      \n" +
+//            "        , *                                                                     \n"+
+            "FROM       table(                                                                \n" +
+            "             cp.`httpd/typeremap.log`                                            \n" +
+            "                 (                                                               \n" +
+            "                   type => 'httpd',                                              \n" +
+            //                  LogFormat: Mind the leading and trailing spaces! Empty lines are ignored
+            "                   logFormat => 'common\ncombined\n%h %l %u %t \"%r\" %>s %b %{RequestId}o\n',\n" +
+            "                   flattenWildcards => true,                                     \n" +
+            "                   parseUserAgent => true,                                       \n" +
+            "                   logParserRemapping => '                                       \n" +
+            "                       request.firstline.uri.query.ua        :HTTP.USERAGENT ;   \n" +
+            "                       response.header.requestid             :MOD_UNIQUE_ID  ;   \n" +
+            "                       request.firstline.uri.query.timestamp :TIME.EPOCH : LONG  \n" +
+            "                   '                                                             \n" +
+            "                 )                                                               \n" +
+            "           )                                                                     \n";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    results.print();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+            .addNullable("request_receive_time_epoch",                          MinorType.TIMESTAMP)
+            .addNullable("request_user-agent",                                  MinorType.VARCHAR)
+            .addNullable("request_user-agent_device__name",                     MinorType.VARCHAR)
+            .addNullable("request_user-agent_agent__name__version__major",      MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_query_timestamp",               MinorType.TIMESTAMP)
+            .addNullable("request_firstline_uri_query_ua",                      MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_query_ua_device__name",         MinorType.VARCHAR)
+            .addNullable("request_firstline_uri_query_ua_agent__name__version__major", MinorType.VARCHAR)
+            .addNullable("response_header_requestid_epoch",                     MinorType.TIMESTAMP)
+            .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+            .addRow(// These are directly parsed from the line
+                    1_388_530_181_000L, // 2013-12-31T22:49:41.000Z
+                    "Mozilla/5.0 (compatible; Googlebot/2.1; Yauaa Bot/42.123; +https://yauaa.basjes.nl)",
+                    "Basjes Googlebot Imitator", "Yauaa Bot 42",
+
+                    // These are parsed by casting the query string parameters to something else
+                    1_607_506_430_621L, // 2020-12-09T09:33:50.621
+                    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36",
+                    "Apple Macintosh", "Chrome 66",
+
+                    null // No mod_unique_id field present
+            )
+            .addRow(// These are directly parsed from the line
+                    1_388_530_181_000L, // 2013-12-31T22:49:41.000Z
+                    null,               // The second line in the test file does not have a useragent field.
+                    null, null,
+
+                    // These are parsed by casting the query string parameters to something else
+                    1_607_506_430_621L, // 2020-12-09T09:33:50.621
+                    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3359.139 Safari/537.36",
+                    "Apple Macintosh", "Chrome 77",
+
+                    1_372_024_799_000L // 2013-06-23T21:59:59.000Z ==> The timestamp of the mod_unique_id value
+            )
+            .addRow(// These are directly parsed from the line
+                    1_388_530_181_000L, // 2013-12-31T22:49:41.000Z
+                    null,               // The third line in the test file does not have a useragent field either.
+                    null, null,
+
+                    // These are parsed by casting the query string parameters to something else
+                    1_607_506_430_621L, // 2020-12-09T09:33:50.621
+                    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.3359.139 Safari/537.36",
+                    "Apple Macintosh", "Chrome 55",
+
+                    null // No mod_unique_id field present
+            )
+            .build();
+
+    RowSetUtilities.verify(expected, results);
+  }
+}
+
+
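The `logParserRemapping` value used in `testUserAgentAndTypeRemapping` packs several mappings into one string. A minimal sketch of how such a string could be assembled, assuming (as the test suggests) that each entry has the form `<field> : <type> [ : <cast> ]`, entries are separated by `;`, and surrounding whitespace is ignored:

    // Illustrative only, not part of this patch: the remapping string from the test above.
    String logParserRemapping = String.join(" ;\n",
        "request.firstline.uri.query.ua        :HTTP.USERAGENT",
        "response.header.requestid             :MOD_UNIQUE_ID",
        "request.firstline.uri.query.timestamp :TIME.EPOCH : LONG");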
diff --git a/exec/java-exec/src/test/resources/store/httpd/dfs-bootstrap.httpd b/contrib/format-httpd/src/test/resources/httpd/dfs-bootstrap.httpd
similarity index 100%
rename from exec/java-exec/src/test/resources/store/httpd/dfs-bootstrap.httpd
rename to contrib/format-httpd/src/test/resources/httpd/dfs-bootstrap.httpd
diff --git a/exec/java-exec/src/test/resources/store/httpd/example1.httpd b/contrib/format-httpd/src/test/resources/httpd/example1.httpd
similarity index 100%
rename from exec/java-exec/src/test/resources/store/httpd/example1.httpd
rename to contrib/format-httpd/src/test/resources/httpd/example1.httpd
diff --git a/contrib/format-httpd/src/test/resources/httpd/hackers-access-really-small.httpd b/contrib/format-httpd/src/test/resources/httpd/hackers-access-really-small.httpd
new file mode 100644
index 0000000..decb3c2
--- /dev/null
+++ b/contrib/format-httpd/src/test/resources/httpd/hackers-access-really-small.httpd
@@ -0,0 +1 @@
+195.154.46.135 - - [25/Oct/2015:04:11:25 +0100] "GET /linux/doing-pxe-without-dhcp-control HTTP/1.1" 200 24323 "http://howto.basjes.nl/" "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0"
diff --git a/exec/java-exec/src/test/resources/httpd/hackers-access-small.httpd b/contrib/format-httpd/src/test/resources/httpd/hackers-access-small.httpd
similarity index 100%
rename from exec/java-exec/src/test/resources/httpd/hackers-access-small.httpd
rename to contrib/format-httpd/src/test/resources/httpd/hackers-access-small.httpd
diff --git a/contrib/format-httpd/src/test/resources/httpd/multiformat.access_log b/contrib/format-httpd/src/test/resources/httpd/multiformat.access_log
new file mode 100644
index 0000000..ea5f198
--- /dev/null
+++ b/contrib/format-httpd/src/test/resources/httpd/multiformat.access_log
@@ -0,0 +1,3 @@
+%127.0.0.1 127.0.0.1 127.0.0.1 - - [31/Dec/2012:23:49:40 +0100] "GET /icons/powered_by_rh.png?aap=noot&res=1024x768 HTTP/1.1" 200 1213 80 "" "http://localhost/index.php?mies=wim" 351 "Mozilla/5.0 (X11; Linux i686 on x86_64; rv:11.0) Gecko/20100101 Firefox/11.0" "jquery-ui-theme=Eggplant" "Apache=127.0.0.1.1344635380111339; path=/; domain=.basjes.nl" "-" "\"3780ff-4bd-4c1ce3df91380\""
+127.0.0.1 - - [31/Dec/2012:23:49:41 +0100] "GET /foo1 HTTP/1.1" 200 1213 "http://localhost/index.php?mies=wim&test=true" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"
+127.0.0.1 - - [31/Dec/2013:23:49:41 +0100] "GET /foo2 HTTP/1.1" 200 1213 "http://localhost/index.php?mies=zus&test=false"
diff --git a/contrib/format-httpd/src/test/resources/httpd/typeremap.log b/contrib/format-httpd/src/test/resources/httpd/typeremap.log
new file mode 100644
index 0000000..765bfde
--- /dev/null
+++ b/contrib/format-httpd/src/test/resources/httpd/typeremap.log
@@ -0,0 +1,3 @@
+127.0.0.1 - - [31/Dec/2013:23:49:41 +0100] "GET /something.php?ua=Mozilla/5.0%20(Macintosh;%20Intel%20Mac%20OS%20X%2010_12_3)%20AppleWebKit/537.36%20(KHTML,%20like%20Gecko)%20Chrome/66.0.3359.139%20Safari/537.36&timestamp=1607506430621 HTTP/1.1" 200 1213 "http://localhost/index.php?mies=zus&test=false" "Mozilla/5.0 (compatible; Googlebot/2.1; Yauaa Bot/42.123; +https://yauaa.basjes.nl)"
+127.0.0.1 - - [31/Dec/2013:23:49:41 +0100] "GET /something.php?ua=Mozilla/5.0%20(Macintosh;%20Intel%20Mac%20OS%20X%2010_12_3)%20AppleWebKit/537.36%20(KHTML,%20like%20Gecko)%20Chrome/77.0.3359.139%20Safari/537.36&timestamp=1607506430621 HTTP/1.1" 200 1213 Ucdv38CoEJwAAEusp6EAAADz
+127.0.0.1 - - [31/Dec/2013:23:49:41 +0100] "GET /something.php?ua=Mozilla/5.0%20(Macintosh;%20Intel%20Mac%20OS%20X%2010_12_3)%20AppleWebKit/537.36%20(KHTML,%20like%20Gecko)%20Chrome/55.0.3359.139%20Safari/537.36&timestamp=1607506430621 HTTP/1.1" 200 1213
diff --git a/contrib/format-httpd/src/test/resources/logback-test.txt b/contrib/format-httpd/src/test/resources/logback-test.txt
new file mode 100644
index 0000000..e26ec99
--- /dev/null
+++ b/contrib/format-httpd/src/test/resources/logback-test.txt
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<configuration>
+  <if condition='property("drill.lilith.enable").equalsIgnoreCase("true")'>
+    <then>
+      <appender name="SOCKET" class="de.huxhorn.lilith.logback.appender.ClassicMultiplexSocketAppender">
+        <Compressing>true</Compressing>
+        <ReconnectionDelay>10000</ReconnectionDelay>
+        <IncludeCallerData>true</IncludeCallerData>
+        <RemoteHosts>${LILITH_HOSTNAME:-localhost}</RemoteHosts>
+      </appender>
+
+      <logger name="org.apache.drill" additivity="false">
+        <level value="DEBUG"/>
+        <appender-ref ref="SOCKET"/>
+      </logger>
+
+      <logger name="query.logger" additivity="false">
+        <level value="ERROR"/>
+        <appender-ref ref="SOCKET"/>
+      </logger>
+      <logger name="org.apache.drill.exec.store.http">
+        <level value="DEBUG"/>
+        <appender-ref ref="SOCKET"/>
+      </logger>
+    </then>
+  </if>
+
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <!-- encoders are assigned the type
+         ch.qos.logback.classic.encoder.PatternLayoutEncoder by default -->
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+    </encoder>
+  </appender>
+
+  <!--<root>
+    <level value="WARN"/>
+    <if condition='property("drill.lilith.enable").equalsIgnoreCase("true")'>
+      <then>
+        <appender-ref ref="SOCKET"/>
+      </then>
+    </if>
+    <appender-ref ref="STDOUT"/>
+  </root>-->
+  <logger name="org.apache.drill.exec.store.httpd" additivity="false">
+    <level value="debug" />
+    <appender-ref ref="STDOUT" />
+  </logger>
+  <logger name="nl.basjes.parse" additivity="false">
+    <level value="info" />
+    <appender-ref ref="STDOUT" />
+  </logger>
+</configuration>
\ No newline at end of file
diff --git a/contrib/format-image/README.md b/contrib/format-image/README.md
new file mode 100644
index 0000000..bd9214b
--- /dev/null
+++ b/contrib/format-image/README.md
@@ -0,0 +1,16 @@
+## Overview
+
+Drill can query the metadata of various image formats. The image metadata format plugin is useful for querying a large number of image files stored in a distributed file system, without having to build a metadata repository in advance.
+
+## Attributes
+
+The following table lists configuration attributes:  
+
+Attribute|Default Value|Description
+---------|-------------|-----------
+fileSystemMetadata|true|Set to true to extract filesystem metadata, including the file size and the last modified timestamp; set to false to skip it.
+descriptive|true|Set to true to extract metadata in a human-readable string format. Set to false to extract metadata in a machine-readable typed format.
+timeZone|null|Specifies the time zone used to interpret timestamps that carry no time zone information. If a timestamp includes time zone information, this value is ignored. If set to null, the local time zone is used.
+
+## More
+All of the Drill documentation is kept in the `gh-pages` branch on GitHub. For more detail about image plugin usage, see `gh-pages/_docs/data-sources-and-file-formats/111-image-metadata-format-plugin.md`.
\ No newline at end of file
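Purely as an illustration, not part of this patch, the attributes above could be exercised from a cluster-fixture test in the same way the httpd tests in this change register their format; the storage plugin name and extension below are assumptions:

    // Hypothetical test setup: register the image format on the "cp" storage plugin
    // with typed (non-descriptive) output and an explicit time zone.
    Map<String, FormatPluginConfig> formats = new HashMap<>();
    formats.put("image", new ImageFormatConfig(
        Collections.singletonList("jpg"), // extensions
        true,                             // fileSystemMetadata
        false,                            // descriptive: typed values instead of strings
        "UTC"));                          // timeZone for timestamps without zone info
    cluster.defineFormats("cp", formats);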
diff --git a/contrib/format-image/pom.xml b/contrib/format-image/pom.xml
new file mode 100644
index 0000000..b7b1e80
--- /dev/null
+++ b/contrib/format-image/pom.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <artifactId>drill-contrib-parent</artifactId>
+    <groupId>org.apache.drill.contrib</groupId>
+    <version>1.19.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>drill-format-image</artifactId>
+  <name>Drill : Contrib : Format : Image</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.drewnoakes</groupId>
+      <artifactId>metadata-extractor</artifactId>
+    </dependency>
+
+    <!-- Test dependencies -->
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill</groupId>
+      <artifactId>drill-common</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>copy-java-sources</id>
+            <phase>process-sources</phase>
+            <goals>
+              <goal>copy-resources</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${basedir}/target/classes/org/apache/drill/exec/store/image</outputDirectory>
+              <resources>
+                <resource>
+                  <directory>src/main/java/org/apache/drill/exec/store/image</directory>
+                  <filtering>true</filtering>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDescriptor.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDescriptor.java
similarity index 98%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDescriptor.java
rename to contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDescriptor.java
index 82d42fd..78a4733 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDescriptor.java
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDescriptor.java
@@ -15,18 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.drill.exec.store.image;
 
+import static org.apache.drill.exec.store.image.GenericMetadataDirectory.TAG_DURATION;
+import static org.apache.drill.exec.store.image.GenericMetadataDirectory.TAG_FILE_SIZE;
+import static org.apache.drill.exec.store.image.GenericMetadataDirectory.TAG_ORIENTATION;
+
 import com.drew.lang.annotations.NotNull;
 import com.drew.lang.annotations.Nullable;
 import com.drew.metadata.TagDescriptor;
 
-import static org.apache.drill.exec.store.image.GenericMetadataDirectory.TAG_FILE_SIZE;
-import static org.apache.drill.exec.store.image.GenericMetadataDirectory.TAG_ORIENTATION;
-import static org.apache.drill.exec.store.image.GenericMetadataDirectory.TAG_DURATION;
-
-@SuppressWarnings("WeakerAccess")
 public class GenericMetadataDescriptor extends TagDescriptor<GenericMetadataDirectory>
 {
   public GenericMetadataDescriptor(@NotNull GenericMetadataDirectory directory)
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDirectory.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDirectory.java
similarity index 99%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDirectory.java
rename to contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDirectory.java
index 871a11b..0ff2a05 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDirectory.java
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataDirectory.java
@@ -15,16 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.drill.exec.store.image;
 
+import java.util.HashMap;
+
 import com.drew.lang.annotations.NotNull;
 import com.drew.metadata.Directory;
 import com.drew.metadata.MetadataException;
 
-import java.util.HashMap;
-
-@SuppressWarnings("WeakerAccess")
 public class GenericMetadataDirectory extends Directory
 {
   public static final int TAG_FILE_SIZE = 1;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataReader.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataReader.java
similarity index 99%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataReader.java
rename to contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataReader.java
index cec677d..4268fcf 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/GenericMetadataReader.java
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/GenericMetadataReader.java
@@ -15,9 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.drill.exec.store.image;
 
+import java.util.Date;
+import java.util.TimeZone;
+
+import org.apache.hadoop.fs.FileStatus;
+
 import com.drew.imaging.FileType;
 import com.drew.imaging.png.PngChunkType;
 import com.drew.imaging.png.PngColorType;
@@ -46,10 +50,6 @@
 import com.drew.metadata.png.PngDirectory;
 import com.drew.metadata.wav.WavDirectory;
 import com.drew.metadata.webp.WebpDirectory;
-import org.apache.hadoop.fs.FileStatus;
-
-import java.util.Date;
-import java.util.TimeZone;
 
 public class GenericMetadataReader
 {
diff --git a/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageBatchReader.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageBatchReader.java
new file mode 100644
index 0000000..4f3ac22
--- /dev/null
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageBatchReader.java
@@ -0,0 +1,470 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.image;
+
+import java.io.BufferedInputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+
+import org.apache.drill.common.AutoCloseables;
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.store.dfs.DrillFileSystem;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.drill.exec.vector.accessor.ArrayWriter;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.LoggerFactory;
+
+import com.drew.imaging.FileType;
+import com.drew.imaging.FileTypeDetector;
+import com.drew.imaging.ImageMetadataReader;
+import com.drew.imaging.ImageProcessingException;
+import com.drew.metadata.Directory;
+import com.drew.metadata.Metadata;
+import com.drew.metadata.eps.EpsDirectory;
+import com.drew.metadata.exif.ExifIFD0Directory;
+import com.drew.metadata.xmp.XmpDirectory;
+
+public class ImageBatchReader implements ManagedReader<FileSchemaNegotiator> {
+
+  private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ImageBatchReader.class);
+
+  private final ImageFormatConfig config;
+  private final EasySubScan scan;
+  private CustomErrorContext errorContext;
+  private Path path;
+  private FileStatus fileStatus;
+  private BufferedInputStream metaInputStream;
+  private RowSetLoader loader;
+  private LinkedHashMap<String, ColumnDefn> genericColumns;
+  private Metadata metadata;
+
+  public ImageBatchReader(final ImageFormatConfig config, final EasySubScan scan) {
+    this.config = config;
+    this.scan = scan;
+  }
+
+  @Override
+  public boolean open(FileSchemaNegotiator negotiator) {
+    try {
+      errorContext = negotiator.parentErrorContext();
+      DrillFileSystem dfs = negotiator.fileSystem();
+      path = dfs.makeQualified(negotiator.split().getPath());
+      fileStatus = dfs.getFileStatus(path);
+      metaInputStream = new BufferedInputStream(dfs.openPossiblyCompressedStream(path));
+      logger.debug("The config is {}, root is {}, columns has {}", config, scan.getSelectionRoot(), scan.getColumns());
+    } catch (IOException e) {
+      throw UserException
+              .dataReadError(e)
+              .message("Failure in initial image inputstream. " + e.getMessage())
+              .addContext(errorContext)
+              .build(logger);
+    }
+    // define the schema
+    negotiator.tableSchema(defineMetadata(), false);
+    ResultSetLoader resultSetLoader = negotiator.build();
+    loader = resultSetLoader.writer();
+    // bind the writer for generic columns
+    bindColumns(loader);
+    return true;
+  }
+
+  @Override
+  public boolean next() {
+    try {
+      loader.start();
+      // process generic metadata
+      processGenericMetadata();
+      // process external metadata
+      processExternalMetadata();
+      loader.save();
+    } catch (IOException e) {
+      throw UserException
+              .dataReadError(e)
+              .message("Failed to estimates the file type. " + e.getMessage())
+              .addContext(errorContext)
+              .build(logger);
+    } catch (ImageProcessingException e) {
+      throw UserException
+              .dataReadError(e)
+              .message("Error in reading metadata from inputstream. " + e.getMessage())
+              .addContext(errorContext)
+              .build(logger);
+    } catch (Exception e) {
+      throw UserException
+              .dataReadError(e)
+              .message("Error in processing metadata directory. " + e.getMessage())
+              .addContext(errorContext)
+              .build(logger);
+    }
+    return false;
+  }
+
+  @Override
+  public void close() {
+    AutoCloseables.closeSilently(metaInputStream);
+  }
+
+  private TupleMetadata defineMetadata() {
+    SchemaBuilder builder = new SchemaBuilder();
+    genericColumns = new LinkedHashMap<>();
+    Collection<String> tags = GenericMetadataDirectory._tagNameMap.values();
+    for (String tagName : tags) {
+      if (!config.hasFileSystemMetadata() && ImageMetadataUtils.isSkipTag(tagName)) {
+        continue;
+      }
+      ColumnDefn columnDefn = new GenericColumnDefn(tagName);
+      if (config.isDescriptive()) {
+        columnDefn.defineText(builder);
+      } else {
+        columnDefn.define(builder);
+      }
+      genericColumns.put(ImageMetadataUtils.formatName(tagName), columnDefn);
+    }
+    return builder.buildSchema();
+  }
+
+  private void bindColumns(RowSetLoader loader) {
+    for (ColumnDefn columnDefn : genericColumns.values()) {
+      columnDefn.bind(loader);
+    }
+  }
+
+  private void processGenericMetadata() throws IOException, ImageProcessingException {
+    FileType fileType = FileTypeDetector.detectFileType(metaInputStream);
+    metadata = ImageMetadataReader.readMetadata(metaInputStream);
+    // Read the generic metadata first
+    new GenericMetadataReader().read(fileType, fileStatus, metadata);
+    GenericMetadataDirectory genericMetadata = metadata.getFirstDirectoryOfType(GenericMetadataDirectory.class);
+    // Process the `Generic Metadata Directory`
+    ImageDirectoryProcessor.processGenericMetadataDirectory(genericMetadata, genericColumns, config);
+  }
+
+  private void processExternalMetadata() {
+    boolean skipEPSPreview = false;
+    for (Directory directory : metadata.getDirectories()) {
+      // Skip the `Generic Metadata Directory`
+      String dictName = ImageMetadataUtils.formatName(directory.getName());
+      if (directory instanceof GenericMetadataDirectory) {
+        continue;
+      }
+      if (directory instanceof ExifIFD0Directory && skipEPSPreview) {
+        skipEPSPreview = false;
+        continue;
+      }
+      if (directory instanceof EpsDirectory) {
+        // If an EPS file contains a TIFF preview, skip the next IFD0
+        skipEPSPreview = directory.containsTag(EpsDirectory.TAG_TIFF_PREVIEW_SIZE);
+      }
+      // Process the `External Metadata Directory`
+      MapColumnDefn columnDefn = new MapColumnDefn(dictName).builder(loader);
+      ImageDirectoryProcessor.processDirectory(columnDefn, directory, metadata, config);
+      // Continue to process XmpDirectory if exists
+      if (directory instanceof XmpDirectory) {
+        ImageDirectoryProcessor.processXmpDirectory(columnDefn, (XmpDirectory) directory);
+      }
+    }
+  }
+
+  /**
+   * This class mainly handles schema definition, index binding,
+   * and setting up the vector (column writer) values.
+   * Because the schema of a new vector depends on the data type,
+   * derived classes must override some of these methods.
+   */
+  protected abstract static class ColumnDefn {
+
+    private final String name;
+    private final String originName; // original, unformatted name
+    private ScalarWriter writer;
+
+    public ColumnDefn(String name) {
+      this.originName = name;
+      this.name = ImageMetadataUtils.formatName(name);
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public String getOriginName() {
+      return originName;
+    }
+
+    public ScalarWriter getWriter() {
+      return writer;
+    }
+
+    public void bind(RowSetLoader loader) {
+      writer = loader.scalar(name);
+    }
+
+    public void defineText(SchemaBuilder builder) {
+      builder.add(getName(), Types.optional(MinorType.VARCHAR));
+    }
+
+    public abstract void define(SchemaBuilder builder);
+
+    public abstract void load(Object value);
+
+    public ScalarWriter addText(String name) {
+      throw new UnsupportedOperationException();
+    }
+
+    public ArrayWriter addList(String name) {
+      throw new UnsupportedOperationException();
+    }
+
+    public ArrayWriter addListMap(String name) {
+      throw new UnsupportedOperationException();
+    }
+
+    public TupleWriter addMap(String name) {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  /**
+   * Processes the image GenericMetadataDirectory metadata
+   * and creates the data type based on the different tags.
+   * @see org.apache.drill.exec.store.image.GenericMetadataDirectory
+   */
+  protected static class GenericColumnDefn extends ColumnDefn {
+
+    public GenericColumnDefn(String name) {
+      super(name);
+    }
+
+    @Override
+    public void define(SchemaBuilder builder) {
+      if (ImageMetadataUtils.isVarchar(getOriginName())) {
+        builder.add(getName(), Types.optional(MinorType.VARCHAR));
+      } else if (ImageMetadataUtils.isInt(getOriginName())) {
+        builder.add(getName(), Types.optional(MinorType.INT));
+      } else if (ImageMetadataUtils.isLong(getOriginName())) {
+        builder.add(getName(), Types.optional(MinorType.BIGINT));
+      } else if (ImageMetadataUtils.isDouble(getOriginName())) {
+        builder.add(getName(), Types.optional(MinorType.FLOAT8));
+      } else if (ImageMetadataUtils.isBoolean(getOriginName())) {
+        builder.add(getName(), Types.optional(MinorType.BIT));
+      } else if (ImageMetadataUtils.isDate(getOriginName())) {
+        builder.add(getName(), Types.optional(MinorType.TIMESTAMP));
+      }
+    }
+
+    @Override
+    public void load(Object value) {
+      getWriter().setObject(value);
+    }
+  }
+
+  /**
+   * Processes the map writer (nested structure).
+   * It works not only with scalars, but also provides an entry point
+   * for creating nested structures such as a List or a List-Map within a Map.
+   */
+  protected static class MapColumnDefn extends ColumnDefn {
+
+    private int index;
+    private TupleWriter writer;
+
+    public MapColumnDefn(String name) {
+      super(name);
+    }
+
+    @Override
+    public void bind(RowSetLoader loader) {
+      index = loader.tupleSchema().index(getName());
+      if (index == -1) {
+        index = loader.addColumn(SchemaBuilder.columnSchema(getName(), MinorType.MAP, DataMode.REQUIRED));
+      }
+      writer = loader.tuple(index);
+    }
+
+    @Override
+    public void define(SchemaBuilder builder) { }
+
+    @Override
+    public void load(Object value) { }
+
+    public MapColumnDefn builder(RowSetLoader loader) {
+      bind(loader);
+      return this;
+    }
+
+    public MapColumnDefn builder(TupleWriter writer) {
+      this.writer = writer;
+      return this;
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, b : "2" }
+     */
+    @Override
+    public ScalarWriter addText(String name) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.VARCHAR, DataMode.OPTIONAL));
+      } else { // rewrite the value
+        writer.column(name).events().restartRow();
+      }
+      return writer.scalar(index);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, [ b : "2" ] }
+     */
+    @Override
+    public ArrayWriter addList(String name) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.VARCHAR, DataMode.REPEATED));
+      }
+      return writer.array(index);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, [ { b : 2 } ] }
+     */
+    @Override
+    public ArrayWriter addListMap(String name) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.MAP, DataMode.REPEATED));
+      }
+      return writer.array(index);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, { b : 2 } }
+     */
+    @Override
+    public TupleWriter addMap(String name) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.MAP, DataMode.REQUIRED));
+        return writer.tuple(index);
+      }
+      return writer.tuple(name);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, [ 0, -1, 0, -1 ] }
+     */
+    public ArrayWriter addListByte(String name) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.TINYINT, DataMode.REPEATED));
+      }
+      return writer.array(index);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, b : 2.0 }
+     */
+    public ScalarWriter addDouble(String name) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.FLOAT8, DataMode.OPTIONAL));
+      }
+      return writer.scalar(index);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, b : date() }
+     */
+    public ScalarWriter addDate(String name) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.TIMESTAMP, DataMode.OPTIONAL));
+      }
+      return writer.scalar(index);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, b : object() }
+     */
+    public ScalarWriter addObject(String name, MinorType type) {
+      index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, type, DataMode.OPTIONAL));
+      }
+      return writer.scalar(index);
+    }
+
+    /**
+     * example : { a : 1 } > { a : 1, b : 2 }
+     */
+    public ScalarWriter addIntToMap(TupleWriter writer, String name) {
+      int index = writer.tupleSchema().index(name);
+      if (index == -1) {
+        index = writer.addColumn(SchemaBuilder.columnSchema(name, MinorType.INT, DataMode.OPTIONAL));
+      }
+      return writer.scalar(index);
+    }
+  }
+
+  /**
+   * Processes the list-map through an array writer.
+   */
+  protected static class ListColumnDefn extends ColumnDefn {
+
+    private ArrayWriter writer;
+
+    public ListColumnDefn(String name) {
+      super(name);
+    }
+
+    @Override
+    public void define(SchemaBuilder builder) { }
+
+    @Override
+    public void load(Object value) { }
+
+    public ListColumnDefn builder(ArrayWriter writer) {
+      this.writer = writer;
+      return this;
+    }
+
+    /**
+     * example : [ ] > [ { a : "1" } ]
+     */
+    @Override
+    public ScalarWriter addText(String name) {
+      TupleWriter map = writer.tuple();
+      int index = map.tupleSchema().index(name);
+      if (index == -1) {
+        index = map.addColumn(SchemaBuilder.columnSchema(name, MinorType.VARCHAR, DataMode.OPTIONAL));
+      }
+      return map.scalar(index);
+    }
+  }
+}
\ No newline at end of file
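To make the `ColumnDefn` hierarchy above more concrete, here is a minimal sketch, not taken from this patch, of how a directory map grows as tags are written; the tag names and values are invented, and `loader` stands for the `RowSetLoader` obtained in `open()`:

    // Hypothetical usage of the helpers above: a directory becomes a MAP column and
    // each tag adds a child column on first use, so the schema grows as tags appear.
    MapColumnDefn exif = new MapColumnDefn("Exif IFD0").builder(loader);
    exif.addText("Make").setString("Canon");                     // optional VARCHAR child
    exif.addObject("Orientation", MinorType.INT).setInt(1);      // optional INT child
    exif.addList("Keywords").setObject(new String[] {"a", "b"}); // repeated VARCHAR child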
diff --git a/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageDirectoryProcessor.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageDirectoryProcessor.java
new file mode 100644
index 0000000..ba5ddfd
--- /dev/null
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageDirectoryProcessor.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.image;
+
+import java.util.Date;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.TimeZone;
+
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.store.image.ImageBatchReader.ColumnDefn;
+import org.apache.drill.exec.store.image.ImageBatchReader.ListColumnDefn;
+import org.apache.drill.exec.store.image.ImageBatchReader.MapColumnDefn;
+import org.apache.drill.exec.vector.accessor.ArrayWriter;
+import org.apache.drill.exec.vector.accessor.ColumnWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.joda.time.Instant;
+import org.slf4j.LoggerFactory;
+
+import com.adobe.internal.xmp.XMPException;
+import com.adobe.internal.xmp.XMPIterator;
+import com.adobe.internal.xmp.XMPMeta;
+import com.adobe.internal.xmp.options.IteratorOptions;
+import com.adobe.internal.xmp.properties.XMPPropertyInfo;
+import com.drew.lang.KeyValuePair;
+import com.drew.lang.Rational;
+import com.drew.metadata.Directory;
+import com.drew.metadata.Metadata;
+import com.drew.metadata.StringValue;
+import com.drew.metadata.Tag;
+import com.drew.metadata.exif.ExifIFD0Directory;
+import com.drew.metadata.exif.ExifSubIFDDirectory;
+import com.drew.metadata.exif.GpsDirectory;
+import com.drew.metadata.jpeg.JpegComponent;
+import com.drew.metadata.png.PngDirectory;
+import com.drew.metadata.xmp.XmpDirectory;
+
+/**
+ * Although each image format can contain different metadata,
+ * all formats share some common basic information. This class handles
+ * the basic metadata as well as complex tags.
+ * @see org.apache.drill.exec.store.image.GenericMetadataDirectory
+ * @see com.drew.metadata.Directory
+ */
+public class ImageDirectoryProcessor {
+
+  private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ImageDirectoryProcessor.class);
+
+  protected static void processGenericMetadataDirectory(final GenericMetadataDirectory directory,
+                                                        final LinkedHashMap<String, ColumnDefn> genericColumns,
+                                                        final ImageFormatConfig config) {
+    for (Tag tag : directory.getTags()) {
+      final int tagType = tag.getTagType();
+      if (!config.hasFileSystemMetadata() && ImageMetadataUtils.isSkipTag(tag.getTagName())) {
+        continue;
+      }
+      genericColumns.get(ImageMetadataUtils.formatName(tag.getTagName())).load(config.isDescriptive()
+          ? directory.getDescription(tagType)
+          : directory.getObject(tagType));
+    }
+  }
+
+  protected static void processXmpDirectory(final MapColumnDefn writer, final XmpDirectory directory) {
+    XMPMeta xmpMeta = directory.getXMPMeta();
+    if (xmpMeta != null) {
+      try {
+        IteratorOptions iteratorOptions = new IteratorOptions().setJustLeafnodes(true);
+        for (final XMPIterator i = xmpMeta.iterator(iteratorOptions); i.hasNext();) {
+          try {
+            XMPPropertyInfo prop = (XMPPropertyInfo) i.next();
+            String path = prop.getPath();
+            String value = prop.getValue();
+            if (path != null && value != null) {
+              // handling lang-alt array items
+              if (prop.getOptions().getHasLanguage()) {
+                XMPPropertyInfo langProp = (XMPPropertyInfo) i.next();
+                if (langProp.getPath().endsWith("/xml:lang")) {
+                  String lang = langProp.getValue();
+                  path = path.replaceFirst("\\[\\d+\\]$", "") +
+                      (lang.equals("x-default") ? "" : "_" + lang);
+                }
+              }
+              ColumnDefn rootColumn = writer;
+              ColumnWriter subColumn = null;
+              String[] elements = path.replaceAll("/\\w+:", "/").split(":|/|(?=\\[)");
+              // 1. lookup and create nested structure
+              for (int j = 1; j < elements.length; j++) {
+                String parent = elements[j - 1];
+                boolean isList = elements[j].startsWith("[");
+                if (!parent.startsWith("[")) { // skipped. such as parent is [1] but not the last element
+                  final String formatName = ImageMetadataUtils.formatName(parent);
+                  if (isList) {
+                    if (j + 1 == elements.length) { // for list
+                      subColumn = rootColumn.addList(formatName);
+                    } else { // for list-map
+                      subColumn = rootColumn.addListMap(formatName);
+                    }
+                    rootColumn = new ListColumnDefn(formatName).builder((ArrayWriter) subColumn);
+                  } else { // for map
+                    subColumn = ((MapColumnDefn) rootColumn).addMap(formatName);
+                    // set up the current writer in nested structure
+                    rootColumn = new MapColumnDefn(formatName).builder((TupleWriter) subColumn);
+                  }
+                }
+              }
+              // 2. set up the value for writer
+              String parent = elements[elements.length - 1];
+              if (parent.startsWith("[")) {
+                subColumn.setObject(new String[] { value });
+              } else {
+                rootColumn.addText(ImageMetadataUtils.formatName(parent)).setString(value);
+                if (subColumn instanceof ArrayWriter) {
+                  ((ArrayWriter) subColumn).save();
+                }
+              }
+            }
+          } catch (Exception skipped) { // simply skip this property
+            logger.warn("Error in written xmp metadata : {}", skipped.getMessage());
+          }
+        }
+      } catch (XMPException ignored) {
+        logger.warn("Error in processing xmp directory : {}", ignored.getMessage());
+      }
+    }
+  }
+
+  protected static void processDirectory(final MapColumnDefn writer,
+                                         final Directory directory,
+                                         final Metadata metadata,
+                                         final ImageFormatConfig config) {
+    TimeZone timeZone = (config.getTimeZone() != null)
+        ? TimeZone.getTimeZone(config.getTimeZone())
+        : TimeZone.getDefault();
+    for (Tag tag : directory.getTags()) {
+      try {
+        final int tagType = tag.getTagType();
+        Object value;
+        if (config.isDescriptive() || ImageMetadataUtils.isDescriptionTag(directory, tagType)) {
+          value = directory.getDescription(tagType);
+          if (directory instanceof PngDirectory) {
+            if (((PngDirectory) directory).getPngChunkType().areMultipleAllowed()) {
+              value = new String[] { (String) value };
+            }
+          }
+        } else {
+          value = directory.getObject(tagType);
+          if (directory instanceof ExifIFD0Directory && tagType == ExifIFD0Directory.TAG_DATETIME) {
+            ExifSubIFDDirectory exifSubIFDDir = metadata.getFirstDirectoryOfType(ExifSubIFDDirectory.class);
+            String subsecond = null;
+            if (exifSubIFDDir != null) {
+              subsecond = exifSubIFDDir.getString(ExifSubIFDDirectory.TAG_SUBSECOND_TIME);
+            }
+            value = directory.getDate(tagType, subsecond, timeZone);
+          } else if (directory instanceof ExifSubIFDDirectory) {
+            if (tagType == ExifSubIFDDirectory.TAG_DATETIME_ORIGINAL) {
+              value = ((ExifSubIFDDirectory) directory).getDateOriginal(timeZone);
+            } else if (tagType == ExifSubIFDDirectory.TAG_DATETIME_DIGITIZED) {
+              value = ((ExifSubIFDDirectory) directory).getDateDigitized(timeZone);
+            }
+          } else if (directory instanceof GpsDirectory) {
+            if (tagType == GpsDirectory.TAG_LATITUDE) {
+              value = ((GpsDirectory) directory).getGeoLocation().getLatitude();
+            } else if (tagType == GpsDirectory.TAG_LONGITUDE) {
+              value = ((GpsDirectory) directory).getGeoLocation().getLongitude();
+            }
+          }
+          if (ImageMetadataUtils.isVersionTag(directory, tagType)) {
+            value = directory.getString(tagType, "US-ASCII");
+          } else if (ImageMetadataUtils.isDateTag(directory, tagType)) {
+            value = directory.getDate(tagType, timeZone);
+          }
+        }
+        processValue(writer, ImageMetadataUtils.formatName(tag.getTagName()), value);
+      } catch (Exception skipped) {
+        logger.warn("Error in processing image directory : {}", skipped.getMessage());
+      }
+    }
+  }
+
+  /**
+   * Convert the value if necessary
+   * @see org.apache.drill.exec.vector.accessor.writer.AbstractScalarWriter#setObject(Object)
+   * @param writer MapColumnDefn
+   * @param name Tag Name
+   * @param value  Tag Value
+   */
+  protected static void processValue(final MapColumnDefn writer, final String name, final Object value) {
+    if (value == null) {
+      return;
+    }
+    if (value instanceof Boolean) {
+      writer.addObject(name, MinorType.BIT).setObject(value);
+    } else if (value instanceof Byte) {
+      writer.addObject(name, MinorType.TINYINT).setObject(value);
+    } else if (value instanceof Short) {
+      writer.addObject(name, MinorType.SMALLINT).setObject(value);
+    } else if (value instanceof Integer) {
+      writer.addObject(name, MinorType.INT).setObject(value);
+    } else if (value instanceof Long) {
+      writer.addObject(name, MinorType.BIGINT).setObject(value);
+    } else if (value instanceof Float) {
+      writer.addObject(name, MinorType.FLOAT4).setObject(value);
+    } else if (value instanceof Double) {
+      writer.addObject(name, MinorType.FLOAT8).setObject(value);
+    } else if (value instanceof Rational) {
+      writer.addDouble(name).setDouble(((Rational) value).doubleValue());
+    } else if (value instanceof StringValue) {
+      writer.addText(name).setString(((StringValue) value).toString());
+    } else if (value instanceof Date) {
+      writer.addDate(name).setTimestamp(Instant.ofEpochMilli(((Date) value).getTime()));
+    } else if (value instanceof String[]) {
+      writer.addList(name).setObject(value);
+    } else if (value instanceof byte[]) {
+      writer.addListByte(name).setObject(value);
+    } else if (value instanceof JpegComponent) {
+      JpegComponent v = (JpegComponent) value;
+      TupleWriter component = writer.addMap(name);
+      writer.addIntToMap(component, TagName.JPEGCOMPONENT_CID).setInt(v.getComponentId());
+      writer.addIntToMap(component, TagName.JPEGCOMPONENT_HSF).setInt(v.getHorizontalSamplingFactor());
+      writer.addIntToMap(component, TagName.JPEGCOMPONENT_VSF).setInt(v.getVerticalSamplingFactor());
+      writer.addIntToMap(component, TagName.JPEGCOMPONENT_QTN).setInt(v.getQuantizationTableNumber());
+    } else if (value instanceof List<?>) {
+      ArrayWriter listMap = writer.addListMap(name);
+      ListColumnDefn list = new ListColumnDefn(name).builder(listMap);
+      for (Object v : (List<?>) value) {
+        if (v instanceof KeyValuePair) {
+          list.addText(TagName.KEYVALUEPAIR_K).setString(((KeyValuePair) v).getKey());
+          list.addText(TagName.KEYVALUEPAIR_V).setString(((KeyValuePair) v).getValue().toString());
+        } else {
+          list.addText(TagName.KEYVALUEPAIR_V).setString(v.toString());
+        }
+        listMap.save();
+      }
+    } else {
+      writer.addText(name).setString(value.toString());
+    }
+  }
+
+  private static class TagName {
+    public static final String JPEGCOMPONENT_CID = "ComponentId";
+    public static final String JPEGCOMPONENT_HSF = "HorizontalSamplingFactor";
+    public static final String JPEGCOMPONENT_VSF = "VerticalSamplingFactor";
+    public static final String JPEGCOMPONENT_QTN = "QuantizationTableNumber";
+    public static final String KEYVALUEPAIR_K = "Key";
+    public static final String KEYVALUEPAIR_V = "Value";
+  }
+}
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageFormatConfig.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageFormatConfig.java
similarity index 85%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageFormatConfig.java
rename to contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageFormatConfig.java
index a41a0d5..9519130 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageFormatConfig.java
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageFormatConfig.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.drill.exec.store.image;
 
 import java.util.List;
@@ -23,51 +22,51 @@
 
 import org.apache.drill.common.PlanStringBuilder;
 import org.apache.drill.common.logical.FormatPluginConfig;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonInclude.Include;
 import com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
 
-@JsonTypeName("image") @JsonInclude(Include.NON_DEFAULT)
+@JsonTypeName(ImageFormatConfig.NAME)
+@JsonInclude(JsonInclude.Include.NON_DEFAULT)
 public class ImageFormatConfig implements FormatPluginConfig {
 
+  public static final String NAME = "image";
   private final List<String> extensions;
   private final boolean fileSystemMetadata;
   private final boolean descriptive;
   private final String timeZone;
 
-  public ImageFormatConfig() {
-    this(null, null, null, null);
-  }
-
   @JsonCreator
   public ImageFormatConfig(
       @JsonProperty("extensions") List<String> extensions,
       @JsonProperty("fileSystemMetadata") Boolean fileSystemMetadata,
       @JsonProperty("descriptive") Boolean descriptive,
       @JsonProperty("timeZone") String timeZone) {
-    this.extensions = extensions == null ?
-        ImmutableList.of() : ImmutableList.copyOf(extensions);
-    this.fileSystemMetadata = fileSystemMetadata == null ? true : fileSystemMetadata;
-    this.descriptive = descriptive == null ? true : descriptive;
+    this.extensions = extensions == null ? ImmutableList.of() : ImmutableList.copyOf(extensions);
+    this.fileSystemMetadata = fileSystemMetadata == null || fileSystemMetadata;
+    this.descriptive = descriptive == null || descriptive;
     this.timeZone = timeZone;
   }
 
+  @JsonProperty("extensions")
   public List<String> getExtensions() {
     return extensions;
   }
 
+  @JsonProperty("fileSystemMetadata")
   public boolean hasFileSystemMetadata() {
     return fileSystemMetadata;
   }
 
+  @JsonProperty("descriptive")
   public boolean isDescriptive() {
     return descriptive;
   }
 
+  @JsonProperty("timeZone")
   public String getTimeZone() {
     return timeZone;
   }
diff --git a/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageFormatPlugin.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageFormatPlugin.java
new file mode 100644
index 0000000..22ee644
--- /dev/null
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageFormatPlugin.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.image;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.logical.StoragePluginConfig;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileReaderFactory;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileScanBuilder;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.hadoop.conf.Configuration;
+
+public class ImageFormatPlugin extends EasyFormatPlugin<ImageFormatConfig> {
+
+  public ImageFormatPlugin(String name,
+                           DrillbitContext context,
+                           Configuration fsConf,
+                           StoragePluginConfig storageConfig,
+                           ImageFormatConfig formatConfig) {
+    super(name, easyConfig(fsConf, formatConfig), context, storageConfig, formatConfig);
+  }
+
+  private static EasyFormatConfig easyConfig(Configuration fsConf, ImageFormatConfig pluginConfig) {
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .supportsProjectPushdown(true)
+        .defaultName(ImageFormatConfig.NAME)
+        .build();
+  }
+
+  private static class ImageReaderFactory extends FileReaderFactory {
+
+    private final ImageFormatConfig config;
+    private final EasySubScan scan;
+
+    public ImageReaderFactory(ImageFormatConfig config, EasySubScan scan) {
+      this.config = config;
+      this.scan = scan;
+    }
+
+    @Override
+    public ManagedReader<? extends FileSchemaNegotiator> newReader() {
+      return new ImageBatchReader(config, scan);
+    }
+  }
+
+  @Override
+  public ManagedReader<? extends FileSchemaNegotiator> newBatchReader(EasySubScan scan, OptionManager options)
+      throws ExecutionSetupException {
+    return new ImageBatchReader(formatConfig, scan);
+  }
+
+  @Override
+  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan)
+      throws ExecutionSetupException {
+    FileScanBuilder builder = new FileScanBuilder();
+    builder.setReaderFactory(new ImageReaderFactory(formatConfig, scan));
+
+    initScanBuilder(builder, scan);
+    builder.nullType(Types.optional(MinorType.VARCHAR));
+    return builder;
+  }
+}
\ No newline at end of file
diff --git a/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageMetadataUtils.java b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageMetadataUtils.java
new file mode 100644
index 0000000..25be769
--- /dev/null
+++ b/contrib/format-image/src/main/java/org/apache/drill/exec/store/image/ImageMetadataUtils.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.image;
+
+import java.util.HashMap;
+
+import com.drew.metadata.Directory;
+import com.drew.metadata.exif.ExifInteropDirectory;
+import com.drew.metadata.exif.ExifSubIFDDirectory;
+import com.drew.metadata.exif.PanasonicRawIFD0Directory;
+import com.drew.metadata.exif.makernotes.FujifilmMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.NikonType2MakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusCameraSettingsMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusEquipmentMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusFocusInfoMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusImageProcessingMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusRawDevelopment2MakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusRawDevelopmentMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.OlympusRawInfoMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.PanasonicMakernoteDirectory;
+import com.drew.metadata.exif.makernotes.SamsungType2MakernoteDirectory;
+import com.drew.metadata.exif.makernotes.SonyType6MakernoteDirectory;
+import com.drew.metadata.icc.IccDirectory;
+import com.drew.metadata.photoshop.PhotoshopDirectory;
+import com.drew.metadata.png.PngDirectory;
+
+public class ImageMetadataUtils {
+
+  public static boolean isVarchar(String name) {
+    HashMap<Integer, String> tags = GenericMetadataDirectory._tagNameMap;
+    // Format,Color Mode,Video Codec,Audio Codec
+    return name.equals(tags.get(GenericMetadataDirectory.TAG_FORMAT))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_COLOR_MODE))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_VIDEO_CODEC))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_AUDIO_CODEC));
+  }
+
+  public static boolean isInt(String name) {
+    HashMap<Integer, String> tags = GenericMetadataDirectory._tagNameMap;
+    // Pixel Width,Pixel Height,Orientation,Bits Per Pixel,Audio Sample Size
+    return name.equals(tags.get(GenericMetadataDirectory.TAG_PIXEL_WIDTH))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_PIXEL_HEIGHT))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_ORIENTATION))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_BITS_PER_PIXEL))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_AUDIO_SAMPLE_SIZE));
+  }
+
+  public static boolean isLong(String name) {
+    HashMap<Integer, String> tags = GenericMetadataDirectory._tagNameMap;
+    // File Size,Duration
+    return name.equals(tags.get(GenericMetadataDirectory.TAG_FILE_SIZE))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_DURATION));
+  }
+
+  public static boolean isDouble(String name) {
+    HashMap<Integer, String> tags = GenericMetadataDirectory._tagNameMap;
+    // DPI Width,DPI Height,Frame Rate,Audio Sample Rate
+    return name.equals(tags.get(GenericMetadataDirectory.TAG_DPI_WIDTH))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_DPI_HEIGHT))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_FRAME_RATE))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_AUDIO_SAMPLE_RATE));
+  }
+
+  public static boolean isBoolean(String name) {
+    HashMap<Integer, String> tags = GenericMetadataDirectory._tagNameMap;
+    // Has Alpha
+    return name.equals(tags.get(GenericMetadataDirectory.TAG_HAS_ALPHA));
+  }
+
+  public static boolean isDate(String name) {
+    HashMap<Integer, String> tags = GenericMetadataDirectory._tagNameMap;
+    // File Date Time
+    return name.equals(tags.get(GenericMetadataDirectory.TAG_FILE_DATE_TIME));
+  }
+
+  /**
+   * Format the tag name: remove spaces and the '-' and '/' separators, and
+   * upper-case the character that follows each removed separator.
+   * @param tagName the raw tag name reported by the metadata directory
+   * @return the formatted tag name with separators removed
+   */
+  public static String formatName(final String tagName) {
+    StringBuilder builder = new StringBuilder();
+    boolean upperCase = true;
+    for (char c : tagName.toCharArray()) {
+      if (c == ' ' || c == '-' || c == '/') {
+        upperCase = true;
+      } else {
+        builder.append(upperCase ? Character.toUpperCase(c) : c);
+        upperCase = false;
+      }
+    }
+    return builder.toString();
+  }
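+
+  // Usage sketch (derived from the loop above):
+  //   formatName("Pixel Width") -> "PixelWidth"
+  //   formatName("Date/Time")   -> "DateTime"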
+
+  /**
+   * Decide whether a tag should be skipped when the fileSystemMetadata option is
+   * disabled; file size and file date/time are supplied by the file system.
+   * @param name the tag name to check
+   * @return true if the tag is file-system metadata and should be skipped
+   */
+  public static boolean isSkipTag(String name) {
+    HashMap<Integer, String> tags = GenericMetadataDirectory._tagNameMap;
+    return name.equals(tags.get(GenericMetadataDirectory.TAG_FILE_SIZE))
+        || name.equals(tags.get(GenericMetadataDirectory.TAG_FILE_DATE_TIME));
+  }
+
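+  // The 0x20202020..0x7a7a7a7a range below presumably matches ICC tag types whose four
+  // bytes are printable ASCII characters (ICC tags are four-character signatures), so
+  // such tags are treated as descriptive text rather than raw values.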
+  public static boolean isDescriptionTag(final Directory directory, final int tagType) {
+    return directory instanceof IccDirectory
+        && tagType > 0x20202020
+        && tagType < 0x7a7a7a7a
+        || directory instanceof PhotoshopDirectory;
+  }
+
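+  // Each clause below pairs a metadata-extractor directory type with its version-number
+  // tag; these tags are presumably special-cased so the reader can render them as
+  // readable version strings rather than raw byte arrays.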
+  public static boolean isVersionTag(final Directory directory, final int tagType) {
+    return directory instanceof ExifSubIFDDirectory &&
+        (tagType == ExifSubIFDDirectory.TAG_EXIF_VERSION || tagType == ExifSubIFDDirectory.TAG_FLASHPIX_VERSION) ||
+        directory instanceof ExifInteropDirectory &&
+        tagType == ExifInteropDirectory.TAG_INTEROP_VERSION ||
+        directory instanceof FujifilmMakernoteDirectory &&
+        tagType == FujifilmMakernoteDirectory.TAG_MAKERNOTE_VERSION ||
+        directory instanceof NikonType2MakernoteDirectory &&
+        tagType == NikonType2MakernoteDirectory.TAG_FIRMWARE_VERSION ||
+        directory instanceof OlympusCameraSettingsMakernoteDirectory &&
+        tagType == OlympusCameraSettingsMakernoteDirectory.TagCameraSettingsVersion ||
+        directory instanceof OlympusEquipmentMakernoteDirectory &&
+        tagType == OlympusEquipmentMakernoteDirectory.TAG_EQUIPMENT_VERSION ||
+        directory instanceof OlympusFocusInfoMakernoteDirectory &&
+        tagType == OlympusFocusInfoMakernoteDirectory.TagFocusInfoVersion ||
+        directory instanceof OlympusImageProcessingMakernoteDirectory &&
+        tagType == OlympusImageProcessingMakernoteDirectory.TagImageProcessingVersion ||
+        directory instanceof OlympusMakernoteDirectory &&
+        tagType == OlympusMakernoteDirectory.TAG_MAKERNOTE_VERSION ||
+        directory instanceof OlympusRawDevelopment2MakernoteDirectory &&
+        tagType == OlympusRawDevelopment2MakernoteDirectory.TagRawDevVersion ||
+        directory instanceof OlympusRawDevelopmentMakernoteDirectory &&
+        tagType == OlympusRawDevelopmentMakernoteDirectory.TagRawDevVersion ||
+        directory instanceof OlympusRawInfoMakernoteDirectory &&
+        tagType == OlympusRawInfoMakernoteDirectory.TagRawInfoVersion ||
+        directory instanceof PanasonicMakernoteDirectory &&
+        (tagType == PanasonicMakernoteDirectory.TAG_FIRMWARE_VERSION
+         || tagType == PanasonicMakernoteDirectory.TAG_MAKERNOTE_VERSION
+         || tagType == PanasonicMakernoteDirectory.TAG_EXIF_VERSION) ||
+        directory instanceof SamsungType2MakernoteDirectory &&
+        tagType == SamsungType2MakernoteDirectory.TagMakerNoteVersion ||
+        directory instanceof SonyType6MakernoteDirectory &&
+        tagType == SonyType6MakernoteDirectory.TAG_MAKERNOTE_THUMB_VERSION ||
+        directory instanceof PanasonicRawIFD0Directory &&
+        tagType == PanasonicRawIFD0Directory.TagPanasonicRawVersion;
+  }
+
+  public static boolean isDateTag(final Directory directory, final int tagType) {
+    return directory instanceof IccDirectory && tagType == IccDirectory.TAG_PROFILE_DATETIME
+             || directory instanceof PngDirectory && tagType == PngDirectory.TAG_LAST_MODIFICATION_TIME;
+  }
+}
\ No newline at end of file
diff --git a/contrib/format-image/src/main/resources/bootstrap-format-plugins.json b/contrib/format-image/src/main/resources/bootstrap-format-plugins.json
new file mode 100644
index 0000000..a74d820
--- /dev/null
+++ b/contrib/format-image/src/main/resources/bootstrap-format-plugins.json
@@ -0,0 +1,46 @@
+{
+  "storage": {
+    "dfs": {
+      "type": "file",
+      "formats": {
+        "image": {
+          "type": "image",
+          "fileSystemMetadata" : false,
+          "descriptive" : true,
+          "timeZone" : null,
+          "extensions": [
+            "jpg", "jpeg", "jpe", "tif", "tiff", "dng", "psd", "png", "bmp", "gif", "ico", "pcx", "wav", "wave", "avi", "webp", "mov", "mp4", "m4a", "m4p", "m4b", "m4r", "m4v", "3gp", "3g2", "eps", "epsf", "epsi", "ai", "arw", "crw", "cr2", "nef", "orf", "raf", "rw2", "rwl", "srw", "x3f"
+          ]
+        }
+      }
+    },
+    "cp": {
+      "type": "file",
+      "formats": {
+        "image": {
+          "type": "image",
+          "fileSystemMetadata" : false,
+          "descriptive" : true,
+          "timeZone" : null,
+          "extensions": [
+            "jpg", "jpeg", "jpe", "tif", "tiff", "dng", "psd", "png", "bmp", "gif", "ico", "pcx", "wav", "wave", "avi", "webp", "mov", "mp4", "m4a", "m4p", "m4b", "m4r", "m4v", "3gp", "3g2", "eps", "epsf", "epsi", "ai", "arw", "crw", "cr2", "nef", "orf", "raf", "rw2", "rwl", "srw", "x3f"
+          ]
+        }
+      }
+    },
+    "s3": {
+      "type": "file",
+      "formats": {
+        "image": {
+          "type": "image",
+          "fileSystemMetadata" : false,
+          "descriptive" : true,
+          "timeZone" : null,
+          "extensions": [
+            "jpg", "jpeg", "jpe", "tif", "tiff", "dng", "psd", "png", "bmp", "gif", "ico", "pcx", "wav", "wave", "avi", "webp", "mov", "mp4", "m4a", "m4p", "m4b", "m4r", "m4v", "3gp", "3g2", "eps", "epsf", "epsi", "ai", "arw", "crw", "cr2", "nef", "orf", "raf", "rw2", "rwl", "srw", "x3f"
+          ]
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/contrib/format-image/src/main/resources/drill-module.conf b/contrib/format-image/src/main/resources/drill-module.conf
new file mode 100644
index 0000000..761949f
--- /dev/null
+++ b/contrib/format-image/src/main/resources/drill-module.conf
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#  This file tells Drill to consider this module when class path scanning.
+#  This file can also include any supplementary configuration information.
+#  This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md for more information.
+
+drill.classpath.scanning.packages += "org.apache.drill.exec.store.image"
diff --git a/contrib/format-image/src/test/java/org/apache/drill/exec/store/image/TestImageRecordReader.java b/contrib/format-image/src/test/java/org/apache/drill/exec/store/image/TestImageRecordReader.java
new file mode 100644
index 0000000..5c2a24e
--- /dev/null
+++ b/contrib/format-image/src/test/java/org/apache/drill/exec/store/image/TestImageRecordReader.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.image;
+
+import static org.apache.drill.test.rowSet.RowSetUtilities.mapArray;
+import static org.apache.drill.test.rowSet.RowSetUtilities.mapValue;
+import static org.apache.drill.test.rowSet.RowSetUtilities.singleMap;
+import static org.apache.drill.test.rowSet.RowSetUtilities.strArray;
+import static org.junit.Assert.assertEquals;
+
+import java.nio.file.Paths;
+import java.util.Arrays;
+
+import org.apache.drill.categories.RowSetTests;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.QueryBuilder;
+import org.apache.drill.test.QueryTestUtil;
+import org.apache.drill.test.rowSet.RowSetComparison;
+import org.joda.time.Instant;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(RowSetTests.class)
+public class TestImageRecordReader extends ClusterTest {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
+    dirTestWatcher.copyResourceToRoot(Paths.get("image/"));
+  }
+
+  @Test
+  public void testStarQuery() throws Exception {
+    cluster.defineFormat("dfs", "image", new ImageFormatConfig(Arrays.asList("bmp"), false, false, null));
+    String sql = "select * from dfs.`image/*.bmp`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(1, sets.rowCount());
+    sets.clear();
+  }
+
+  @Test
+  public void testExplicitQuery() throws Exception {
+    cluster.defineFormat("dfs", "image", new ImageFormatConfig(Arrays.asList("jpg"), false, false, null));
+    String sql = "select Format, PixelWidth, HasAlpha, `XMP` from dfs.`image/withExifAndIptc.jpg`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("Format", MinorType.VARCHAR)
+        .addNullable("PixelWidth", MinorType.INT)
+        .addNullable("HasAlpha", MinorType.BIT)
+        .addMap("XMP")
+          .addNullable("XMPValueCount", MinorType.INT)
+          .addMap("Photoshop")
+            .addNullable("CaptionWriter", MinorType.VARCHAR)
+            .addNullable("Headline", MinorType.VARCHAR)
+            .addNullable("AuthorsPosition", MinorType.VARCHAR)
+            .addNullable("Credit", MinorType.VARCHAR)
+            .addNullable("Source", MinorType.VARCHAR)
+            .addNullable("City", MinorType.VARCHAR)
+            .addNullable("State", MinorType.VARCHAR)
+            .addNullable("Country", MinorType.VARCHAR)
+            .addNullable("Category", MinorType.VARCHAR)
+            .addNullable("DateCreated", MinorType.VARCHAR)
+            .addNullable("Urgency", MinorType.VARCHAR)
+            .addArray("SupplementalCategories", MinorType.VARCHAR)
+            .resumeMap()
+          .addMap("XmpBJ")
+            .addMapArray("JobRef")
+              .addNullable("Name", MinorType.VARCHAR)
+              .resumeMap()
+            .resumeMap()
+          .addMap("XmpMM")
+            .addNullable("DocumentID", MinorType.VARCHAR)
+            .addNullable("InstanceID", MinorType.VARCHAR)
+            .resumeMap()
+          .addMap("XmpRights")
+            .addNullable("WebStatement", MinorType.VARCHAR)
+            .addNullable("Marked", MinorType.VARCHAR)
+            .resumeMap()
+          .addMap("Dc")
+            .addNullable("Description", MinorType.VARCHAR)
+            .addArray("Creator", MinorType.VARCHAR)
+            .addNullable("Title", MinorType.VARCHAR)
+            .addNullable("Rights", MinorType.VARCHAR)
+            .addArray("Subject", MinorType.VARCHAR)
+            .resumeMap()
+          .resumeSchema()
+        .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow("JPEG", 600, false,
+          mapValue(25,
+          mapValue("Ian Britton", "Communications", "Photographer", "Ian Britton", "FreeFoto.com", " ", " ", "Ubited Kingdom", "BUS", "2002-06-20", "5", strArray("Communications")),
+          singleMap(mapArray(mapValue("Photographer"))),
+          mapValue("adobe:docid:photoshop:84d4dba8-9b11-11d6-895d-c4d063a70fb0", "uuid:3ff5d382-9b12-11d6-895d-c4d063a70fb0"),
+          mapValue("www.freefoto.com", "True"), mapValue("Communications", strArray("Ian Britton"), "Communications", "ian Britton - FreeFoto.com", strArray("Communications"))))
+        .build();
+
+    assertEquals(1, sets.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+
+  @Test
+  public void testLimitPushdown() throws Exception {
+    cluster.defineFormat("dfs", "image", new ImageFormatConfig(Arrays.asList("mp4"), false, false, null));
+    String sql = "select * from dfs.`image/*.mp4` limit 1";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(1, sets.rowCount());
+    sets.clear();
+  }
+
+  @Test
+  public void testSerDe() throws Exception {
+    cluster.defineFormat("dfs", "image", new ImageFormatConfig(Arrays.asList("jpg"), false, false, null));
+    String sql = "select count(*) from dfs.`image/*.jpg`";
+    String plan = queryBuilder().sql(sql).explainJson();
+    long cnt = queryBuilder().physical(plan).singletonLong();
+
+    assertEquals("Counts should match", 2, cnt);
+  }
+
+  @Test
+  public void testExplicitQueryWithCompressedFile() throws Exception {
+    cluster.defineFormat("dfs", "image", new ImageFormatConfig(Arrays.asList("jpg"), false, false, null));
+    QueryTestUtil.generateCompressedFile("image/LearningApacheDrill.jpg", "zip", "store/image/LearningApacheDrill.jpg.zip");
+    String sql = "select Format, PixelWidth, PixelHeight, `FileType` from dfs.`store/image/LearningApacheDrill.jpg.zip`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("Format", MinorType.VARCHAR)
+        .addNullable("PixelWidth", MinorType.INT)
+        .addNullable("PixelHeight", MinorType.INT)
+        .addMap("FileType")
+          .addNullable("DetectedFileTypeName", MinorType.VARCHAR)
+          .addNullable("DetectedFileTypeLongName", MinorType.VARCHAR)
+          .addNullable("DetectedMIMEType", MinorType.VARCHAR)
+          .addNullable("ExpectedFileNameExtension", MinorType.VARCHAR)
+          .resumeSchema()
+        .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow("JPEG", 800, 800, mapValue("JPEG", "Joint Photographic Experts Group", "image/jpeg", "jpg"))
+        .build();
+
+    assertEquals(1, sets.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+
+  @Test
+  public void testFileSystemMetadataOption() throws Exception {
+    cluster.defineFormat("dfs", "image", new ImageFormatConfig(Arrays.asList("gif"), true, false, null));
+    String sql = "select FileSize, Format, PixelWidth, PixelHeight, ColorMode, BitsPerPixel,"
+        + " Orientaion, DPIWidth, DPIHeight, HasAlpha, Duration, VideoCodec, FrameRate, AudioCodec,"
+        + " AudioSampleSize, AudioSampleRate from dfs.`image/*.gif`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("FileSize", MinorType.BIGINT)
+        .addNullable("Format", MinorType.VARCHAR)
+        .addNullable("PixelWidth", MinorType.INT)
+        .addNullable("PixelHeight", MinorType.INT)
+        .addNullable("ColorMode", MinorType.VARCHAR)
+        .addNullable("BitsPerPixel", MinorType.INT)
+        .addNullable("Orientaion", MinorType.INT)
+        .addNullable("DPIWidth", MinorType.FLOAT8)
+        .addNullable("DPIHeight", MinorType.FLOAT8)
+        .addNullable("HasAlpha", MinorType.BIT)
+        .addNullable("Duration", MinorType.BIGINT)
+        .addNullable("VideoCodec", MinorType.VARCHAR)
+        .addNullable("FrameRate", MinorType.FLOAT8)
+        .addNullable("AudioCodec", MinorType.VARCHAR)
+        .addNullable("AudioSampleSize", MinorType.INT)
+        .addNullable("AudioSampleRate", MinorType.FLOAT8)
+        .build();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow(10463, "GIF", 128, 174, "Indexed", 8, 0, 0.0, 0.0, true, 0, "Unknown", 0.0, "Unknown", 0, 0.0)
+        .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+
+  @Test
+  public void testTimeZoneOption() throws Exception {
+    cluster.defineFormat("dfs", "image", new ImageFormatConfig(Arrays.asList("psd"), true, false, "UTC"));
+    String sql = "select ExifIFD0 from dfs.`image/*.psd`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addMap("ExifIFD0")
+          .addNullable("Orientation", MinorType.INT)
+          .addNullable("XResolution", MinorType.FLOAT8)
+          .addNullable("YResolution", MinorType.FLOAT8)
+          .addNullable("ResolutionUnit", MinorType.INT)
+          .addNullable("Software", MinorType.VARCHAR)
+          .addNullable("DateTime", MinorType.TIMESTAMP)
+          .resumeSchema()
+        .build();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow(singleMap(mapValue(1, 72.009, 72.009, 2, "Adobe Photoshop CS2 Windows", Instant.ofEpochMilli(1454717337000L))))
+        .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+}
\ No newline at end of file
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/image/TestImageRecordReader.java b/contrib/format-image/src/test/java/org/apache/drill/exec/store/image/TestImageTagValue.java
similarity index 89%
rename from exec/java-exec/src/test/java/org/apache/drill/exec/store/image/TestImageRecordReader.java
rename to contrib/format-image/src/test/java/org/apache/drill/exec/store/image/TestImageTagValue.java
index e5d513b..fb3a8b9 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/image/TestImageRecordReader.java
+++ b/contrib/format-image/src/test/java/org/apache/drill/exec/store/image/TestImageTagValue.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.drill.exec.store.image;
 
 import java.util.TimeZone;
@@ -25,7 +24,7 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-public class TestImageRecordReader extends BaseTestQuery {
+public class TestImageTagValue extends BaseTestQuery {
 
   private static TimeZone defaultTimeZone;
 
@@ -37,16 +36,16 @@
 
   private void createAndQuery(String tableName, String imageFile) throws Exception {
     final String query = String.format(
-      "select * from table(cp.`store/image/%s`(type => 'image', fileSystemMetadata => false))",
+      "select * from table(cp.`image/%s`(type => 'image', fileSystemMetadata => false))",
       imageFile);
 
     runSQL("alter session set `store.format`='json'");
-    test("create table dfs.tmp.`%s` as %s", tableName, query);
+    test("create table dfs.`%s` as %s", tableName, query);
 
     testBuilder()
-      .sqlQuery("select * from dfs.tmp.`%s`", tableName)
+      .sqlQuery("select * from dfs.`%s`", tableName)
       .ordered()
-      .jsonBaselineFile("store/image/" + tableName + ".json")
+      .jsonBaselineFile("image/" + tableName + ".json")
       .go();
     runSQL("alter session set `store.format` = 'parquet'");
   }
@@ -125,4 +124,4 @@
   public static void cleanUp() {
     TimeZone.setDefault(defaultTimeZone);
   }
-}
+}
\ No newline at end of file
diff --git a/exec/java-exec/src/test/resources/store/image/1_webp_a.webp b/contrib/format-image/src/test/resources/image/1_webp_a.webp
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/1_webp_a.webp
rename to contrib/format-image/src/test/resources/image/1_webp_a.webp
Binary files differ
diff --git a/contrib/format-image/src/test/resources/image/LearningApacheDrill.jpg b/contrib/format-image/src/test/resources/image/LearningApacheDrill.jpg
new file mode 100644
index 0000000..ad4353f
--- /dev/null
+++ b/contrib/format-image/src/test/resources/image/LearningApacheDrill.jpg
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/adobeJpeg1.eps b/contrib/format-image/src/test/resources/image/adobeJpeg1.eps
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/adobeJpeg1.eps
rename to contrib/format-image/src/test/resources/image/adobeJpeg1.eps
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/avi.json b/contrib/format-image/src/test/resources/image/avi.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/avi.json
rename to contrib/format-image/src/test/resources/image/avi.json
diff --git a/exec/java-exec/src/test/resources/store/image/bmp.json b/contrib/format-image/src/test/resources/image/bmp.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/bmp.json
rename to contrib/format-image/src/test/resources/image/bmp.json
diff --git a/exec/java-exec/src/test/resources/store/image/eps.json b/contrib/format-image/src/test/resources/image/eps.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/eps.json
rename to contrib/format-image/src/test/resources/image/eps.json
diff --git a/exec/java-exec/src/test/resources/store/image/gif.json b/contrib/format-image/src/test/resources/image/gif.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/gif.json
rename to contrib/format-image/src/test/resources/image/gif.json
diff --git a/exec/java-exec/src/test/resources/store/image/ico.json b/contrib/format-image/src/test/resources/image/ico.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/ico.json
rename to contrib/format-image/src/test/resources/image/ico.json
diff --git a/exec/java-exec/src/test/resources/store/image/jpeg.json b/contrib/format-image/src/test/resources/image/jpeg.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/jpeg.json
rename to contrib/format-image/src/test/resources/image/jpeg.json
diff --git a/exec/java-exec/src/test/resources/store/image/mov.json b/contrib/format-image/src/test/resources/image/mov.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/mov.json
rename to contrib/format-image/src/test/resources/image/mov.json
diff --git a/exec/java-exec/src/test/resources/store/image/mp4.json b/contrib/format-image/src/test/resources/image/mp4.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/mp4.json
rename to contrib/format-image/src/test/resources/image/mp4.json
diff --git a/exec/java-exec/src/test/resources/store/image/pcx.json b/contrib/format-image/src/test/resources/image/pcx.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/pcx.json
rename to contrib/format-image/src/test/resources/image/pcx.json
diff --git a/exec/java-exec/src/test/resources/store/image/png.json b/contrib/format-image/src/test/resources/image/png.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/png.json
rename to contrib/format-image/src/test/resources/image/png.json
diff --git a/exec/java-exec/src/test/resources/store/image/psd.json b/contrib/format-image/src/test/resources/image/psd.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/psd.json
rename to contrib/format-image/src/test/resources/image/psd.json
diff --git a/exec/java-exec/src/test/resources/store/image/rose-128x174-24bit-lzw.tiff b/contrib/format-image/src/test/resources/image/rose-128x174-24bit-lzw.tiff
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/rose-128x174-24bit-lzw.tiff
rename to contrib/format-image/src/test/resources/image/rose-128x174-24bit-lzw.tiff
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/rose-128x174-24bit.bmp b/contrib/format-image/src/test/resources/image/rose-128x174-24bit.bmp
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/rose-128x174-24bit.bmp
rename to contrib/format-image/src/test/resources/image/rose-128x174-24bit.bmp
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/rose-128x174-24bit.pcx b/contrib/format-image/src/test/resources/image/rose-128x174-24bit.pcx
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/rose-128x174-24bit.pcx
rename to contrib/format-image/src/test/resources/image/rose-128x174-24bit.pcx
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/rose-128x174-32bit-alpha.png b/contrib/format-image/src/test/resources/image/rose-128x174-32bit-alpha.png
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/rose-128x174-32bit-alpha.png
rename to contrib/format-image/src/test/resources/image/rose-128x174-32bit-alpha.png
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/rose-128x174-32bit-alpha.psd b/contrib/format-image/src/test/resources/image/rose-128x174-32bit-alpha.psd
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/rose-128x174-32bit-alpha.psd
rename to contrib/format-image/src/test/resources/image/rose-128x174-32bit-alpha.psd
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/rose-128x174-8bit-alpha.gif b/contrib/format-image/src/test/resources/image/rose-128x174-8bit-alpha.gif
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/rose-128x174-8bit-alpha.gif
rename to contrib/format-image/src/test/resources/image/rose-128x174-8bit-alpha.gif
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/rose-32x32-32bit-alpha.ico b/contrib/format-image/src/test/resources/image/rose-32x32-32bit-alpha.ico
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/rose-32x32-32bit-alpha.ico
rename to contrib/format-image/src/test/resources/image/rose-32x32-32bit-alpha.ico
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/sample.avi b/contrib/format-image/src/test/resources/image/sample.avi
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/sample.avi
rename to contrib/format-image/src/test/resources/image/sample.avi
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/sample.mov b/contrib/format-image/src/test/resources/image/sample.mov
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/sample.mov
rename to contrib/format-image/src/test/resources/image/sample.mov
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/sample.mp4 b/contrib/format-image/src/test/resources/image/sample.mp4
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/sample.mp4
rename to contrib/format-image/src/test/resources/image/sample.mp4
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/sample.wav b/contrib/format-image/src/test/resources/image/sample.wav
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/sample.wav
rename to contrib/format-image/src/test/resources/image/sample.wav
Binary files differ
diff --git a/exec/java-exec/src/test/resources/store/image/tiff.json b/contrib/format-image/src/test/resources/image/tiff.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/tiff.json
rename to contrib/format-image/src/test/resources/image/tiff.json
diff --git a/exec/java-exec/src/test/resources/store/image/wav.json b/contrib/format-image/src/test/resources/image/wav.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/wav.json
rename to contrib/format-image/src/test/resources/image/wav.json
diff --git a/exec/java-exec/src/test/resources/store/image/webp.json b/contrib/format-image/src/test/resources/image/webp.json
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/webp.json
rename to contrib/format-image/src/test/resources/image/webp.json
diff --git a/exec/java-exec/src/test/resources/store/image/withExifAndIptc.jpg b/contrib/format-image/src/test/resources/image/withExifAndIptc.jpg
similarity index 100%
rename from exec/java-exec/src/test/resources/store/image/withExifAndIptc.jpg
rename to contrib/format-image/src/test/resources/image/withExifAndIptc.jpg
Binary files differ
diff --git a/contrib/format-ltsv/pom.xml b/contrib/format-ltsv/pom.xml
index 9dca2ce..12c44cf 100644
--- a/contrib/format-ltsv/pom.xml
+++ b/contrib/format-ltsv/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-format-ltsv</artifactId>
-  <name>contrib/ltsv-format-plugin</name>
+  <name>Drill : Contrib : Format : LTSV</name>
 
   <dependencies>
     <dependency>
diff --git a/contrib/format-ltsv/src/main/java/org/apache/drill/exec/store/ltsv/LTSVFormatPlugin.java b/contrib/format-ltsv/src/main/java/org/apache/drill/exec/store/ltsv/LTSVFormatPlugin.java
index a1d5c20..c28b101 100644
--- a/contrib/format-ltsv/src/main/java/org/apache/drill/exec/store/ltsv/LTSVFormatPlugin.java
+++ b/contrib/format-ltsv/src/main/java/org/apache/drill/exec/store/ltsv/LTSVFormatPlugin.java
@@ -21,7 +21,6 @@
 import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.planner.common.DrillStatsTable.TableStatistics;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.RecordReader;
 import org.apache.drill.exec.store.RecordWriter;
@@ -54,14 +53,8 @@
     return new LTSVRecordReader(context, fileWork.getPath(), dfs, columns);
   }
 
-
   @Override
-  public int getReaderOperatorType() {
-    return UserBitShared.CoreOperatorType.LTSV_SUB_SCAN_VALUE;
-  }
-
-  @Override
-  public int getWriterOperatorType() {
+  public String getWriterOperatorType() {
     throw new UnsupportedOperationException("Drill doesn't currently support writing to LTSV files.");
   }
 
diff --git a/contrib/format-maprdb/pom.xml b/contrib/format-maprdb/pom.xml
index a867434..699b66b 100644
--- a/contrib/format-maprdb/pom.xml
+++ b/contrib/format-maprdb/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-format-mapr</artifactId>
-  <name>contrib/mapr-format-plugin</name>
+  <name>Drill : Contrib : Format : MaprDB</name>
 
   <properties>
     <mapr-format-plugin.hbase.version>1.1.1-mapr-1602-m7-5.2.0</mapr-format-plugin.hbase.version>
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScan.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScan.java
index 92195f5..9b352c6 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScan.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBSubScan.java
@@ -28,7 +28,6 @@
 import org.apache.drill.exec.physical.base.AbstractDbSubScan;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.metadata.TupleMetadata;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
@@ -43,6 +42,8 @@
 
 public class MapRDBSubScan extends AbstractDbSubScan {
 
+  public static final String OPERATOR_TYPE = "MAPRDB_SUB_SCAN";
+
   private final MapRDBFormatPlugin formatPlugin;
   private final List<MapRDBSubScanSpec> regionScanSpecList;
   private final List<SchemaPath> columns;
@@ -143,8 +144,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.MAPRDB_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @JsonIgnore
diff --git a/contrib/format-spss/pom.xml b/contrib/format-spss/pom.xml
index bfbec85..2ba9808 100644
--- a/contrib/format-spss/pom.xml
+++ b/contrib/format-spss/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-format-spss</artifactId>
-  <name>contrib/format-spss</name>
+  <name>Drill : Contrib : Format : SPSS</name>
 
   <dependencies>
     <dependency>
diff --git a/contrib/format-spss/src/main/java/org/apache/drill/exec/store/spss/SpssFormatPlugin.java b/contrib/format-spss/src/main/java/org/apache/drill/exec/store/spss/SpssFormatPlugin.java
index 53e618b..7e3e94f 100644
--- a/contrib/format-spss/src/main/java/org/apache/drill/exec/store/spss/SpssFormatPlugin.java
+++ b/contrib/format-spss/src/main/java/org/apache/drill/exec/store/spss/SpssFormatPlugin.java
@@ -26,7 +26,6 @@
 import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
 
 import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
@@ -59,19 +58,18 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, SpssFormatConfig pluginConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = false;
-    config.blockSplittable = false;
-    config.compressible = true;
-    config.supportsProjectPushdown = true;
-    config.extensions = pluginConfig.getExtensions();
-    config.fsConf = fsConf;
-    config.defaultName = DEFAULT_NAME;
-    config.readerOperatorType = UserBitShared.CoreOperatorType.SPSS_SUB_SCAN_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(DEFAULT_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   @Override
diff --git a/contrib/format-syslog/pom.xml b/contrib/format-syslog/pom.xml
index 99052f3..fdd07ec 100644
--- a/contrib/format-syslog/pom.xml
+++ b/contrib/format-syslog/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-format-syslog</artifactId>
-  <name>contrib/format-syslog</name>
+  <name>Drill : Contrib : Format : Syslog</name>
 
   <dependencies>
     <dependency>
@@ -50,7 +50,6 @@
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
-
     <dependency>
       <groupId>org.apache.drill</groupId>
       <artifactId>drill-common</artifactId>
diff --git a/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogBatchReader.java b/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogBatchReader.java
new file mode 100644
index 0000000..6dd3e55
--- /dev/null
+++ b/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogBatchReader.java
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.syslog;
+
+import org.apache.drill.common.AutoCloseables;
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.MetadataUtils;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.hadoop.mapred.FileSplit;
+import org.joda.time.Instant;
+import org.realityforge.jsyslog.message.StructuredDataParameter;
+import org.realityforge.jsyslog.message.SyslogMessage;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+public class SyslogBatchReader implements ManagedReader<FileSchemaNegotiator> {
+  private static final Logger logger = LoggerFactory.getLogger(SyslogBatchReader.class);
+  private final String STRUCTURED_DATA_PREFIX = "structured_data_";
+  private final String STRUCTURED_DATA_MAP_NAME = "structured_data";
+  private final String RAW_COLUMN_NAME = "_raw";
+
+  private final int maxRecords;
+  private final SyslogFormatConfig config;
+  private final EasySubScan subScan;
+  private final Map<String, MinorType> mappedColumns = new LinkedHashMap<>();
+  private int lineCount;
+  private int errorCount;
+  private CustomErrorContext errorContext;
+  private InputStream fsStream;
+  private FileSplit split;
+  private BufferedReader reader;
+  private RowSetLoader rowWriter;
+  private List<ScalarWriter> writerArray;
+  private ScalarWriter rawColumnWriter;
+  private ScalarWriter messageWriter;
+  private TupleWriter structuredDataWriter;
+
+
+  public SyslogBatchReader(int maxRecords, SyslogFormatConfig config, EasySubScan scan) {
+    this.maxRecords = maxRecords;
+    this.config = config;
+    this.subScan = scan;
+    populateMappedColumns();
+  }
+
+  @Override
+  public boolean open(FileSchemaNegotiator negotiator) {
+    split = negotiator.split();
+    openFile(negotiator);
+    negotiator.tableSchema(buildSchema(), false);
+    errorContext = negotiator.parentErrorContext();
+
+    ResultSetLoader loader = negotiator.build();
+    rowWriter = loader.writer();
+    writerArray = populateRowWriters();
+    rawColumnWriter = rowWriter.scalar(RAW_COLUMN_NAME);
+    messageWriter = rowWriter.scalar("message");
+    return true;
+  }
+
+  @Override
+  public boolean next() {
+    while (!rowWriter.isFull()) {
+      if (!processNextLine()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public void close() {
+    if (fsStream != null) {
+      AutoCloseables.closeSilently(fsStream);
+      fsStream = null;
+    }
+
+    if (reader != null) {
+      AutoCloseables.closeSilently(reader);
+      reader = null;
+    }
+  }
+
+  private void openFile(FileSchemaNegotiator negotiator) {
+    try {
+      fsStream = negotiator.fileSystem().openPossiblyCompressedStream(split.getPath());
+    } catch (IOException e) {
+      throw UserException
+        .dataReadError(e)
+        .message("Unable to open Syslog File %s", split.getPath())
+        .addContext(e.getMessage())
+        .addContext(errorContext)
+        .build(logger);
+    }
+    this.lineCount = 0;
+    reader = new BufferedReader(new InputStreamReader(fsStream));
+  }
+
+  public TupleMetadata buildSchema() {
+    SchemaBuilder builder = new SchemaBuilder();
+    for (Map.Entry<String, MinorType> entry : mappedColumns.entrySet()) {
+      builder.addNullable(entry.getKey(), entry.getValue());
+    }
+    if (! config.flattenStructuredData()) {
+      ColumnMetadata structuredDataMap = MetadataUtils.newMap(STRUCTURED_DATA_MAP_NAME);
+      builder.add(structuredDataMap);
+    }
+
+    builder.addNullable("message", MinorType.VARCHAR);
+
+    // Add _raw column
+    ColumnMetadata colSchema = MetadataUtils.newScalar(RAW_COLUMN_NAME, MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL);
+    colSchema.setBooleanProperty(ColumnMetadata.EXCLUDE_FROM_WILDCARD, true);
+    builder.add(colSchema);
+    return builder.buildSchema();
+  }
+
+  private List<ScalarWriter> populateRowWriters() {
+    List<ScalarWriter> writerArray = new ArrayList<>();
+    for (Map.Entry<String, MinorType> entry : mappedColumns.entrySet()) {
+      writerArray.add(rowWriter.scalar(entry.getKey()));
+    }
+
+    if (! config.flattenStructuredData()) {
+       structuredDataWriter = rowWriter.tuple(STRUCTURED_DATA_MAP_NAME);
+    }
+
+    return writerArray;
+  }
+
+  private void populateMappedColumns() {
+    mappedColumns.put("event_date", MinorType.TIMESTAMP);
+    mappedColumns.put("severity_code", MinorType.INT);
+    mappedColumns.put("facility_code", MinorType.INT);
+    mappedColumns.put("severity", MinorType.VARCHAR);
+    mappedColumns.put("facility", MinorType.VARCHAR);
+    mappedColumns.put("ip", MinorType.VARCHAR);
+    mappedColumns.put("app_name", MinorType.VARCHAR);
+    mappedColumns.put("process_id", MinorType.VARCHAR);
+    mappedColumns.put("message_id", MinorType.VARCHAR);
+    mappedColumns.put("structured_data_text", MinorType.VARCHAR);
+  }
+
+  private boolean processNextLine() {
+    // Check to see if the limit has been reached
+    if (rowWriter.limitReached(maxRecords)) {
+      return false;
+    }
+
+    String line;
+    try {
+      line = reader.readLine();
+
+      // A null line means the end of the file has been reached
+      if (line == null) {
+        return false;
+      }
+
+      // Remove leading and trailing whitespace
+      line = line.trim();
+      if (line.length() == 0) {
+        // Skip empty lines
+        return true;
+      }
+
+      SyslogMessage parsedMessage = SyslogMessage.parseStructuredSyslogMessage(line);
+      rowWriter.start();
+      writeStructuredColumns(parsedMessage);
+      writeStructuredData(parsedMessage);
+
+      if (isProjected(rawColumnWriter)) {
+        rawColumnWriter.setString(line);
+      }
+
+      if (isProjected(messageWriter)) {
+        logger.debug("Message: {}", parsedMessage.getMessage());
+        messageWriter.setString(parsedMessage.getMessage());
+      }
+
+    } catch (IOException e) {
+      errorCount++;
+      if (errorCount > config.getMaxErrors()) {
+        throw UserException
+          .dataReadError()
+          .message("Maximum Error Threshold Exceeded. Error reading Syslog file at line %d", lineCount)
+          .addContext(e.getMessage())
+          .build(logger);
+      }
+    }
+    lineCount++;
+    rowWriter.save();
+    return true;
+  }
+
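+  // The indexes into writerArray below follow the insertion order of mappedColumns
+  // (event_date, severity_code, facility_code, severity, facility, ip, app_name,
+  // process_id, message_id, structured_data_text), since both structures are built by
+  // iterating the same LinkedHashMap.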
+  private void writeStructuredColumns(SyslogMessage parsedMessage) {
+    long milliseconds = parsedMessage.getTimestamp().getMillis();
+    writerArray.get(0).setTimestamp(new Instant(milliseconds));
+    writerArray.get(1).setInt(parsedMessage.getLevel().ordinal());
+    writerArray.get(2).setInt(parsedMessage.getFacility().ordinal());
+    setString(writerArray.get(3), parsedMessage.getLevel().name());
+    setString(writerArray.get(4), parsedMessage.getFacility().name());
+    setString(writerArray.get(5), parsedMessage.getHostname());
+    setString(writerArray.get(6), parsedMessage.getAppName());
+    setString(writerArray.get(7), parsedMessage.getProcId());
+    setString(writerArray.get(8), parsedMessage.getMsgId());
+
+    Map<String, List<StructuredDataParameter>> structuredData = parsedMessage.getStructuredData();
+
+    if (structuredData != null) {
+      writerArray.get(9).setString(parsedMessage.getStructuredData().toString());
+    }
+    logger.debug("Successfully mapped known fields");
+  }
+
+  /**
+   * Write the flattened structured data fields to Drill vectors. The structured data fields are not known in
+   * advance and are not consistent between syslog entries, so these fields are added on the fly. The only
+   * possible data type in these cases is VARCHAR.
+   * @param parsedMessage the parsed syslog message
+   */
+  private void writeStructuredData(SyslogMessage parsedMessage) {
+    Map<String, List<StructuredDataParameter>> structuredData = parsedMessage.getStructuredData();
+    // Prevent NPE if there is no structured data text
+    if (structuredData == null) {
+      return;
+    }
+
+    if (config.flattenStructuredData()) {
+      // Iterate over the structured data fields and map to Drill vectors
+      for (Map.Entry<String, List<StructuredDataParameter>> entry : structuredData.entrySet()) {
+        for (StructuredDataParameter parameter : entry.getValue()) {
+          // These fields are not known in advance and are not necessarily consistent
+          String fieldName = STRUCTURED_DATA_PREFIX + parameter.getName();
+          String fieldValue = parameter.getValue();
+          writeStringColumn(rowWriter, fieldName, fieldValue);
+          logger.debug("Writing {} {}", fieldName, fieldValue);
+        }
+      }
+    } else {
+      writeStructuredDataToMap(structuredData);
+    }
+  }
+
+  private void writeStructuredDataToMap(Map<String, List<StructuredDataParameter>> structuredData) {
+    // Iterate over the structured data fields and write to a Drill map
+    for (Map.Entry<String, List<StructuredDataParameter>> entry : structuredData.entrySet()) {
+      for (StructuredDataParameter parameter : entry.getValue()) {
+        // These fields are not known in advance and are not necessarily consistent
+        String fieldName = parameter.getName();
+        String fieldValue = parameter.getValue();
+        writeStringColumn(structuredDataWriter, fieldName, fieldValue);
+      }
+    }
+  }
+
+  /**
+   * Writes data to a VARCHAR column.  If there is no ScalarWriter for the given column, this function will create one.
+   * @param rowWriter The TupleWriter to which we are writing
+   * @param name The field name to be written
+   * @param value The field value to be written
+   */
+  private void writeStringColumn(TupleWriter rowWriter, String name, String value) {
+    ScalarWriter colWriter = getColWriter(rowWriter, name, TypeProtos.MinorType.VARCHAR);
+    colWriter.setString(value);
+  }
+
+  private ScalarWriter getColWriter(TupleWriter tupleWriter, String fieldName, TypeProtos.MinorType type) {
+    int index = tupleWriter.tupleSchema().index(fieldName);
+    if (index == -1) {
+      ColumnMetadata colSchema = MetadataUtils.newScalar(fieldName, type, TypeProtos.DataMode.OPTIONAL);
+      index = tupleWriter.addColumn(colSchema);
+    }
+    return tupleWriter.scalar(index);
+  }
+
+  /**
+   * The ScalarWriter objects have a method to verify whether the writer is projected or not, but it does not
+   * appear to take star queries into account.  This method checks whether the query is a star query and includes
+   * that in the determination of whether the column is projected.
+   * @param writer A ScalarWriter
+   * @return True if the column is projected, false if not.
+   */
+  private boolean isProjected(ScalarWriter writer) {
+    // Case for star query
+    if (subScan.getColumns().size() == 1 && subScan.getColumns().get(0).isDynamicStar()) {
+      return true;
+    } else {
+      return writer.isProjected();
+    }
+  }
+
+  private void setString(ScalarWriter writer, String value) {
+    if (value == null) {
+      return;
+    }
+    writer.setString(value);
+  }
+}
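
Note on the reader above: structured-data fields are discovered at run time, so columns are created lazily as new field names appear. The sketch below is illustrative only and is not part of this patch; the helper name and its signature are hypothetical, but the look-up-or-add flow mirrors what getColWriter() and writeStringColumn() do against Drill's TupleWriter:

  // Sketch: add an OPTIONAL VARCHAR column on first use, then write the value.
  // Assumes a TupleWriter obtained from the EVF row writer, as in SyslogBatchReader.
  private void writeDynamicVarchar(TupleWriter row, String fieldName, String value) {
    if (value == null) {
      return;                                    // mirror setString(): skip null values
    }
    int index = row.tupleSchema().index(fieldName);
    if (index == -1) {                           // first time this field is seen: add the column
      ColumnMetadata colSchema = MetadataUtils.newScalar(fieldName,
          TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL);
      index = row.addColumn(colSchema);
    }
    row.scalar(index).setString(value);          // write the current row's value
  }
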
diff --git a/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatConfig.java b/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatConfig.java
index e851e31..8f2fc3f 100644
--- a/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatConfig.java
+++ b/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatConfig.java
@@ -49,14 +49,17 @@
     this.flattenStructuredData = flattenStructuredData == null ? false : flattenStructuredData;
   }
 
-  public boolean getFlattenStructuredData() {
+  @JsonProperty("flattenStructuredData")
+  public boolean flattenStructuredData() {
     return flattenStructuredData;
   }
 
+  @JsonProperty("maxErrors")
   public int getMaxErrors() {
     return maxErrors;
   }
 
+  @JsonProperty("extensions")
   public List<String> getExtensions() {
     return extensions;
   }
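
The @JsonProperty annotations above fix the JSON names under which these options are (de)serialized. As a rough illustration only (not part of this patch, and assuming the three-argument constructor seen in TestSyslogFormat takes the extensions list, maxErrors, and flattenStructuredData in that order), a Jackson round trip with com.fasterxml.jackson.databind.ObjectMapper would be expected to emit exactly those keys:

  // Hypothetical sanity check of the serialized property names.
  ObjectMapper mapper = new ObjectMapper();
  SyslogFormatConfig config =
      new SyslogFormatConfig(Collections.singletonList("syslog"), 0, true);
  String json = mapper.writeValueAsString(config);
  // json is expected to contain "extensions", "maxErrors" and "flattenStructuredData".
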
diff --git a/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatPlugin.java b/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatPlugin.java
index d21035b..b439c5e 100644
--- a/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatPlugin.java
+++ b/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogFormatPlugin.java
@@ -18,82 +18,75 @@
 
 package org.apache.drill.exec.store.syslog;
 
-import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.planner.common.DrillStatsTable.TableStatistics;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileReaderFactory;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileScanBuilder;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
 import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.store.RecordWriter;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
+import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
-import org.apache.drill.exec.store.dfs.easy.EasyWriter;
-import org.apache.drill.exec.store.dfs.easy.FileWork;
-import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import java.util.List;
 
 public class SyslogFormatPlugin extends EasyFormatPlugin<SyslogFormatConfig> {
 
   public static final String DEFAULT_NAME = "syslog";
-  private final SyslogFormatConfig formatConfig;
+
+  private static class SyslogReaderFactory extends FileReaderFactory {
+
+    private final int maxRecords;
+    private final SyslogFormatConfig formatConfig;
+    private final EasySubScan scan;
+
+    public SyslogReaderFactory(int maxRecords, SyslogFormatConfig formatConfig, EasySubScan scan) {
+      this.maxRecords = maxRecords;
+      this.formatConfig = formatConfig;
+      this.scan = scan;
+    }
+
+    @Override
+    public ManagedReader<? extends FileSchemaNegotiator> newReader() {
+      return new SyslogBatchReader(maxRecords, formatConfig, scan);
+    }
+  }
 
   public SyslogFormatPlugin(String name, DrillbitContext context,
-                            Configuration fsConf, StoragePluginConfig storageConfig,
-                            SyslogFormatConfig formatConfig) {
-    super(name, context, fsConf, storageConfig, formatConfig,
-            true,  // readable
-            false, // writable
-            true, // blockSplittable
-            true,  // compressible
-            Lists.newArrayList(formatConfig.getExtensions()),
-            DEFAULT_NAME);
-    this.formatConfig = formatConfig;
+                          Configuration fsConf, StoragePluginConfig storageConfig,
+                          SyslogFormatConfig formatConfig) {
+    super(name, easyConfig(fsConf, formatConfig), context, storageConfig, formatConfig);
+  }
+
+  private static EasyFormatConfig easyConfig(Configuration fsConf, SyslogFormatConfig pluginConfig) {
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(DEFAULT_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   @Override
-  public RecordReader getRecordReader(FragmentContext context, DrillFileSystem dfs, FileWork fileWork,
-                                      List<SchemaPath> columns, String userName) {
-    return new SyslogRecordReader(context, dfs, fileWork, columns, userName, formatConfig);
+  public ManagedReader<? extends FileSchemaNegotiator> newBatchReader(
+    EasySubScan scan, OptionManager options)  {
+    return new SyslogBatchReader(scan.getMaxRecords(), formatConfig, scan);
   }
 
   @Override
-  public boolean supportsPushDown() {
-    return true;
-  }
+  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) {
+    FileScanBuilder builder = new FileScanBuilder();
+    builder.setReaderFactory(new SyslogReaderFactory(scan.getMaxRecords(), formatConfig, scan));
 
-  @Override
-  public RecordWriter getRecordWriter(FragmentContext context,
-                                      EasyWriter writer) throws UnsupportedOperationException {
-    throw new UnsupportedOperationException("Drill does not support writing records to Syslog format.");
-  }
-
-  @Override
-  public int getReaderOperatorType() {
-    return CoreOperatorType.SYSLOG_SUB_SCAN_VALUE;
-  }
-
-  @Override
-  public int getWriterOperatorType() {
-    throw new UnsupportedOperationException("Drill does not support writing records to Syslog format.");
-  }
-
-  @Override
-  public boolean supportsStatistics() {
-    return false;
-  }
-
-  @Override
-  public TableStatistics readStatistics(FileSystem fs, Path statsTablePath) {
-    throw new UnsupportedOperationException("unimplemented");
-  }
-
-  @Override
-  public void writeStatistics(TableStatistics statistics, FileSystem fs, Path statsTablePath) {
-    throw new UnsupportedOperationException("unimplemented");
+    initScanBuilder(builder, scan);
+    builder.nullType(Types.optional(TypeProtos.MinorType.VARCHAR));
+    return builder;
   }
 }
diff --git a/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogRecordReader.java b/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogRecordReader.java
deleted file mode 100644
index 0f39887..0000000
--- a/contrib/format-syslog/src/main/java/org/apache/drill/exec/store/syslog/SyslogRecordReader.java
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.store.syslog;
-
-import io.netty.buffer.DrillBuf;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.exception.OutOfMemoryException;
-import org.apache.drill.exec.expr.holders.VarCharHolder;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.ops.OperatorContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.store.AbstractRecordReader;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.store.dfs.easy.FileWork;
-import org.apache.drill.exec.vector.complex.impl.VectorContainerWriter;
-import org.apache.drill.exec.vector.complex.writer.BaseWriter;
-import org.realityforge.jsyslog.message.StructuredDataParameter;
-import org.realityforge.jsyslog.message.SyslogMessage;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.nio.charset.StandardCharsets;
-import java.text.SimpleDateFormat;
-import java.util.List;
-import java.util.Map;
-
-public class SyslogRecordReader extends AbstractRecordReader {
-
-  private static final Logger logger = LoggerFactory.getLogger(SyslogRecordReader.class);
-  private static final int MAX_RECORDS_PER_BATCH = 4096;
-
-  private final DrillFileSystem fileSystem;
-  private final FileWork fileWork;
-  private final String userName;
-  private BufferedReader reader;
-  private DrillBuf buffer;
-  private VectorContainerWriter writer;
-  private final int maxErrors;
-  private final boolean flattenStructuredData;
-  private int errorCount;
-  private int lineCount;
-  private final List<SchemaPath> projectedColumns;
-  private String line;
-
-  public SyslogRecordReader(FragmentContext context,
-                            DrillFileSystem fileSystem,
-                            FileWork fileWork,
-                            List<SchemaPath> columns,
-                            String userName,
-                            SyslogFormatConfig config) throws OutOfMemoryException {
-
-    this.fileSystem = fileSystem;
-    this.fileWork = fileWork;
-    this.userName = userName;
-    this.maxErrors = config.getMaxErrors();
-    this.errorCount = 0;
-    this.buffer = context.getManagedBuffer().reallocIfNeeded(4096);
-    this.projectedColumns = columns;
-    this.flattenStructuredData = config.getFlattenStructuredData();
-
-    setColumns(columns);
-  }
-
-  @Override
-  public void setup(final OperatorContext context, final OutputMutator output) {
-    openFile();
-    this.writer = new VectorContainerWriter(output);
-  }
-
-  private void openFile() {
-    InputStream in;
-    try {
-      in = fileSystem.openPossiblyCompressedStream(fileWork.getPath());
-    } catch (Exception e) {
-      throw UserException
-              .dataReadError(e)
-              .message("Failed to open open input file: %s", fileWork.getPath())
-              .addContext("User name", this.userName)
-              .build(logger);
-    }
-    this.lineCount = 0;
-    reader = new BufferedReader(new InputStreamReader(in));
-  }
-
-  @Override
-  public int next() {
-    this.writer.allocate();
-    this.writer.reset();
-
-    int recordCount = 0;
-
-    try {
-      BaseWriter.MapWriter map = this.writer.rootAsMap();
-      String line;
-
-      while (recordCount < MAX_RECORDS_PER_BATCH && (line = this.reader.readLine()) != null) {
-        lineCount++;
-
-        // Skip empty lines
-        line = line.trim();
-        if (line.length() == 0) {
-          continue;
-        }
-        this.line = line;
-
-        try {
-          SyslogMessage parsedMessage = SyslogMessage.parseStructuredSyslogMessage(line);
-
-          this.writer.setPosition(recordCount);
-          map.start();
-
-          if (isStarQuery()) {
-            writeAllColumns(map, parsedMessage);
-          } else {
-            writeProjectedColumns(map, parsedMessage);
-          }
-          map.end();
-          recordCount++;
-
-        } catch (Exception e) {
-          errorCount++;
-          if (errorCount > maxErrors) {
-            throw UserException
-                    .dataReadError()
-                    .message("Maximum Error Threshold Exceeded: ")
-                    .addContext("Line: " + lineCount)
-                    .addContext(e.getMessage())
-                    .build(logger);
-          }
-        }
-      }
-
-      this.writer.setValueCount(recordCount);
-      return recordCount;
-
-    } catch (final Exception e) {
-      errorCount++;
-      if (errorCount > maxErrors) {
-        throw UserException.dataReadError()
-                .message("Error parsing file")
-                .addContext(e.getMessage())
-                .build(logger);
-      }
-    }
-
-    return recordCount;
-  }
-
-  private void writeAllColumns(BaseWriter.MapWriter map, SyslogMessage parsedMessage) {
-
-    long milliseconds = 0;
-    try {
-      milliseconds = parsedMessage.getTimestamp().getMillis();
-    } catch (final Exception e) {
-      errorCount++;
-      if (errorCount > maxErrors) {
-        throw UserException.dataReadError()
-                .message("Syslog Format Plugin: Error parsing date")
-                .addContext(e.getMessage())
-                .build(logger);
-      }
-    }
-    map.timeStamp("event_date").writeTimeStamp(milliseconds);
-    map.integer("severity_code").writeInt(parsedMessage.getLevel().ordinal());
-    map.integer("facility_code").writeInt(parsedMessage.getFacility().ordinal());
-
-    mapStringField("severity", parsedMessage.getLevel().name(), map);
-    mapStringField("facility", parsedMessage.getFacility().name(), map);
-    mapStringField("ip", parsedMessage.getHostname(), map);
-    mapStringField("app_name", parsedMessage.getAppName(), map);
-    mapStringField("process_id", parsedMessage.getProcId(), map);
-    mapStringField("message_id", parsedMessage.getMsgId(), map);
-
-    if (parsedMessage.getStructuredData() != null) {
-      mapStringField("structured_data_text", parsedMessage.getStructuredData().toString(), map);
-      Map<String, List<StructuredDataParameter>> structuredData = parsedMessage.getStructuredData();
-      if (flattenStructuredData) {
-        mapFlattenedStructuredData(structuredData, map);
-      } else {
-        mapComplexField("structured_data", structuredData, map);
-      }
-    }
-    mapStringField("message", parsedMessage.getMessage(), map);
-  }
-
-  private void writeProjectedColumns(BaseWriter.MapWriter map, SyslogMessage parsedMessage) throws UserException {
-    String columnName;
-
-    for (SchemaPath col : projectedColumns) {
-
-      //Case for nested fields
-      if (col.getAsNamePart().hasChild()) {
-        String fieldName = col.getAsNamePart().getChild().getName();
-        mapStructuredDataField(fieldName, map, parsedMessage);
-      } else {
-        columnName = col.getAsNamePart().getName();
-
-        //Extracts fields from structured data IF the user selected to flatten these fields
-        if ((!columnName.equals("structured_data_text")) && columnName.startsWith("structured_data_")) {
-          String fieldName = columnName.replace("structured_data_", "");
-          String value = getFieldFromStructuredData(fieldName, parsedMessage);
-          mapStringField(columnName, value, map);
-        } else {
-          switch (columnName) {
-            case "event_date":
-              long milliseconds = parsedMessage.getTimestamp().getMillis(); //TODO put in try/catch
-              map.timeStamp("event_date").writeTimeStamp(milliseconds);
-              break;
-            case "severity_code":
-              map.integer("severity_code").writeInt(parsedMessage.getLevel().ordinal());
-              break;
-            case "facility_code":
-              map.integer("facility_code").writeInt(parsedMessage.getFacility().ordinal());
-              break;
-            case "severity":
-              mapStringField("severity", parsedMessage.getLevel().name(), map);
-              break;
-            case "facility":
-              mapStringField("facility", parsedMessage.getFacility().name(), map);
-              break;
-            case "ip":
-              mapStringField("ip", parsedMessage.getHostname(), map);
-              break;
-            case "app_name":
-              mapStringField("app_name", parsedMessage.getAppName(), map);
-              break;
-            case "process_id":
-              mapStringField("process_id", parsedMessage.getProcId(), map);
-              break;
-            case "msg_id":
-              mapStringField("message_id", parsedMessage.getMsgId(), map);
-              break;
-            case "structured_data":
-              if (parsedMessage.getStructuredData() != null) {
-                Map<String, List<StructuredDataParameter>> structured_data = parsedMessage.getStructuredData();
-                mapComplexField("structured_data", structured_data, map);
-              }
-              break;
-            case "structured_data_text":
-              if (parsedMessage.getStructuredData() != null) {
-                mapStringField("structured_data_text", parsedMessage.getStructuredData().toString(), map);
-              } else {
-                mapStringField("structured_data_text", "", map);
-              }
-              break;
-            case "message":
-              mapStringField("message", parsedMessage.getMessage(), map);
-              break;
-            case "_raw":
-              mapStringField("_raw", this.line, map);
-              break;
-
-            default:
-              mapStringField(columnName, "", map);
-          }
-        }
-      }
-    }
-  }
-
-  //Helper function to map strings
-  private void mapStringField(String name, String value, BaseWriter.MapWriter map) {
-    if (value == null) {
-      return;
-    }
-    try {
-      byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
-      int stringLength = bytes.length;
-      this.buffer = buffer.reallocIfNeeded(stringLength);
-      this.buffer.setBytes(0, bytes, 0, stringLength);
-      map.varChar(name).writeVarChar(0, stringLength, buffer);
-    } catch (Exception e) {
-      throw UserException
-              .dataWriteError()
-              .addContext("Could not write string: ")
-              .addContext(e.getMessage())
-              .build(logger);
-    }
-  }
-
-  //Helper function to flatten structured data
-  private void mapFlattenedStructuredData(Map<String, List<StructuredDataParameter>> data, BaseWriter.MapWriter map) {
-    for (Map.Entry<String, List<StructuredDataParameter>> entry : data.entrySet()) {
-      for (StructuredDataParameter parameter : entry.getValue()) {
-        String fieldName = "structured_data_" + parameter.getName();
-        String fieldValue = parameter.getValue();
-        mapStringField(fieldName, fieldValue, map);
-      }
-    }
-  }
-
-  //Gets field from the Structured Data Construct
-  private String getFieldFromStructuredData(String fieldName, SyslogMessage parsedMessage) {
-    for (Map.Entry<String, List<StructuredDataParameter>> entry : parsedMessage.getStructuredData().entrySet()) {
-      for (StructuredDataParameter d : entry.getValue()) {
-        if (d.getName().equals(fieldName)) {
-          return d.getValue();
-        }
-      }
-    }
-    return null;
-  }
-
-  //Helper function to map arrays
-  private void mapComplexField(String mapName, Map<String, List<StructuredDataParameter>> data, BaseWriter.MapWriter map) {
-    for (Map.Entry<String, List<StructuredDataParameter>> entry : data.entrySet()) {
-      List<StructuredDataParameter> dataParameters = entry.getValue();
-      String fieldName;
-      String fieldValue;
-
-      for (StructuredDataParameter parameter : dataParameters) {
-        fieldName = parameter.getName();
-        fieldValue = parameter.getValue();
-
-        VarCharHolder rowHolder = new VarCharHolder();
-
-        byte[] rowStringBytes = fieldValue.getBytes();
-        this.buffer.reallocIfNeeded(rowStringBytes.length);
-        this.buffer.setBytes(0, rowStringBytes);
-        rowHolder.start = 0;
-        rowHolder.end = rowStringBytes.length;
-        rowHolder.buffer = this.buffer;
-
-        map.map(mapName).varChar(fieldName).write(rowHolder);
-      }
-    }
-  }
-
-  private void mapStructuredDataField(String fieldName, BaseWriter.MapWriter map, SyslogMessage parsedMessage) {
-    String fieldValue = getFieldFromStructuredData(fieldName, parsedMessage);
-    VarCharHolder rowHolder = new VarCharHolder();
-
-    byte[] rowStringBytes = fieldValue.getBytes();
-    this.buffer.reallocIfNeeded(rowStringBytes.length);
-    this.buffer.setBytes(0, rowStringBytes);
-    rowHolder.start = 0;
-    rowHolder.end = rowStringBytes.length;
-    rowHolder.buffer = this.buffer;
-
-    map.map("structured_data").varChar(fieldName).write(rowHolder);
-  }
-
-  public SimpleDateFormat getValidDateObject(String d) {
-    SimpleDateFormat tempDateFormat;
-    if (d != null && !d.isEmpty()) {
-      tempDateFormat = new SimpleDateFormat(d);
-    } else {
-      throw UserException
-              .parseError()
-              .message("Invalid date format")
-              .build(logger);
-    }
-    return tempDateFormat;
-  }
-
-  public void close() throws Exception {
-    this.reader.close();
-  }
-}
diff --git a/contrib/format-syslog/src/main/resources/bootstrap-format-plugins.json b/contrib/format-syslog/src/main/resources/bootstrap-format-plugins.json
index ee5a396..ea55012 100644
--- a/contrib/format-syslog/src/main/resources/bootstrap-format-plugins.json
+++ b/contrib/format-syslog/src/main/resources/bootstrap-format-plugins.json
@@ -7,7 +7,8 @@
           "type": "syslog",
           "extensions": [
             "syslog"
-          ]
+          ],
+          "maxErrors" : 0
         }
       }
     },
@@ -18,7 +19,8 @@
           "type": "syslog",
           "extensions": [
             "syslog"
-          ]
+          ],
+          "maxErrors" : 0
         }
       }
     }
diff --git a/contrib/format-syslog/src/test/java/org/apache/drill/exec/store/syslog/TestSyslogFormat.java b/contrib/format-syslog/src/test/java/org/apache/drill/exec/store/syslog/TestSyslogFormat.java
index c7bd833..4fb15c5 100644
--- a/contrib/format-syslog/src/test/java/org/apache/drill/exec/store/syslog/TestSyslogFormat.java
+++ b/contrib/format-syslog/src/test/java/org/apache/drill/exec/store/syslog/TestSyslogFormat.java
@@ -17,17 +17,19 @@
  */
 package org.apache.drill.exec.store.syslog;
 
+import java.nio.file.Paths;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.categories.RowSetTests;
 import org.apache.drill.common.logical.FormatPluginConfig;
 import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.exec.record.metadata.TupleMetadata;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.test.ClusterTest;
-import org.apache.drill.test.BaseDirTestWatcher;
 import org.apache.drill.exec.physical.rowSet.RowSet;
 import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
 import org.apache.drill.test.ClusterFixture;
@@ -35,20 +37,26 @@
 import org.apache.drill.exec.record.metadata.SchemaBuilder;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.junit.ClassRule;
+import org.junit.experimental.categories.Category;
 
+import static org.apache.drill.test.QueryTestUtil.generateCompressedFile;
+import static org.apache.drill.test.rowSet.RowSetUtilities.mapArray;
+import static org.junit.Assert.assertEquals;
+
+@Category(RowSetTests.class)
 public class TestSyslogFormat extends ClusterTest {
 
-  @ClassRule
-  public static final BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();
-
   @BeforeClass
   public static void setup() throws Exception {
-    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher).maxParallelization(1));
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    // Needed for compressed file unit test
+    dirTestWatcher.copyResourceToRoot(Paths.get("syslog/"));
+
     defineSyslogPlugin();
   }
 
-  private static void defineSyslogPlugin() throws ExecutionSetupException {
+  private static void defineSyslogPlugin() {
     Map<String, FormatPluginConfig> formats = new HashMap<>();
     formats.put("sample", new SyslogFormatConfig(
         Collections.singletonList("syslog"), null, null));
@@ -58,6 +66,7 @@
 
     // Define a temporary plugin for the "cp" storage plugin.
     cluster.defineFormats("cp", formats);
+    cluster.defineFormats("dfs", formats);
   }
 
   @Test
@@ -76,88 +85,91 @@
     RowSet results = client.queryBuilder().sql(sql).rowSet();
 
     TupleMetadata expectedSchema = new SchemaBuilder()
-            .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
-            .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .buildSchema();
+      .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
+      .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .buildSchema();
 
     RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-            .addRow(1065910455003L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "", "")
-            .addRow(482196050520L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "", "")
-            .addRow(482196050520L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "", "")
-            .addRow(1065910455003L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "", "")
-            .addRow(1061727255000L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "", "")
-            .addRow(1061727255000L, 5, "NOTICE", 20, "LOCAL4", "192.0.2.1", "8710", "", "")
-            .addRow(1065910455003L, 5, "NOTICE", 20, "LOCAL4", "mymachine.example.com", null, "", "{examplePriority@32473=[class=high], exampleSDID@32473=[iut=3, eventSource=Application, eventID=1011]}")
-            .addRow(1065910455003L, 5, "NOTICE", 20, "LOCAL4", "mymachine.example.com", null, "", "{examplePriority@32473=[class=high], exampleSDID@32473=[iut=3, eventSource=Application, eventID=1011]}")
-            .build();
+      .addRow(1065910455003L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(482196050520L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(482196050520L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(1065910455003L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(1061727255000L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(1061727255000L, 5, "NOTICE", 20, "LOCAL4", "192.0.2.1", "8710", null, null)
+      .addRow(1065910455003L, 5, "NOTICE", 20, "LOCAL4", "mymachine.example.com", null, "ID47", "{examplePriority@32473=[class=high], exampleSDID@32473=[iut=3, " +
+    "eventSource=Application, eventID=1011]}")
+      .addRow(1065910455003L, 5, "NOTICE", 20, "LOCAL4", "mymachine.example.com", null, "ID47", "{examplePriority@32473=[class=high], exampleSDID@32473=[iut=3, " +
+    "eventSource=Application, eventID=1011]}")
+      .build();
 
     new RowSetComparison(expected).verifyAndClearAll(results);
   }
 
   @Test
-  public void testStarQuery() throws RpcException {
+  public void testStarQuery() throws Exception {
     String sql = "SELECT * FROM cp.`syslog/logs1.syslog`";
 
-
     RowSet results = client.queryBuilder().sql(sql).rowSet();
-
     TupleMetadata expectedSchema = new SchemaBuilder()
-            .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
-            .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("app_name", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("message", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .buildSchema();
+      .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
+      .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("app_name", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data", MinorType.MAP, DataMode.REQUIRED)
+      .add("message", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .buildSchema();
 
     RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-            .addRow(1065910455003L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", "ID47", "BOM'su root' failed for lonvick on /dev/pts/8", null)
-            .addRow(482196050520L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", "ID47", "BOM'su root' failed for lonvick on /dev/pts/8", null)
-            .addRow(482196050520L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", "ID47", "BOM'su root' failed for lonvick on /dev/pts/8", null)
-            .addRow(1065910455003L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", "ID47", "BOM'su root' failed for lonvick on /dev/pts/8", null)
-            .addRow(1061727255000L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", "ID47", "BOM'su root' failed for lonvick on /dev/pts/8", null)
-            .addRow(1061727255000L, 5, 20, "NOTICE", "LOCAL4", "192.0.2.1", "myproc", null, "%% It's time to make the do-nuts.", "8710")
-            .build();
+      .addRow(1065910455003L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", null, "ID47", null, mapArray(), "BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow(482196050520L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", null, "ID47", null, mapArray(), "BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow(482196050520L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", null, "ID47", null, mapArray(), "BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow(1065910455003L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", null, "ID47", null, mapArray(), "BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow(1061727255000L, 2, 4, "CRIT", "AUTH", "mymachine.example.com", "su", null, "ID47", null, mapArray(), "BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow(1061727255000L, 5, 20, "NOTICE", "LOCAL4", "192.0.2.1", "myproc", "8710", null, null, mapArray(), "%% It's time to make the do-nuts.")
+      .build();
 
+    assertEquals(6, results.rowCount());
     new RowSetComparison(expected).verifyAndClearAll(results);
   }
 
   @Test
-  public void testRawQuery() throws RpcException {
+  public void testRawQuery() throws Exception {
     String sql = "SELECT _raw FROM cp.`syslog/logs.syslog`";
 
     RowSet results = client.queryBuilder().sql(sql).rowSet();
     TupleMetadata expectedSchema = new SchemaBuilder()
-            .add("_raw", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .buildSchema();
+      .add("_raw", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .buildSchema();
 
     RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-            .addRow("<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
-            .addRow("<34>1 1985-04-12T19:20:50.52-04:00 mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
-            .addRow("<34>1 1985-04-12T23:20:50.52Z mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
-            .addRow("<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
-            .addRow("<34>1 2003-08-24T05:14:15.000003-07:00 mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
-            .addRow("<165>1 2003-08-24T05:14:15.000003-07:00 192.0.2.1 myproc 8710 - - %% It's time to make the do-nuts.")
-            .addRow("<165>1 2003-10-11T22:14:15.003Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut=\"3\" eventSource=\"Application\" eventID=\"1011\"][examplePriority@32473 class=\"high\"]")
-            .addRow("<165>1 2003-10-11T22:14:15.003Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut=\"3\" eventSource=\"Application\" eventID=\"1011\"][examplePriority@32473 class=\"high\"] - and thats a wrap!")
-            .build();
+      .addRow("<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow("<34>1 1985-04-12T19:20:50.52-04:00 mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow("<34>1 1985-04-12T23:20:50.52Z mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow("<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow("<34>1 2003-08-24T05:14:15.000003-07:00 mymachine.example.com su - ID47 - BOM'su root' failed for lonvick on /dev/pts/8")
+      .addRow("<165>1 2003-08-24T05:14:15.000003-07:00 192.0.2.1 myproc 8710 - - %% It's time to make the do-nuts.")
+      .addRow("<165>1 2003-10-11T22:14:15.003Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut=\"3\" eventSource=\"Application\" eventID=\"1011\"][examplePriority@32473 class=\"high\"]")
+      .addRow("<165>1 2003-10-11T22:14:15.003Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut=\"3\" eventSource=\"Application\" eventID=\"1011\"][examplePriority@32473 class=\"high\"] - and thats a wrap!")
+      .build();
 
     new RowSetComparison(expected).verifyAndClearAll(results);
   }
 
   @Test
-  public void testStructuredDataQuery() throws RpcException {
+  public void testStructuredDataQuery() throws Exception {
     String sql = "SELECT syslog_data.`structured_data`.`UserAgent` AS UserAgent, " +
             "syslog_data.`structured_data`.`UserHostAddress` AS UserHostAddress," +
             "syslog_data.`structured_data`.`BrowserSession` AS BrowserSession," +
@@ -177,71 +189,72 @@
 
     RowSet results = client.queryBuilder().sql(sql).rowSet();
     TupleMetadata expectedSchema = new SchemaBuilder()
-            .add("UserAgent", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("UserHostAddress", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("BrowserSession", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("Realm", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("Appliance", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("Company", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("UserID", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("PEN", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("HostName", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("Category", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("Priority", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .buildSchema();
+      .add("UserAgent", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("UserHostAddress", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("BrowserSession", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("Realm", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("Appliance", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("Company", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("UserID", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("PEN", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("HostName", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("Category", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("Priority", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .buildSchema();
 
     RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-            .addRow("Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "192.168.2.132",
-                    "0gvhdi5udjuqtweprbgoxilc", "SecureAuth0", "secureauthqa.gosecureauth.com", "SecureAuth Corporation",
-                    "Tester2", "27389", "192.168.2.132", "AUDIT", "4")
-            .build();
+      .addRow("Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "192.168.2.132",
+              "0gvhdi5udjuqtweprbgoxilc", "SecureAuth0", "secureauthqa.gosecureauth.com", "SecureAuth Corporation",
+              "Tester2", "27389", "192.168.2.132", "AUDIT", "4")
+      .build();
 
     new RowSetComparison(expected).verifyAndClearAll(results);
   }
 
   @Test
-  public void testStarFlattenedStructuredDataQuery() throws RpcException {
+  public void testStarFlattenedStructuredDataQuery() throws Exception {
     String sql = "SELECT * FROM cp.`syslog/test.syslog1`";
 
     RowSet results = client.queryBuilder().sql(sql).rowSet();
     TupleMetadata expectedSchema = new SchemaBuilder()
-            .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
-            .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("app_name", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_UserAgent", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_UserHostAddress", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_BrowserSession", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Realm", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Appliance", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Company", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_UserID", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_PEN", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_HostName", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Category", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Priority", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("message", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .buildSchema();
+      .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
+      .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("app_name", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("message", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_UserAgent", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_UserHostAddress", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_BrowserSession", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Realm", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Appliance", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Company", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_UserID", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_PEN", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_HostName", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Category", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Priority", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .buildSchema();
 
     RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-            .addRow(1438811939693L, 6, 10, "INFO", "AUTHPRIV", "192.168.2.132", "SecureAuth0", "23108", "ID52020",
-                    "{SecureAuth@27389=[UserAgent=Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko, UserHostAddress=192.168.2.132, BrowserSession=0gvhdi5udjuqtweprbgoxilc, Realm=SecureAuth0, Appliance=secureauthqa.gosecureauth.com, Company=SecureAuth Corporation, UserID=Tester2, PEN=27389, HostName=192.168.2.132, Category=AUDIT, Priority=4]}",
-                    "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "192.168.2.132",
-                    "0gvhdi5udjuqtweprbgoxilc", "SecureAuth0", "secureauthqa.gosecureauth.com", "SecureAuth Corporation",
-                    "Tester2", "27389", "192.168.2.132", "AUDIT", "4", "Found the user for retrieving user's profile")
+      .addRow(1438811939693L, 6, 10, "INFO", "AUTHPRIV", "192.168.2.132", "SecureAuth0", "23108", "ID52020", "{SecureAuth@27389=[UserAgent=Mozilla/5.0 (Windows NT 6.1; WOW64; " +
+        "Trident/7.0; rv:11.0) like Gecko, UserHostAddress=192.168.2.132, BrowserSession=0gvhdi5udjuqtweprbgoxilc, Realm=SecureAuth0, Appliance=secureauthqa.gosecureauth.com, " +
+        "Company=SecureAuth Corporation, UserID=Tester2, PEN=27389, HostName=192.168.2.132, Category=AUDIT, Priority=4]}", "Found the user for retrieving user's profile", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "192.168.2.132", "0gvhdi5udjuqtweprbgoxilc", "SecureAuth0", "secureauthqa.gosecureauth.com", "SecureAuth Corporation", "Tester2", "27389", "192.168.2.132", "AUDIT", "4")
+      .addRow(1459529040580L, 6, 16, "INFO", "LOCAL0", "MacBook-Pro-3", null, "94473", null, null,
+        "{\"pid\":94473,\"hostname\":\"MacBook-Pro-3\",\"level\":30,\"msg\":\"hello world\",\"time\":1459529098958,\"v\":1}", null, null, null, null, null, null, null, null, null, null, null)
             .build();
 
+    assertEquals(2, results.rowCount());
     new RowSetComparison(expected).verifyAndClearAll(results);
   }
 
   @Test
-  public void testExplicitFlattenedStructuredDataQuery() throws RpcException {
+  public void testExplicitFlattenedStructuredDataQuery() throws Exception {
     String sql = "SELECT event_date," +
             "severity_code," +
             "facility_code," +
@@ -268,39 +281,112 @@
 
     RowSet results = client.queryBuilder().sql(sql).rowSet();
     TupleMetadata expectedSchema = new SchemaBuilder()
-            .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
-            .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
-            .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("app_name", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_UserAgent", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_UserHostAddress", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_BrowserSession", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Realm", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Appliance", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Company", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_UserID", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_PEN", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_HostName", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Category", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("structured_data_Priority", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .add("message", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
-            .buildSchema();
+      .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
+      .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("app_name", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_UserAgent", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_UserHostAddress", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_BrowserSession", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Realm", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Appliance", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Company", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_UserID", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_PEN", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_HostName", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Category", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_Priority", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("message", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .buildSchema();
 
+    assertEquals(2, results.rowCount());
 
     RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-            .addRow(1438811939693L, 6, 10, "INFO", "AUTHPRIV", "192.168.2.132", "SecureAuth0", "23108", "",
-                    "{SecureAuth@27389=[UserAgent=Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko, UserHostAddress=192.168.2.132, BrowserSession=0gvhdi5udjuqtweprbgoxilc, Realm=SecureAuth0, Appliance=secureauthqa.gosecureauth.com, Company=SecureAuth Corporation, UserID=Tester2, PEN=27389, HostName=192.168.2.132, Category=AUDIT, Priority=4]}",
-                    "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "192.168.2.132",
-                    "0gvhdi5udjuqtweprbgoxilc", "SecureAuth0", "secureauthqa.gosecureauth.com", "SecureAuth Corporation",
-                    "Tester2", "27389", "192.168.2.132", "AUDIT", "4", "Found the user for retrieving user's profile")
+      .addRow(1438811939693L, 6, 10, "INFO", "AUTHPRIV", "192.168.2.132", "SecureAuth0", "23108", "ID52020", "{SecureAuth@27389=[UserAgent=Mozilla/5.0 (Windows NT 6.1; " +
+    "WOW64; Trident/7.0; rv:11.0) like Gecko, UserHostAddress=192.168.2.132, BrowserSession=0gvhdi5udjuqtweprbgoxilc, Realm=SecureAuth0, Appliance=secureauthqa.gosecureauth.com," +
+    " Company=SecureAuth Corporation, UserID=Tester2, PEN=27389, HostName=192.168.2.132, Category=AUDIT, Priority=4]}", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "192.168.2.132", "0gvhdi5udjuqtweprbgoxilc", "SecureAuth0", "secureauthqa.gosecureauth.com", "SecureAuth Corporation", "Tester2", "27389", "192.168.2.132", "AUDIT", "4", "Found the user for retrieving user's profile")
+      .addRow(1459529040580L, 6, 16, "INFO", "LOCAL0", "MacBook-Pro-3", null, "94473", null, null, null, null, null, null, null, null, null, null, null, null, null,
+    "{\"pid\":94473,\"hostname\":\"MacBook-Pro-3\",\"level\":30,\"msg\":\"hello world\",\"time\":1459529098958,\"v\":1}")
             .build();
 
+    assertEquals(2, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testCount() throws Exception {
+    String sql = "SELECT COUNT(*) FROM cp.`syslog/logs1.syslog` ";
+    long result = client.queryBuilder().sql(sql).singletonLong();
+    assertEquals(6L, result);
+  }
+
+  @Test
+  public void testSerDe() throws Exception {
+    String sql = "SELECT COUNT(*) AS cnt FROM dfs.`syslog/logs1.syslog`";
+    String plan = queryBuilder().sql(sql).explainJson();
+    long cnt = queryBuilder().physical(plan).singletonLong();
+    assertEquals("Counts should match",6L, cnt);
+  }
+
+  @Test
+  public void testLimitPushdown() throws Exception {
+    String sql = "SELECT * FROM cp.`syslog/logs1.syslog` LIMIT 5";
+
+    queryBuilder()
+      .sql(sql)
+      .planMatcher()
+      .include("Limit", "maxRecords=5")
+      .match();
+  }
+
+  @Test
+  public void testNonComplexFieldsWithCompressedFile() throws Exception {
+    generateCompressedFile("syslog/logs.syslog", "zip", "syslog/logs.syslog.zip");
+
+    String sql = "SELECT event_date," +
+      "severity_code," +
+      "severity," +
+      "facility_code," +
+      "facility," +
+      "ip," +
+      "process_id," +
+      "message_id," +
+      "structured_data_text " +
+      "FROM dfs.`syslog/logs.syslog.zip`";
+
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .add("event_date", TypeProtos.MinorType.TIMESTAMP, TypeProtos.DataMode.OPTIONAL)
+      .add("severity_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("severity", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("facility_code", TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)
+      .add("facility", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("ip", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("process_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("message_id", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .add("structured_data_text", TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL)
+      .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+      .addRow(1065910455003L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(482196050520L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(482196050520L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(1065910455003L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(1061727255000L, 2, "CRIT", 4, "AUTH", "mymachine.example.com", null, "ID47", null)
+      .addRow(1061727255000L, 5, "NOTICE", 20, "LOCAL4", "192.0.2.1", "8710", null, null)
+      .addRow(1065910455003L, 5, "NOTICE", 20, "LOCAL4", "mymachine.example.com", null, "ID47", "{examplePriority@32473=[class=high], exampleSDID@32473=[iut=3, " +
+        "eventSource=Application, eventID=1011]}")
+      .addRow(1065910455003L, 5, "NOTICE", 20, "LOCAL4", "mymachine.example.com", null, "ID47", "{examplePriority@32473=[class=high], exampleSDID@32473=[iut=3, " +
+        "eventSource=Application, eventID=1011]}")
+      .build();
+
     new RowSetComparison(expected).verifyAndClearAll(results);
   }
 }
diff --git a/contrib/format-syslog/src/test/resources/syslog/test.syslog1 b/contrib/format-syslog/src/test/resources/syslog/test.syslog1
index d8e19d9..752bd60 100644
--- a/contrib/format-syslog/src/test/resources/syslog/test.syslog1
+++ b/contrib/format-syslog/src/test/resources/syslog/test.syslog1
@@ -1,2 +1,2 @@
 <86>1 2015-08-05T21:58:59.693Z 192.168.2.132 SecureAuth0 23108 ID52020 [SecureAuth@27389 UserAgent="Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" UserHostAddress="192.168.2.132" BrowserSession="0gvhdi5udjuqtweprbgoxilc" Realm="SecureAuth0" Appliance="secureauthqa.gosecureauth.com" Company="SecureAuth Corporation" UserID="Tester2" PEN="27389" HostName="192.168.2.132" Category="AUDIT" Priority="4"] Found the user for retrieving user's profile
-<134>1 2016-04-01T16:44:58Z MacBook-Pro-3 - 94473 - - {"pid":94473,"hostname":"MacBook-Pro-3","level":30,"msg":"hello world","time":1459529098958,"v":1}
\ No newline at end of file
+<134>1 2016-04-01T16:44:00.58Z MacBook-Pro-3 - 94473 - - {"pid":94473,"hostname":"MacBook-Pro-3","level":30,"msg":"hello world","time":1459529098958,"v":1}
\ No newline at end of file
diff --git a/contrib/format-xml/README.md b/contrib/format-xml/README.md
new file mode 100644
index 0000000..3c50ce2
--- /dev/null
+++ b/contrib/format-xml/README.md
@@ -0,0 +1,138 @@
+# XML Format Reader
+This plugin enables Drill to read XML files without defining any kind of schema. 
+
+## Configuration
+Aside from the file extension, there is one configuration option:
+
+* `dataLevel`: XML data often contains a considerable amount of nesting which is not necessarily useful for data analysis. This parameter allows you to set the nesting level 
+  where the data actually starts.  The levels start at `1`.
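+  For example, given a file whose root element is `<books>` and whose repeated `<book>` children hold the records of interest, each record sits at level `2`, so `dataLevel` should be set to `2`.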
+
+The default configuration is shown below:
+
+```json
+"xml": {
+  "type": "xml",
+  "extensions": [
+    "xml"
+  ],
+  "dataLevel": 2
+}
+```
+
+## Data Types
+All fields are read as strings.  Nested fields are read as maps.  Future functionality could include support for lists.
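+
+Since every field arrives as a `VARCHAR`, numeric comparisons or arithmetic require an explicit cast. A minimal sketch using the `year` field from the `attributes.xml` sample shown below:
+
+```sql
+SELECT title,
+       CAST(`year` AS INT) AS year_int
+FROM dfs.test.`attributes.xml`
+WHERE CAST(`year` AS INT) > 1900
+```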
+
+### Attributes
+XML events can have attributes which can also be useful.
+```xml
+<book>
+  <author>O.-J. Dahl</author>
+  <title binding="hardcover" subcategory="non-fiction">Structured Programming</title>
+  <category>PROGRAMMING</category>
+  <year>1972</year>
+</book>
+```
+
+In the example above, the `title` field contains two attributes, `binding` and `subcategory`.  In order to access these fields, Drill creates a map called `attributes` and 
+adds an entry for each attribute whose key is the field name followed by the attribute name.  Every XML file will have a field called `attributes` regardless of whether the data actually 
+has attributes or not.
+
+```xml
+<books>
+   <book>
+     <author>Mark Twain</author>
+     <title>The Adventures of Tom Sawyer</title>
+     <category>FICTION</category>
+     <year>1876</year>
+   </book>
+   <book>
+     <authors>
+         <author>Niklaus Wirth</author>
+         <author>Somebody else</author>
+     </authors>
+     <title binding="paperback">The Programming Language Pascal</title>
+     <category >PASCAL</category>
+     <year>1971</year>
+   </book>
+   <book>
+     <author>O.-J. Dahl</author>
+     <title binding="hardcover" subcategory="non-fiction">Structured Programming</title>
+     <category>PROGRAMMING</category>
+     <year>1972</year>
+   </book>
+ </books>
+```
+If you queried this data in Drill, you'd get the table below:
+
+```sql
+SELECT * 
+FROM <path>.`attributes.xml`
+```
+
+```
+apache drill> select * from dfs.test.`attributes.xml`;
++-----------------------------------------------------------------+------------+---------------------------------+-------------+------+-----------------------------------------+
+|                           attributes                            |   author   |              title              |  category   | year |                 authors                 |
++-----------------------------------------------------------------+------------+---------------------------------+-------------+------+-----------------------------------------+
+| {}                                                              | Mark Twain | The Adventures of Tom Sawyer    | FICTION     | 1876 | {}                                      |
+| {"title_binding":"paperback"}                                   | null       | The Programming Language Pascal | PASCAL      | 1971 | {"author":"Niklaus WirthSomebody else"} |
+| {"title_binding":"hardcover","title_subcategory":"non-fiction"} | O.-J. Dahl | Structured Programming          | PROGRAMMING | 1972 | {}                                      |
++-----------------------------------------------------------------+------------+---------------------------------+-------------+------+-----------------------------------------+
+```
+
+## Limitations:  Malformed XML
+Drill can read properly formatted XML.  If the XML is not well-formed, Drill will throw errors. Known issues include illegal characters in field names or attribute names.
+Future functionality will include some degree of data cleaning and fault tolerance. 
+
+## Limitations: Schema Ambiguity
+XML is a challenging format to process as the structure does not give any hints about the schema.  For example, a JSON file might have the following record:
+
+```json
+"record" : {
+  "intField:" : 1,
+  "listField" : [1, 2],
+  "otherField" : {
+    "nestedField1" : "foo",
+    "nestedField2" : "bar"
+  }
+}
+```
+
+From this data, it is clear that `listField` is a `list` and `otherField` is a `map`.  This same data could be represented in XML as follows:
+
+```xml
+<record>
+  <intField>1</intField>
+  <listField>
+    <value>1</value>
+    <value>2</value>
+  </listField>
+  <otherField>
+    <nestedField1>foo</nestedField1>
+    <nestedField2>bar</nestedField2>
+  </otherField>
+</record>
+```
+This data poses no problem to parse. But consider what would happen if Drill encountered the following record first:
+```xml
+<record>
+  <intField>1</intField>
+  <listField>
+    <value>2</value>
+  </listField>
+  <otherField>
+    <nestedField1>foo</nestedField1>
+    <nestedField2>bar</nestedField2>
+  </otherField>
+</record>
+```
+In this example, there is no way for Drill to know whether `listField` is a `list` or a `map` because it only has one entry. 
+
+## Future Functionality
+
+* **Build schema from XSD file or link**:  One of the major challenges of this reader is having to infer the schema of the data. XML supports schema definitions (XSD), although they are not
+ required.  In the future, if there is interest, we can extend this reader to use an XSD file to build the schema which will be used to parse the actual XML file. 
+  
+* **Infer Date Fields**: It may be possible to add the ability to infer date fields.
+
+* **List Support**:  Future functionality may include the ability to infer lists from data structures.  
\ No newline at end of file
diff --git a/contrib/format-xml/pom.xml b/contrib/format-xml/pom.xml
new file mode 100644
index 0000000..f3a8943
--- /dev/null
+++ b/contrib/format-xml/pom.xml
@@ -0,0 +1,86 @@
+<?xml version="1.0"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <artifactId>drill-contrib-parent</artifactId>
+    <groupId>org.apache.drill.contrib</groupId>
+    <version>1.19.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>drill-format-xml</artifactId>
+  <name>Drill : Contrib : Format : XML</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+
+    <!-- Test dependencies -->
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.drill</groupId>
+      <artifactId>drill-common</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>copy-java-sources</id>
+            <phase>process-sources</phase>
+            <goals>
+              <goal>copy-resources</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${basedir}/target/classes/org/apache/drill/exec/store/xml
+              </outputDirectory>
+              <resources>
+                <resource>
+                  <directory>src/main/java/org/apache/drill/exec/store/xml</directory>
+                  <filtering>true</filtering>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file
diff --git a/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLBatchReader.java b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLBatchReader.java
new file mode 100644
index 0000000..83f549f
--- /dev/null
+++ b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLBatchReader.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.UserException;
+
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.hadoop.mapred.FileSplit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.InputStream;
+
+
+public class XMLBatchReader implements ManagedReader<FileSchemaNegotiator> {
+
+  private static final Logger logger = LoggerFactory.getLogger(XMLBatchReader.class);
+
+  private FileSplit split;
+  private RowSetLoader rootRowWriter;
+  private CustomErrorContext errorContext;
+
+  private XMLReader reader;
+  private final int maxRecords;
+  private final int dataLevel;
+
+
+  static class XMLReaderConfig {
+    final XMLFormatPlugin plugin;
+    final int dataLevel;
+
+    XMLReaderConfig(XMLFormatPlugin plugin) {
+      this.plugin = plugin;
+      dataLevel = plugin.getConfig().dataLevel;
+    }
+  }
+
+  public XMLBatchReader(XMLReaderConfig readerConfig, EasySubScan scan) {
+    this.maxRecords = scan.getMaxRecords();
+    this.dataLevel = readerConfig.dataLevel;
+  }
+
+  @Override
+  public boolean open(FileSchemaNegotiator negotiator) {
+    split = negotiator.split();
+    ResultSetLoader loader = negotiator.build();
+    errorContext = negotiator.parentErrorContext();
+    rootRowWriter = loader.writer();
+
+    openFile(negotiator);
+    return true;
+  }
+
+  @Override
+  public boolean next() {
+    return reader.next();
+  }
+
+  @Override
+  public void close() {
+    reader.close();
+  }
+
+  private void openFile(FileScanFramework.FileSchemaNegotiator negotiator) {
+    try {
+      InputStream fsStream = negotiator.fileSystem().openPossiblyCompressedStream(split.getPath());
+      reader = new XMLReader(fsStream, dataLevel, maxRecords);
+      reader.open(rootRowWriter, errorContext);
+    } catch (Exception e) {
+      throw UserException
+        .dataReadError(e)
+        .message("Failed to open open input file: {}", split.getPath().toString())
+        .addContext(errorContext)
+        .addContext(e.getMessage())
+        .build(logger);
+    }
+  }
+}
diff --git a/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLFormatConfig.java b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLFormatConfig.java
new file mode 100644
index 0000000..0babf20
--- /dev/null
+++ b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLFormatConfig.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import org.apache.drill.common.PlanStringBuilder;
+import org.apache.drill.common.logical.FormatPluginConfig;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+@JsonTypeName(XMLFormatPlugin.DEFAULT_NAME)
+@JsonInclude(JsonInclude.Include.NON_DEFAULT)
+public class XMLFormatConfig implements FormatPluginConfig {
+
+  public final List<String> extensions;
+  public final int dataLevel;
+
+  public XMLFormatConfig(@JsonProperty("extensions") List<String> extensions,
+                         @JsonProperty("dataLevel") int dataLevel) {
+    this.extensions = extensions == null ? Collections.singletonList("xml") : ImmutableList.copyOf(extensions);
+    this.dataLevel = Math.max(dataLevel, 1);
+  }
+
+  @JsonInclude(JsonInclude.Include.NON_DEFAULT)
+  public List<String> getExtensions() {
+    return extensions;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(extensions, dataLevel);
+  }
+
+  public XMLBatchReader.XMLReaderConfig getReaderConfig(XMLFormatPlugin plugin) {
+    return new XMLBatchReader.XMLReaderConfig(plugin);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    XMLFormatConfig other = (XMLFormatConfig) obj;
+    return Objects.equals(extensions, other.extensions)
+      && Objects.equals(dataLevel, other.dataLevel);
+  }
+
+  @Override
+  public String toString() {
+    return new PlanStringBuilder(this)
+      .field("extensions", extensions)
+      .field("dataLevel", dataLevel)
+      .toString();
+  }
+}
\ No newline at end of file
diff --git a/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLFormatPlugin.java b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLFormatPlugin.java
new file mode 100644
index 0000000..b0a3e82
--- /dev/null
+++ b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLFormatPlugin.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import org.apache.drill.common.logical.StoragePluginConfig;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileScanBuilder;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.hadoop.conf.Configuration;
+
+public class XMLFormatPlugin extends EasyFormatPlugin<XMLFormatConfig> {
+
+  public static final String DEFAULT_NAME = "xml";
+
+  public static class XMLReaderFactory extends FileScanFramework.FileReaderFactory {
+    private final XMLBatchReader.XMLReaderConfig readerConfig;
+    private final EasySubScan scan;
+
+    public XMLReaderFactory(XMLBatchReader.XMLReaderConfig config, EasySubScan scan) {
+      this.readerConfig = config;
+      this.scan = scan;
+    }
+
+    @Override
+    public ManagedReader<? extends FileScanFramework.FileSchemaNegotiator> newReader() {
+      return new XMLBatchReader(readerConfig, scan);
+    }
+  }
+
+  public XMLFormatPlugin(String name,
+                         DrillbitContext context,
+                         Configuration fsConf,
+                         StoragePluginConfig storageConfig,
+                         XMLFormatConfig formatConfig) {
+    super(name, easyConfig(fsConf, formatConfig), context, storageConfig, formatConfig);
+  }
+
+  private static EasyFormatConfig easyConfig(Configuration fsConf, XMLFormatConfig pluginConfig) {
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(DEFAULT_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
+  }
+
+  @Override
+  public ManagedReader<? extends FileScanFramework.FileSchemaNegotiator> newBatchReader(
+    EasySubScan scan, OptionManager options) {
+    return new XMLBatchReader(formatConfig.getReaderConfig(this), scan);
+  }
+
+  @Override
+  protected FileScanFramework.FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) {
+    FileScanBuilder builder = new FileScanBuilder();
+    builder.setReaderFactory(new XMLReaderFactory(new XMLBatchReader.XMLReaderConfig(this), scan));
+    initScanBuilder(builder, scan);
+    builder.nullType(Types.optional(MinorType.VARCHAR));
+    return builder;
+  }
+}
diff --git a/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLMap.java b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLMap.java
new file mode 100644
index 0000000..557762c
--- /dev/null
+++ b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLMap.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import org.apache.drill.common.PlanStringBuilder;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+
+import java.util.Objects;
+
+public class XMLMap {
+
+  private final String mapName;
+  private final TupleWriter mapWriter;
+
+  public XMLMap (String mapName, TupleWriter mapWriter) {
+    this.mapName = mapName;
+    this.mapWriter = mapWriter;
+  }
+
+  public TupleWriter getMapWriter() {
+    return mapWriter;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+    XMLMap other = (XMLMap) obj;
+    return Objects.equals(mapName, other.mapName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(mapName);
+  }
+
+  @Override
+  public String toString() {
+    return new PlanStringBuilder(this)
+      .field("Map Name", mapName)
+      .toString();
+  }
+}
diff --git a/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLReader.java b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLReader.java
new file mode 100644
index 0000000..e51ded6
--- /dev/null
+++ b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLReader.java
@@ -0,0 +1,458 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import org.apache.drill.common.AutoCloseables;
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.MetadataUtils;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.xml.stream.XMLEventReader;
+import javax.xml.stream.XMLInputFactory;
+import javax.xml.stream.XMLStreamConstants;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.events.Attribute;
+import javax.xml.stream.events.StartElement;
+import javax.xml.stream.events.XMLEvent;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Stack;
+
+public class XMLReader {
+  private static final Logger logger = LoggerFactory.getLogger(XMLReader.class);
+  private static final String ATTRIBUTE_MAP_NAME = "attributes";
+
+  private final Stack<String> fieldNameStack;
+  private final Stack<TupleWriter> rowWriterStack;
+  private final int dataLevel;
+  private final int maxRecords;
+  private final Map<String, XMLMap> nestedMapCollection;
+
+  private TupleWriter attributeWriter;
+  private CustomErrorContext errorContext;
+  private RowSetLoader rootRowWriter;
+  private int currentNestingLevel;
+  private XMLEvent currentEvent;
+  private String rootDataFieldName;
+  private String fieldName;
+  private xmlState currentState;
+  private TupleWriter currentTupleWriter;
+  private boolean rowStarted;
+  private String attributePrefix;
+  private String fieldValue;
+  private InputStream fsStream;
+  private XMLEventReader reader;
+
+  /**
+   * This enum indicates the various states in which the reader operates. The names should be self-explanatory;
+   * they are used as the reader iterates over the XML tags to determine what to do.
+   */
+  private enum xmlState {
+    ROW_STARTED,
+    POSSIBLE_MAP,
+    NESTED_MAP_STARTED,
+    GETTING_DATA,
+    WRITING_DATA,
+    FIELD_ENDED,
+    ROW_ENDED
+  }
+
+  public XMLReader(InputStream fsStream, int dataLevel, int maxRecords) throws XMLStreamException {
+    this.fsStream = fsStream;
+    XMLInputFactory inputFactory = XMLInputFactory.newInstance();
+    reader = inputFactory.createXMLEventReader(fsStream);
+    fieldNameStack = new Stack<>();
+    rowWriterStack = new Stack<>();
+    nestedMapCollection = new HashMap<>();
+    this.dataLevel = dataLevel;
+    this.maxRecords = maxRecords;
+
+  }
+
+  public void open(RowSetLoader rootRowWriter, CustomErrorContext errorContext ) {
+    this.errorContext = errorContext;
+    this.rootRowWriter = rootRowWriter;
+    attributeWriter = getAttributeWriter();
+  }
+
+  public boolean next() {
+    while (!rootRowWriter.isFull()) {
+      try {
+        if (!processElements()) {
+          return false;
+        }
+      } catch (Exception e) {
+        throw UserException
+          .dataReadError(e)
+          .message("Error parsing file: " + e.getMessage())
+          .addContext(errorContext)
+          .build(logger);
+      }
+    }
+    return true;
+  }
+
+
+  public void close() {
+    if (fsStream != null) {
+      AutoCloseables.closeSilently(fsStream);
+      fsStream = null;
+    }
+
+    if (reader != null) {
+      try {
+        reader.close();
+      } catch (XMLStreamException e) {
+        logger.warn("Error when closing XML stream: {}", e.getMessage());
+      }
+      reader = null;
+    }
+  }
+
+  /**
+   * This function processes the XML elements.  This function stops reading when the
+   * limit (if any) which came from the query has been reached or the Iterator runs out of
+   * elements.
+   * @return True if there are more elements to parse, false if not
+   */
+  private boolean processElements() {
+    XMLEvent nextEvent;
+
+    if (!reader.hasNext()) {
+      // Stop reading if there are no more results
+      return false;
+    } else if (rootRowWriter.limitReached(maxRecords)) {
+      // Stop if the query limit has been reached
+      return false;
+    }
+
+    // Iterate over XML events
+    while (reader.hasNext()) {
+      // get the current event
+      try {
+        nextEvent = reader.nextEvent();
+
+        // If the next event is whitespace, newlines, or other cruft that we don't need
+        // ignore and move to the next event
+        if (XMLUtils.isEmptyWhiteSpace(nextEvent)) {
+          continue;
+        }
+
+        // Capture the previous and current event
+        XMLEvent lastEvent = currentEvent;
+        currentEvent = nextEvent;
+
+        // Process the event
+        processEvent(currentEvent, lastEvent);
+      } catch (XMLStreamException e) {
+        throw UserException
+          .dataReadError(e)
+          .message("Error parsing XML file: " + e.getMessage())
+          .addContext(errorContext)
+          .build(logger);
+      }
+    }
+    return true;
+  }
+
+  /**
+   * This function processes an actual XMLEvent. There are three possibilities:
+   * 1.  The event is a start event
+   * 2.  The event contains text
+   * 3.  The event is a closing tag
+   * There are other possible elements, but they are not relevant for our purposes.
+   *
+   * @param currentEvent The current event to be processed
+   * @param lastEvent The previous event which was processed
+   */
+  private void processEvent(XMLEvent currentEvent,
+                            XMLEvent lastEvent) {
+    String mapName;
+    switch (currentEvent.getEventType()) {
+
+      /*
+       * This case handles start elements.
+       * Case 1:  The current nesting level is less than the data level.
+       * In this case, increase the nesting level and stop processing.
+       *
+       * Case 2: The nesting level is higher than the data level.
+       * In this case, a few things must happen.
+       * 1.  We capture the field name
+       * 2.  If the row has not started, we start the row
+       * 3.  Set the possible map flag
+       * 4.  Process attributes
+       * 5.  Push both the field name and writer to the stacks
+       */
+      case XMLStreamConstants.START_ELEMENT:
+        currentNestingLevel++;
+
+        // Case 1: Current nesting level is less than the data level
+        if (currentNestingLevel < dataLevel) {
+          // Stop here if the current level of nesting has not reached the data.
+          break;
+        }
+
+        StartElement startElement = currentEvent.asStartElement();
+        // Get the field name
+        fieldName = startElement.getName().getLocalPart();
+
+        if (rootDataFieldName == null && currentNestingLevel == dataLevel) {
+          rootDataFieldName = fieldName;
+          logger.debug("Root field name: {}", rootDataFieldName);
+        }
+
+        if (!rowStarted) {
+          currentTupleWriter = startRow(rootRowWriter);
+        } else {
+          if (lastEvent!= null &&
+            lastEvent.getEventType() == XMLStreamConstants.START_ELEMENT) {
+            /*
+             * Check the flag in the next section.  If the next element is a character AND the flag is set,
+             * start a map.  If not... ignore it all.
+             */
+            changeState(xmlState.POSSIBLE_MAP);
+
+            rowWriterStack.push(currentTupleWriter);
+          }
+
+          fieldNameStack.push(fieldName);
+          if (currentNestingLevel > dataLevel) {
+            attributePrefix = XMLUtils.addField(attributePrefix, fieldName);
+          }
+
+          Iterator<Attribute> attributes = startElement.getAttributes();
+          if (attributes != null && attributes.hasNext()) {
+            writeAttributes(attributePrefix, attributes);
+          }
+        }
+        break;
+
+      /*
+       * This case processes character elements.
+       */
+      case XMLStreamConstants.CHARACTERS:
+        /*
+         * This is the case for comments or other characters after a closing tag
+         */
+        if (currentState == xmlState.ROW_ENDED) {
+          break;
+        }
+
+        // Get the field value but ignore characters outside of rows
+        if (rowStarted) {
+          if (currentState == xmlState.POSSIBLE_MAP && currentNestingLevel > dataLevel +1) {
+            changeState(xmlState.NESTED_MAP_STARTED);
+
+            // Remove the current field name from the stack
+            if (fieldNameStack.size() > 1) {
+              fieldNameStack.pop();
+            }
+            // Get the map name and push to stack
+            mapName = fieldNameStack.pop();
+            currentTupleWriter = getMapWriter(mapName, currentTupleWriter);
+          } else {
+            changeState(xmlState.ROW_STARTED);
+          }
+        }
+
+        // Get the field value
+        fieldValue = currentEvent.asCharacters().getData().trim();
+        changeState(xmlState.GETTING_DATA);
+        break;
+
+      case XMLStreamConstants.END_ELEMENT:
+        currentNestingLevel--;
+
+        if (currentNestingLevel < dataLevel - 1) {
+          break;
+        } else if (currentEvent.asEndElement().getName().toString().compareTo(rootDataFieldName) == 0) {
+          // End the row
+          currentTupleWriter = endRow();
+
+          // Clear stacks
+          rowWriterStack.clear();
+          fieldNameStack.clear();
+          attributePrefix = "";
+
+        } else if (currentState == xmlState.FIELD_ENDED && currentNestingLevel >= dataLevel) {
+          // Case to end nested maps
+          // Pop tupleWriter off stack
+          if (rowWriterStack.size() > 0) {
+            currentTupleWriter = rowWriterStack.pop();
+          }
+          // Pop field name
+          if (fieldNameStack.size() > 0) {
+            fieldNameStack.pop();
+          }
+
+          attributePrefix = XMLUtils.removeField(attributePrefix,fieldName);
+
+        } else if (currentState != xmlState.ROW_ENDED){
+          writeFieldData(fieldName, fieldValue, currentTupleWriter);
+          // Clear out field name and value
+          attributePrefix = XMLUtils.removeField(attributePrefix, fieldName);
+
+          // Pop field name
+          if (fieldNameStack.size() > 0) {
+            fieldNameStack.pop();
+          }
+          fieldName = null;
+          fieldValue = null;
+        }
+        break;
+    }
+  }
+
+  private TupleWriter startRow(RowSetLoader writer) {
+    if (currentNestingLevel == dataLevel) {
+      rootRowWriter.start();
+      rowStarted = true;
+      rowWriterStack.push(rootRowWriter);
+      changeState(xmlState.ROW_STARTED);
+      return rootRowWriter;
+    } else {
+      rowStarted = false;
+      return writer;
+    }
+  }
+
+  /**
+   * This method executes the steps to end a row from an XML dataset.
+   * @return the root row writer
+   */
+  private TupleWriter endRow() {
+    logger.debug("Ending row");
+    rootRowWriter.save();
+    rowStarted = false;
+    changeState(xmlState.ROW_ENDED);
+    return rootRowWriter;
+  }
+
+  /**
+   * Writes a field. If the field does not have a corresponding ScalarWriter, this method will
+   * create one.
+   * @param fieldName The field name
+   * @param fieldValue The field value to be written
+   * @param writer The TupleWriter for the row or map to which the field is written
+   */
+  private void writeFieldData(String fieldName, String fieldValue, TupleWriter writer) {
+    if (fieldName == null) {
+      return;
+    }
+
+    changeState(xmlState.WRITING_DATA);
+
+    // Find the TupleWriter object
+    int index = writer.tupleSchema().index(fieldName);
+    if (index == -1) {
+      ColumnMetadata colSchema = MetadataUtils.newScalar(fieldName, TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL);
+      index = writer.addColumn(colSchema);
+    }
+    ScalarWriter colWriter = writer.scalar(index);
+    if (fieldValue != null && (currentState != xmlState.ROW_ENDED && currentState != xmlState.FIELD_ENDED)) {
+      colWriter.setString(fieldValue);
+      changeState(xmlState.FIELD_ENDED);
+    }
+  }
+
+  /**
+   * Writes an attribute. If the field does not have a corresponding ScalarWriter, this method will
+   * create one.
+   * @param fieldName The field name
+   * @param fieldValue The field value to be written
+   * @param writer The TupleWriter for the attributes map to which the attribute is written
+   */
+  private void writeAttributeData(String fieldName, String fieldValue, TupleWriter writer) {
+    if (fieldName == null) {
+      return;
+    }
+
+    // Find the TupleWriter object
+    int index = writer.tupleSchema().index(fieldName);
+    if (index == -1) {
+      ColumnMetadata colSchema = MetadataUtils.newScalar(fieldName, TypeProtos.MinorType.VARCHAR, TypeProtos.DataMode.OPTIONAL);
+      index = writer.addColumn(colSchema);
+    }
+    ScalarWriter colWriter = writer.scalar(index);
+    if (fieldValue != null) {
+      colWriter.setString(fieldValue);
+    }
+  }
+
+  /**
+   * Returns a MapWriter for a given field.  If the writer does not exist, this method adds one to the schema.
+   * @param mapName The Map's name
+   * @param rowWriter The current TupleWriter
+   * @return A TupleWriter of the new map
+   */
+  private TupleWriter getMapWriter(String mapName, TupleWriter rowWriter) {
+    logger.debug("Adding map: {}", mapName);
+    int index = rowWriter.tupleSchema().index(mapName);
+    if (index == -1) {
+      // Check to see if the map already exists in the map collection
+      // This condition can occur in deeply nested data.
+      String tempFieldName = mapName + "-" + currentNestingLevel;
+      XMLMap mapObject = nestedMapCollection.get(tempFieldName);
+      if (mapObject != null) {
+        logger.debug("Found map {}", tempFieldName);
+        return mapObject.getMapWriter();
+      }
+
+      index = rowWriter.addColumn(SchemaBuilder.columnSchema(mapName, MinorType.MAP, DataMode.REQUIRED));
+      // Add map to map collection for future use
+      nestedMapCollection.put(tempFieldName, new XMLMap(mapName, rowWriter.tuple(index)));
+    }
+    return rowWriter.tuple(index);
+  }
+
+  private void changeState(xmlState newState) {
+    currentState = newState;
+  }
+
+  private TupleWriter getAttributeWriter() {
+    int attributeIndex = rootRowWriter.addColumn(SchemaBuilder.columnSchema(ATTRIBUTE_MAP_NAME, MinorType.MAP, DataMode.REQUIRED));
+    return rootRowWriter.tuple(attributeIndex);
+  }
+
+  /**
+   * Helper function which writes attributes of an XML element.
+   * @param prefix The attribute prefix
+   * @param attributes An iterator of Attribute objects
+   */
+  private void writeAttributes(String prefix, Iterator<Attribute> attributes) {
+    while (attributes.hasNext()) {
+      Attribute currentAttribute = attributes.next();
+      String key = prefix + "_" + currentAttribute.getName().toString();
+      writeAttributeData(key, currentAttribute.getValue(), attributeWriter);
+    }
+  }
+}
diff --git a/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLUtils.java b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLUtils.java
new file mode 100644
index 0000000..f11b483
--- /dev/null
+++ b/contrib/format-xml/src/main/java/org/apache/drill/exec/store/xml/XMLUtils.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import org.apache.drill.shaded.guava.com.google.common.base.Strings;
+import javax.xml.stream.XMLStreamConstants;
+import javax.xml.stream.events.XMLEvent;
+
+public class XMLUtils {
+
+  /**
+   * Empty events are not helpful so this method checks to see if the event consists solely of whitespace
+   * or newline characters.  Unfortunately, newlines and other extraneous characters are treated as new elements, so
+   * this method wraps those checks in one place.
+   * @param event The input XMLEvent
+   * @return True if the XMLEvent is only whitespace, false if not.
+   */
+  public static boolean isEmptyWhiteSpace(XMLEvent event) {
+    if (event.getEventType() == XMLStreamConstants.COMMENT) {
+      return true;
+    } else if (event.getEventType() != XMLStreamConstants.CHARACTERS) {
+      return false;
+    }
+
+    String value = event.asCharacters().getData();
+    if (Strings.isNullOrEmpty(value.trim())) {
+      return true;
+    } else {
+      return event.asCharacters().isIgnorableWhiteSpace();
+    }
+  }
+
+  /**
+   * Identifies XML events that may be populated but are not useful for extracting data.
+   * @param event The XMLEvent in question
+   * @return True if the event is useful, false if not
+   */
+  public static boolean isNotCruft(XMLEvent event) {
+    int eventType = event.getEventType();
+    return eventType == XMLStreamConstants.CHARACTERS ||
+      eventType == XMLStreamConstants.START_ELEMENT ||
+      eventType == XMLStreamConstants.END_ELEMENT;
+  }
+
+  /**
+   * Generates a nested field name by combining a field prefix with the current field name.
+   * @param prefix The prefix to be added to the field name.
+   * @param field The field name
+   * @return The prefix, followed by an underscore and the field name.
+   */
+  public static String addField(String prefix, String field) {
+    if (Strings.isNullOrEmpty(prefix)) {
+      return field;
+    }
+    return prefix + "_" + field;
+  }
+
+  /**
+   * Removes the given field name from the end of a nested field-name prefix.
+   * @param prefix The nested field-name prefix
+   * @param fieldName The field name to remove
+   * @return The prefix with the trailing field name (and its underscore separator) removed
+   */
+  public static String removeField(String prefix, String fieldName) {
+    if (fieldName == null) {
+      return "";
+    }
+
+    int index = prefix.lastIndexOf(fieldName);
+    if (index == 0) {
+      return "";
+    } else if (index < 0) {
+      return prefix;
+    }
+
+    return prefix.substring(0, index-1);
+  }
+}
diff --git a/contrib/format-xml/src/main/resources/bootstrap-format-plugins.json b/contrib/format-xml/src/main/resources/bootstrap-format-plugins.json
new file mode 100644
index 0000000..ef5f59c
--- /dev/null
+++ b/contrib/format-xml/src/main/resources/bootstrap-format-plugins.json
@@ -0,0 +1,26 @@
+{
+  "storage":{
+    "dfs": {
+      "type": "file",
+      "formats": {
+        "xml": {
+          "type": "xml",
+          "extensions": [
+            "xml"
+          ]
+        }
+      }
+    },
+    "s3": {
+      "type": "file",
+      "formats": {
+        "xml": {
+          "type": "xml",
+          "extensions": [
+            "xml"
+          ]
+        }
+      }
+    }
+  }
+}
diff --git a/contrib/format-xml/src/main/resources/drill-module.conf b/contrib/format-xml/src/main/resources/drill-module.conf
new file mode 100644
index 0000000..04406a3
--- /dev/null
+++ b/contrib/format-xml/src/main/resources/drill-module.conf
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#  This file tells Drill to consider this module when class path scanning.
+#  This file can also include any supplementary configuration information.
+#  This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md for more information.
+
+drill.classpath.scanning: {
+  packages += "org.apache.drill.exec.store.xml"
+}
diff --git a/contrib/format-xml/src/test/java/org/apache/drill/exec/store/xml/TestXMLReader.java b/contrib/format-xml/src/test/java/org/apache/drill/exec/store/xml/TestXMLReader.java
new file mode 100644
index 0000000..e32a173
--- /dev/null
+++ b/contrib/format-xml/src/test/java/org/apache/drill/exec/store/xml/TestXMLReader.java
@@ -0,0 +1,546 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import org.apache.drill.categories.RowSetTests;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.rowSet.RowSetComparison;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.nio.file.Paths;
+
+import static org.apache.drill.test.QueryTestUtil.generateCompressedFile;
+import static org.apache.drill.test.rowSet.RowSetUtilities.mapArray;
+import static org.apache.drill.test.rowSet.RowSetUtilities.objArray;
+import static org.apache.drill.test.rowSet.RowSetUtilities.strArray;
+import static org.junit.Assert.assertEquals;
+
+@Category(RowSetTests.class)
+public class TestXMLReader extends ClusterTest {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    XMLFormatConfig formatConfig = new XMLFormatConfig(null, 2);
+    cluster.defineFormat("cp", "xml", formatConfig);
+    cluster.defineFormat("dfs", "xml", formatConfig);
+
+    // Needed for compressed file unit test
+    dirTestWatcher.copyResourceToRoot(Paths.get("xml/"));
+  }
+
+  /**
+   * This unit test tests a simple XML file with no nesting or attributes
+   * @throws Exception Throw exception if anything goes wrong
+   */
+  @Test
+  public void testWildcard() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/simple.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+    assertEquals(3, results.rowCount());
+
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .add("attributes", MinorType.MAP)
+      .addNullable("groupID", MinorType.VARCHAR)
+      .addNullable("artifactID", MinorType.VARCHAR)
+      .addNullable("version", MinorType.VARCHAR)
+      .addNullable("classifier", MinorType.VARCHAR)
+      .addNullable("scope", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(mapArray(), "org.apache.drill.exec", "drill-java-exec", "${project.version}", null, null)
+      .addRow(mapArray(),"org.apache.drill.exec", "drill-java-exec", "${project.version}", "tests", "test")
+      .addRow(mapArray(),"org.apache.drill", "drill-common", "${project.version}", "tests", "test")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testSelfClosingTags() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/weather.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+    assertEquals(1, results.rowCount());
+
+      TupleMetadata expectedSchema = new SchemaBuilder()
+        .addMap("attributes")
+          .addNullable("forecast_information_city_data", MinorType.VARCHAR)
+          .addNullable("forecast_information_postal_code_data", MinorType.VARCHAR)
+          .addNullable("forecast_information_latitude_e6_data", MinorType.VARCHAR)
+          .addNullable("forecast_information_longitude_e6_data", MinorType.VARCHAR)
+          .addNullable("forecast_information_forecast_date_data", MinorType.VARCHAR)
+          .addNullable("forecast_information_current_date_time_data", MinorType.VARCHAR)
+          .addNullable("forecast_information_unit_system_data", MinorType.VARCHAR)
+          .addNullable("current_conditions_condition_data", MinorType.VARCHAR)
+          .addNullable("current_conditions_temp_f_data", MinorType.VARCHAR)
+          .addNullable("current_conditions_temp_c_data", MinorType.VARCHAR)
+          .addNullable("current_conditions_humidity_data", MinorType.VARCHAR)
+          .addNullable("current_conditions_icon_data", MinorType.VARCHAR)
+          .addNullable("current_conditions_wind_condition_data", MinorType.VARCHAR)
+        .resumeSchema()
+        .addNullable("city", MinorType.VARCHAR)
+        .addNullable("postal_code", MinorType.VARCHAR)
+        .addNullable("latitude_e6", MinorType.VARCHAR)
+        .addNullable("longitude_e6", MinorType.VARCHAR)
+        .addNullable("forecast_date", MinorType.VARCHAR)
+        .addNullable("current_date_time", MinorType.VARCHAR)
+        .addNullable("unit_system", MinorType.VARCHAR)
+        .addNullable("condition", MinorType.VARCHAR)
+        .addNullable("temp_f", MinorType.VARCHAR)
+        .addNullable("temp_c", MinorType.VARCHAR)
+        .addNullable("humidity", MinorType.VARCHAR)
+        .addNullable("icon", MinorType.VARCHAR)
+        .addNullable("wind_condition", MinorType.VARCHAR)
+        .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow((Object)strArray("Seattle, WA", "Seattle WA", "", "", "2011-09-29", "2011-09-29 17:53:00 +0000", "US", "Clear", "62", "17", "Humidity: 62%", "/ig/images/weather" +
+        "/sunny.gif", "Wind: N at 4 mph"), null, null, null, null, null, null, null, null, null, null, null, null, null)
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  /**
+   * This unit test tests a simple XML file with no nesting or attributes, but with explicitly selected fields.
+   * @throws Exception Throw exception if anything goes wrong
+   */
+  @Test
+  public void testExplicitWithSimpleXMLFile() throws Exception {
+    String sql = "SELECT groupID, artifactID, version, classifier, scope FROM cp.`xml/simple.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    assertEquals(3, results.rowCount());
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("groupID", MinorType.VARCHAR)
+      .addNullable("artifactID", MinorType.VARCHAR)
+      .addNullable("version", MinorType.VARCHAR)
+      .addNullable("classifier", MinorType.VARCHAR)
+      .addNullable("scope", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("org.apache.drill.exec", "drill-java-exec", "${project.version}", null, null)
+      .addRow("org.apache.drill.exec", "drill-java-exec", "${project.version}", "tests", "test")
+      .addRow("org.apache.drill", "drill-common", "${project.version}", "tests", "test")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testWildcardWithFilter() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/simple.xml` WHERE scope='test'";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+    assertEquals(2, results.rowCount());
+
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .add("attributes", MinorType.MAP)
+      .addNullable("groupID", MinorType.VARCHAR)
+      .addNullable("artifactID", MinorType.VARCHAR)
+      .addNullable("version", MinorType.VARCHAR)
+      .addNullable("classifier", MinorType.VARCHAR)
+      .addNullable("scope", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(mapArray(),"org.apache.drill.exec", "drill-java-exec", "${project.version}", "tests", "test")
+      .addRow(mapArray(),"org.apache.drill", "drill-common", "${project.version}", "tests", "test")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testWildcardWithSingleNestedDataField() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/really-simple-nested.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+    assertEquals(3, results.rowCount());
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .add("attributes", MinorType.MAP, DataMode.REQUIRED)
+      .addMap("field1")
+        .addNullable("key1", MinorType.VARCHAR)
+        .addNullable("key2", MinorType.VARCHAR)
+      .resumeSchema()
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(mapArray(), strArray("value1", "value2"))
+      .addRow(mapArray(), strArray("value3", "value4"))
+      .addRow(mapArray(), strArray("value5", "value6"))
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testExplicitWithSingleNestedDataField() throws Exception {
+    String sql = "SELECT t1.field1.key1 as key1, t1.field1.key2 as key2 FROM cp.`xml/really-simple-nested.xml` as t1";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+    assertEquals(3, results.rowCount());
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("key1", MinorType.VARCHAR)
+      .addNullable("key2", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("value1", "value2")
+      .addRow("value3", "value4")
+      .addRow("value5", "value6")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
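+  /**
+   * Verifies that the physical plan for an XML scan can be serialized and executed again.
+   * @throws Exception if anything goes wrong
+   */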
+  @Test
+  public void testSerDe() throws Exception {
+    String sql = "SELECT COUNT(*) FROM cp.`xml/simple.xml`";
+    String plan = queryBuilder().sql(sql).explainJson();
+    long cnt = queryBuilder().physical(plan).singletonLong();
+    assertEquals("Counts should match", 3L, cnt);
+  }
+
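+  /**
+   * Tests explicitly selected fields from a zip-compressed copy of the simple XML file.
+   * @throws Exception if anything goes wrong
+   */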
+  @Test
+  public void testExplicitWithCompressedSimpleXMLFile() throws Exception {
+    generateCompressedFile("xml/simple.xml", "zip", "xml/simple.xml.zip");
+
+    String sql = "SELECT groupID, artifactID, version, classifier, scope FROM dfs.`xml/simple.xml.zip`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    assertEquals(3, results.rowCount());
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("groupID", MinorType.VARCHAR)
+      .addNullable("artifactID", MinorType.VARCHAR)
+      .addNullable("version", MinorType.VARCHAR)
+      .addNullable("classifier", MinorType.VARCHAR)
+      .addNullable("scope", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("org.apache.drill.exec", "drill-java-exec", "${project.version}", null, null)
+      .addRow("org.apache.drill.exec", "drill-java-exec", "${project.version}", "tests", "test")
+      .addRow("org.apache.drill", "drill-common", "${project.version}", "tests", "test")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testDeepNestedSpecificFields() throws Exception {
+    String sql = "select xml.level2.level3.level4.level5.level6.level7.field1 as field1, xml.level2.level3.level4.level5.level6.level7.field2 as field2, xml.level2.level3.level4" +
+      ".level5.level6.level7.field3 as field3 FROM cp.`xml/deep-nested.xml` as xml";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    assertEquals(2, results.rowCount());
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("field1", MinorType.VARCHAR)
+      .addNullable("field2", MinorType.VARCHAR)
+      .addNullable("field3", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("f1", "f2", "f3")
+      .addRow("f4", "f5", "f6")
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
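+  /**
+   * Tests a wildcard query against a deeply nested XML file; each level of nesting should be
+   * projected as a nested Drill map.
+   * @throws Exception if anything goes wrong
+   */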
+  @Test
+  public void testDeepNesting() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/deep-nested.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    assertEquals(2, results.rowCount());
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .add("attributes", MinorType.MAP, DataMode.REQUIRED)
+      .addMap("level2")
+        .addNullable("field1-level2", MinorType.VARCHAR)
+        .addMap("level3")
+        .addNullable("field1-level3", MinorType.VARCHAR)
+          .addMap("level4")
+          .addNullable("field1-level4", MinorType.VARCHAR)
+            .addMap("level5")
+            .addNullable("field1-level5", MinorType.VARCHAR)
+              .addMap("level6")
+              .addNullable("field1-level6", MinorType.VARCHAR)
+                .addMap("level7")
+                .addNullable("field1", MinorType.VARCHAR)
+                .addNullable("field2", MinorType.VARCHAR)
+                .addNullable("field3", MinorType.VARCHAR)
+              .resumeMap()  // End level 7
+              .resumeMap()   // End level 6
+            .resumeMap() // End level 5
+          .resumeMap() // End level 4
+        .resumeMap() // End level 3
+      .resumeSchema()
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(mapArray(), objArray(
+        objArray(
+          "l2",
+          objArray("l3",
+            objArray("l4",
+              objArray("l5",
+                objArray("l6",
+                  strArray("f1", "f2", "f3")
+                )
+              )
+            )
+          )
+        )
+      ))
+      .addRow(mapArray(), objArray(
+        objArray(
+          null,
+          objArray(null,
+            objArray(null,
+              objArray(null,
+                objArray(null,
+                  strArray("f4", "f5", "f6")
+                )
+              )
+            )
+          )
+        )
+      ))
+      .build();
+
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
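+  /**
+   * Tests the dataLevel option supplied via a table function, which sets the nesting level at
+   * which the reader starts treating elements as data.
+   * @throws Exception if anything goes wrong
+   */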
+  @Test
+  public void testDataLevel() throws Exception {
+    String sql = "SELECT * FROM table(cp.`xml/deep-nested2.xml` (type => 'xml', dataLevel => 8))";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .add("attributes", MinorType.MAP, DataMode.REQUIRED)
+      .addNullable("field1", MinorType.VARCHAR)
+      .addNullable("field2", MinorType.VARCHAR)
+      .addNullable("field3", MinorType.VARCHAR)
+      .addNullable("field1-level6", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(mapArray(), "f4", "f5", "f6", null)
+      .addRow(mapArray(), "f1", "f2", "f3", "l6")
+      .build();
+
+    assertEquals(2, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testExplicitDataLevel() throws Exception {
+    String sql = "SELECT field1, field2, field3 FROM table(cp.`xml/deep-nested2.xml` (type => 'xml', dataLevel => 8))";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("field1", MinorType.VARCHAR)
+      .addNullable("field2", MinorType.VARCHAR)
+      .addNullable("field3", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("f4", "f5", "f6")
+      .addRow("f1", "f2", "f3")
+      .build();
+
+    assertEquals(2, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testComplexWildcardStar() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/nested.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .add("attributes", MinorType.MAP, DataMode.REQUIRED)
+      .addMap("field1")
+        .addNullable("key1", MinorType.VARCHAR)
+        .addNullable("key2", MinorType.VARCHAR)
+      .resumeSchema()
+      .addMap("field2")
+        .addNullable("key3", MinorType.VARCHAR)
+        .addMap("nestedField1")
+          .addNullable("nk1", MinorType.VARCHAR)
+          .addNullable("nk2", MinorType.VARCHAR)
+          .addNullable("nk3", MinorType.VARCHAR)
+        .resumeMap()
+      .resumeSchema()
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(mapArray(), strArray("value1", "value2"), objArray("k1", strArray("nk_value1", "nk_value2", "nk_value3")))
+      .addRow(mapArray(), strArray("value3", "value4"), objArray("k2", strArray("nk_value4", "nk_value5", "nk_value6")))
+      .addRow(mapArray(), strArray("value5", "value6"), objArray("k3", strArray("nk_value7", "nk_value8", "nk_value9")))
+      .build();
+
+    assertEquals(3, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testComplexNestedExplicit() throws Exception {
+    String sql = "SELECT xml.field2.nestedField1.nk1 as nk1, xml.field2.nestedField1.nk2 as nk2, xml.field2.nestedField1.nk3 as nk3 FROM cp.`xml/nested.xml` AS xml";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("nk1", MinorType.VARCHAR)
+      .addNullable("nk2", MinorType.VARCHAR)
+      .addNullable("nk3", MinorType.VARCHAR)
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("nk_value1", "nk_value2", "nk_value3")
+      .addRow("nk_value4", "nk_value5", "nk_value6")
+      .addRow("nk_value7", "nk_value8", "nk_value9")
+      .build();
+
+    assertEquals(3, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
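+  /**
+   * Tests that XML attributes are collected into the flattened attributes map.
+   * @throws Exception if anything goes wrong
+   */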
+  @Test
+  public void testAttributes() throws Exception {
+    String sql = "SELECT attributes FROM cp.`xml/attributes.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addMap("attributes")
+        .addNullable("title_binding", MinorType.VARCHAR)
+        .addNullable("title_subcategory", MinorType.VARCHAR)
+      .resumeSchema()
+      .build();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow((Object) mapArray(null, null))
+      .addRow((Object) strArray("paperback", null))
+      .addRow((Object) strArray("hardcover", "non-fiction"))
+      .build();
+
+    assertEquals(3, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
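+  /**
+   * Tests a wildcard query against nested XML with attributes; attribute names are flattened
+   * with underscores into the attributes map.
+   * @throws Exception if anything goes wrong
+   */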
+  @Test
+  public void testNestedAttributes() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/nested-with-attributes.xml`";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addMap("attributes")
+        .addNullable("field1_f1", MinorType.VARCHAR)
+        .addNullable("field2_f2", MinorType.VARCHAR)
+        .addNullable("field2_key3_f3", MinorType.VARCHAR)
+        .addNullable("field2_nestedField1_f4", MinorType.VARCHAR)
+        .addNullable("field2_nestedField1_f5", MinorType.VARCHAR)
+        .addNullable("field2_nestedField1_nk1_f6", MinorType.VARCHAR)
+        .addNullable("field2_nestedField1_nk1_f7", MinorType.VARCHAR)
+        .addNullable("field2_nestedField1_nk3_f8", MinorType.VARCHAR)
+      .resumeSchema()
+      .addMap("field1")
+      .addNullable("key1", MinorType.VARCHAR)
+      .addNullable("key2", MinorType.VARCHAR)
+      .resumeSchema()
+      .addMap("field2")
+      .addNullable("key3", MinorType.VARCHAR)
+      .addMap("nestedField1")
+      .addNullable("nk1", MinorType.VARCHAR)
+      .addNullable("nk2", MinorType.VARCHAR)
+      .addNullable("nk3", MinorType.VARCHAR)
+      .resumeMap()
+      .resumeSchema()
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow(strArray("k1", "k2", "k3", "k4", "k5", "k6", "k7", null), strArray("value1", "value2"), objArray("k1", strArray("nk_value1", "nk_value2", "nk_value3")))
+      .addRow(strArray(null, null, null, null, null, null, null, null), strArray("value3", "value4"), objArray("k2", strArray("nk_value4", "nk_value5", "nk_value6")))
+      .addRow(strArray(null, null, null, null, null, null, null, "k8"), strArray("value5", "value6"), objArray("k3", strArray("nk_value7", "nk_value8", "nk_value9")))
+      .build();
+
+    assertEquals(3, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
+  @Test
+  public void testExplicitNestedAttributes() throws Exception {
+    String sql = "SELECT data.attributes.field1_f1 AS field1_f1," +
+      "data.attributes.field2_f2 AS field2_f2, " +
+      "data.attributes.field2_key3_f3 AS field2_key3_f3," +
+      "data.attributes.field2_nestedField1_f4 AS field2_nestedField1_f4," +
+      "data.attributes.field2_nestedField1_f5 AS field2_nestedField1_f5, " +
+      "data.attributes.field2_nestedField1_nk1_f6 AS field2_nestedField1_nk1_f6, " +
+      "data.attributes.field2_nestedField1_nk1_f7 AS field2_nestedField1_nk1_f7," +
+      "data.attributes.field2_nestedField1_nk3_f8 AS field2_nestedField1_nk3_f8 " +
+      "FROM cp.`xml/nested-with-attributes.xml` AS data";
+    RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata expectedSchema = new SchemaBuilder()
+      .addNullable("field1_f1", MinorType.VARCHAR)
+      .addNullable("field2_f2", MinorType.VARCHAR)
+      .addNullable("field2_key3_f3", MinorType.VARCHAR)
+      .addNullable("field2_nestedField1_f4", MinorType.VARCHAR)
+      .addNullable("field2_nestedField1_f5", MinorType.VARCHAR)
+      .addNullable("field2_nestedField1_nk1_f6", MinorType.VARCHAR)
+      .addNullable("field2_nestedField1_nk1_f7", MinorType.VARCHAR)
+      .addNullable("field2_nestedField1_nk3_f8", MinorType.VARCHAR)
+      .buildSchema();
+
+    RowSet expected = client.rowSetBuilder(expectedSchema)
+      .addRow("k1", "k2", "k3", "k4", "k5", "k6", "k7", null)
+      .addRow(null, null, null, null, null, null, null, null)
+      .addRow(null, null, null, null, null, null, null, "k8")
+      .build();
+    assertEquals(3, results.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(results);
+  }
+
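+  /**
+   * Verifies that a LIMIT clause is pushed down by checking for maxRecords in the query plan.
+   * @throws Exception if anything goes wrong
+   */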
+  @Test
+  public void testLimitPushdown() throws Exception {
+    String sql = "SELECT * FROM cp.`xml/simple.xml` LIMIT 2";
+
+    queryBuilder()
+      .sql(sql)
+      .planMatcher()
+      .include("Limit", "maxRecords=2")
+      .match();
+  }
+}
diff --git a/contrib/format-xml/src/test/java/org/apache/drill/exec/store/xml/TestXMLUtils.java b/contrib/format-xml/src/test/java/org/apache/drill/exec/store/xml/TestXMLUtils.java
new file mode 100644
index 0000000..06d70ee
--- /dev/null
+++ b/contrib/format-xml/src/test/java/org/apache/drill/exec/store/xml/TestXMLUtils.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.xml;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestXMLUtils {
+
+  @Test
+  public void testRemoveField() {
+    String test1 = "field1_field2_field3";
+    assertEquals(XMLUtils.removeField(test1, "field3"), "field1_field2");
+
+    // Test with underscores
+    String test2 = "field_1_field_2_field_3";
+    assertEquals(XMLUtils.removeField(test2, "field_3"), "field_1_field_2");
+
+    // Test with missing field
+    String test3 = "field_1_field_2_field_3";
+    assertEquals(XMLUtils.removeField(test3, "field_4"), "field_1_field_2_field_3");
+
+    // Test with empty string
+    String test4 = "";
+    assertEquals(XMLUtils.removeField(test4, "field_4"), "");
+  }
+}
diff --git a/contrib/format-xml/src/test/resources/xml/attributes.xml b/contrib/format-xml/src/test/resources/xml/attributes.xml
new file mode 100644
index 0000000..a44eca0
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/attributes.xml
@@ -0,0 +1,42 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<books>
+  <book>
+    <author>Mark Twain</author>
+    <title>The Adventures of Tom Sawyer</title>
+    <category>FICTION</category>
+    <year>1876</year>
+  </book>
+  <book>
+    <authors>
+        <author>Niklaus Wirth</author>
+        <author>Somebody else</author>
+    </authors>
+    <title binding="paperback">The Programming Language Pascal</title>
+    <category >PASCAL</category>
+    <year>1971</year>
+  </book>
+  <book>
+    <author>O.-J. Dahl</author>
+    <title binding="hardcover" subcategory="non-fiction">Structured Programming</title>
+    <category>PROGRAMMING</category>
+    <year>1972</year>
+  </book>
+</books>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/deep-nested.xml b/contrib/format-xml/src/test/resources/xml/deep-nested.xml
new file mode 100644
index 0000000..2d28289
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/deep-nested.xml
@@ -0,0 +1,60 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<root>
+  <level1>
+    <level2>
+      <field1-level2>l2</field1-level2>
+      <level3>
+        <field1-level3>l3</field1-level3>
+        <level4>
+          <field1-level4>l4</field1-level4>
+          <level5>
+            <field1-level5>l5</field1-level5>
+            <level6>
+              <field1-level6>l6</field1-level6>
+              <level7>
+                <field1>f1</field1>
+                <field2>f2</field2>
+                <field3>f3</field3>
+              </level7>
+            </level6>
+          </level5>
+        </level4>
+      </level3>
+    </level2>
+  </level1>
+  <level1>
+    <level2>
+      <level3>
+        <level4>
+          <level5>
+            <level6>
+              <level7>
+                <field1>f4</field1>
+                <field2>f5</field2>
+                <field3>f6</field3>
+              </level7>
+            </level6>
+          </level5>
+        </level4>
+      </level3>
+    </level2>
+  </level1>
+</root>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/deep-nested2.xml b/contrib/format-xml/src/test/resources/xml/deep-nested2.xml
new file mode 100644
index 0000000..0a1b787
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/deep-nested2.xml
@@ -0,0 +1,60 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<root>
+  <level1>
+    <level2>
+      <level3>
+        <level4>
+          <level5>
+            <level6>
+              <level7>
+                <field1>f4</field1>
+                <field2>f5</field2>
+                <field3>f6</field3>
+              </level7>
+            </level6>
+          </level5>
+        </level4>
+      </level3>
+    </level2>
+  </level1>
+  <level1>
+    <level2>
+      <field1-level2>l2</field1-level2>
+      <level3>
+        <field1-level3>l3</field1-level3>
+        <level4>
+          <field1-level4>l4</field1-level4>
+          <level5>
+            <field1-level5>l5</field1-level5>
+            <level6>
+              <field1-level6>l6</field1-level6>
+              <level7>
+                <field1>f1</field1>
+                <field2>f2</field2>
+                <field3>f3</field3>
+              </level7>
+            </level6>
+          </level5>
+        </level4>
+      </level3>
+    </level2>
+  </level1>
+</root>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/nested-with-attributes.xml b/contrib/format-xml/src/test/resources/xml/nested-with-attributes.xml
new file mode 100644
index 0000000..c09ae90
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/nested-with-attributes.xml
@@ -0,0 +1,63 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<books>
+  <book>
+    <field1 f1="k1">
+      <key1>value1</key1>
+      <key2>value2</key2>
+    </field1>
+    <field2 f2="k2">
+      <key3 f3="k3">k1</key3>
+      <nestedField1 f4="k4" f5="k5">
+        <nk1 f6="k6" f7="k7">nk_value1</nk1>
+        <nk2>nk_value2</nk2>
+        <nk3>nk_value3</nk3>
+      </nestedField1>
+    </field2>
+  </book>
+  <book>
+    <field1>
+      <key1>value3</key1>
+      <key2>value4</key2>
+    </field1>
+    <field2>
+      <key3>k2</key3>
+      <nestedField1>
+        <nk1>nk_value4</nk1>
+        <nk2>nk_value5</nk2>
+        <nk3>nk_value6</nk3>
+      </nestedField1>
+    </field2>
+  </book>
+  <book>
+    <field1>
+      <key1>value5</key1>
+      <key2>value6</key2>
+    </field1>
+    <field2>
+      <key3>k3</key3>
+      <nestedField1>
+        <nk1>nk_value7</nk1>
+        <nk2>nk_value8</nk2>
+        <nk3 f8="k8">nk_value9</nk3>
+      </nestedField1>
+    </field2>
+  </book>
+</books>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/nested.xml b/contrib/format-xml/src/test/resources/xml/nested.xml
new file mode 100644
index 0000000..da94687
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/nested.xml
@@ -0,0 +1,63 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<books>
+  <book>
+    <field1>
+      <key1>value1</key1>
+      <key2>value2</key2>
+    </field1>
+    <field2>
+      <key3>k1</key3>
+      <nestedField1>
+        <nk1>nk_value1</nk1>
+        <nk2>nk_value2</nk2>
+        <nk3>nk_value3</nk3>
+      </nestedField1>
+    </field2>
+  </book>
+  <book>
+    <field1>
+      <key1>value3</key1>
+      <key2>value4</key2>
+    </field1>
+    <field2>
+      <key3>k2</key3>
+      <nestedField1>
+        <nk1>nk_value4</nk1>
+        <nk2>nk_value5</nk2>
+        <nk3>nk_value6</nk3>
+      </nestedField1>
+    </field2>
+  </book>
+  <book>
+    <field1>
+      <key1>value5</key1>
+      <key2>value6</key2>
+    </field1>
+    <field2>
+      <key3>k3</key3>
+      <nestedField1>
+        <nk1>nk_value7</nk1>
+        <nk2>nk_value8</nk2>
+        <nk3>nk_value9</nk3>
+      </nestedField1>
+    </field2>
+  </book>
+</books>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/really-simple-nested.xml b/contrib/format-xml/src/test/resources/xml/really-simple-nested.xml
new file mode 100644
index 0000000..5bb1d18
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/really-simple-nested.xml
@@ -0,0 +1,39 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<books>
+  <book>
+    <field1>
+      <key1>value1</key1>
+      <key2>value2</key2>
+    </field1>
+  </book>
+  <book>
+    <field1>
+      <key1>value3</key1>
+      <key2>value4</key2>
+    </field1>
+  </book>
+  <book>
+    <field1>
+      <key1>value5</key1>
+      <key2>value6</key2>
+    </field1>
+  </book>
+</books>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/simple.xml b/contrib/format-xml/src/test/resources/xml/simple.xml
new file mode 100644
index 0000000..f651ed6
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/simple.xml
@@ -0,0 +1,42 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<dependencies>
+  <dependency>
+    <groupId>org.apache.drill.exec</groupId>
+    <artifactId>drill-java-exec</artifactId>
+    <version>${project.version}</version>
+  </dependency>
+
+  <dependency>
+    <groupId>org.apache.drill.exec</groupId>
+    <artifactId>drill-java-exec</artifactId>
+    <classifier>tests</classifier>
+    <version>${project.version}</version>
+    <scope>test</scope>
+  </dependency>
+
+  <dependency>
+    <groupId>org.apache.drill</groupId>
+    <artifactId>drill-common</artifactId>
+    <classifier>tests</classifier>
+    <version>${project.version}</version>
+    <scope>test</scope>
+  </dependency>
+</dependencies>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/simple_schema.xsd b/contrib/format-xml/src/test/resources/xml/simple_schema.xsd
new file mode 100644
index 0000000..df825b3
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/simple_schema.xsd
@@ -0,0 +1,43 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+            xmlns:tns="http://tempuri.org/PurchaseOrderSchema.xsd"
+            targetNamespace="http://tempuri.org/PurchaseOrderSchema.xsd"
+            elementFormDefault="qualified">
+  <xsd:element name="PurchaseOrder" type="tns:PurchaseOrderType"/>
+  <xsd:complexType name="PurchaseOrderType">
+    <xsd:sequence>
+      <xsd:element name="ShipTo" type="tns:USAddress" maxOccurs="2"/>
+      <xsd:element name="BillTo" type="tns:USAddress"/>
+    </xsd:sequence>
+    <xsd:attribute name="OrderDate" type="xsd:date"/>
+  </xsd:complexType>
+
+  <xsd:complexType name="USAddress">
+    <xsd:sequence>
+      <xsd:element name="name"   type="xsd:string"/>
+      <xsd:element name="street" type="xsd:string"/>
+      <xsd:element name="city"   type="xsd:string"/>
+      <xsd:element name="state"  type="xsd:string"/>
+      <xsd:element name="zip"    type="xsd:integer"/>
+    </xsd:sequence>
+    <xsd:attribute name="country" type="xsd:NMTOKEN" fixed="US"/>
+  </xsd:complexType>
+</xsd:schema>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/very-nested-with-attributes.xml b/contrib/format-xml/src/test/resources/xml/very-nested-with-attributes.xml
new file mode 100644
index 0000000..655e5d5
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/very-nested-with-attributes.xml
@@ -0,0 +1,38 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<book>
+  <field1 f1="a1" f2="a2">
+    <key1>value1</key1>
+    <key2>value2</key2>
+  </field1>
+  <field2>
+    <key3 f3="a3" f4="a4">k1</key3>
+    <nestedField1>
+      <nk1>nk_value1</nk1>
+      <nk2>nk_value2</nk2>
+      <nk3>nk_value3</nk3>
+      <nestedField2>
+        <nk1 f5="a5">nk2_value1</nk1>
+        <nk2>nk2_value2</nk2>
+        <nk3>nk2_value3</nk3>
+      </nestedField2>
+    </nestedField1>
+  </field2>
+</book>
\ No newline at end of file
diff --git a/contrib/format-xml/src/test/resources/xml/weather.xml b/contrib/format-xml/src/test/resources/xml/weather.xml
new file mode 100644
index 0000000..9ab3c67
--- /dev/null
+++ b/contrib/format-xml/src/test/resources/xml/weather.xml
@@ -0,0 +1,40 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<xml_api_reply version="1">
+  <weather module_id="0" tab_id="0" mobile_row="0" mobile_zipped="1" row="0" section="0">
+    <forecast_information>
+      <city data="Seattle, WA"/>
+      <postal_code data="Seattle WA"/>
+      <latitude_e6 data=""/>
+      <longitude_e6 data=""/>
+      <forecast_date data="2011-09-29"/>
+      <current_date_time data="2011-09-29 17:53:00 +0000"/>
+      <unit_system data="US"/>
+    </forecast_information>
+    <current_conditions>
+      <condition data="Clear"/>
+      <temp_f data="62"/>
+      <temp_c data="17"/>
+      <humidity data="Humidity: 62%"/>
+      <icon data="/ig/images/weather/sunny.gif"/>
+      <wind_condition data="Wind: N at 4 mph"/>
+    </current_conditions>
+  </weather>
+</xml_api_reply>
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.cc b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
index 6dbd625..ad98dc4 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.cc
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
@@ -469,7 +469,7 @@
     {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, 0, InitDefaultsscc_info_UserCredentials_UserBitShared_2eproto}, {}};
 
 static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_UserBitShared_2eproto[22];
-static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_UserBitShared_2eproto[8];
+static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_UserBitShared_2eproto[7];
 static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_UserBitShared_2eproto = nullptr;
 
 const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_UserBitShared_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
@@ -742,14 +742,16 @@
   PROTOBUF_FIELD_OFFSET(::exec::shared::OperatorProfile, peak_local_memory_allocated_),
   PROTOBUF_FIELD_OFFSET(::exec::shared::OperatorProfile, metric_),
   PROTOBUF_FIELD_OFFSET(::exec::shared::OperatorProfile, wait_nanos_),
+  PROTOBUF_FIELD_OFFSET(::exec::shared::OperatorProfile, operator_type_name_),
   ~0u,
-  0,
   1,
   2,
   3,
   4,
-  ~0u,
   5,
+  ~0u,
+  6,
+  0,
   PROTOBUF_FIELD_OFFSET(::exec::shared::StreamProfile, _has_bits_),
   PROTOBUF_FIELD_OFFSET(::exec::shared::StreamProfile, _internal_metadata_),
   ~0u,  // no _extensions_
@@ -817,12 +819,12 @@
   { 169, 197, sizeof(::exec::shared::QueryProfile)},
   { 220, 227, sizeof(::exec::shared::MajorFragmentProfile)},
   { 229, 245, sizeof(::exec::shared::MinorFragmentProfile)},
-  { 256, 269, sizeof(::exec::shared::OperatorProfile)},
-  { 277, 285, sizeof(::exec::shared::StreamProfile)},
-  { 288, 296, sizeof(::exec::shared::MetricValue)},
-  { 299, 305, sizeof(::exec::shared::Registry)},
-  { 306, 313, sizeof(::exec::shared::Jar)},
-  { 315, 323, sizeof(::exec::shared::SaslMessage)},
+  { 256, 270, sizeof(::exec::shared::OperatorProfile)},
+  { 279, 287, sizeof(::exec::shared::StreamProfile)},
+  { 290, 298, sizeof(::exec::shared::MetricValue)},
+  { 301, 307, sizeof(::exec::shared::Registry)},
+  { 308, 315, sizeof(::exec::shared::Jar)},
+  { 317, 325, sizeof(::exec::shared::SaslMessage)},
 };
 
 static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@@ -935,68 +937,33 @@
   "y_used\030\007 \001(\003\022\027\n\017max_memory_used\030\010 \001(\003\022(\n"
   "\010endpoint\030\t \001(\0132\026.exec.DrillbitEndpoint\022"
   "\023\n\013last_update\030\n \001(\003\022\025\n\rlast_progress\030\013 "
-  "\001(\003\"\377\001\n\017OperatorProfile\0221\n\rinput_profile"
+  "\001(\003\"\237\002\n\017OperatorProfile\0221\n\rinput_profile"
   "\030\001 \003(\0132\032.exec.shared.StreamProfile\022\023\n\013op"
-  "erator_id\030\003 \001(\005\022\025\n\roperator_type\030\004 \001(\005\022\023"
-  "\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess_nanos\030\006 \001"
-  "(\003\022#\n\033peak_local_memory_allocated\030\007 \001(\003\022"
-  "(\n\006metric\030\010 \003(\0132\030.exec.shared.MetricValu"
-  "e\022\022\n\nwait_nanos\030\t \001(\003\"B\n\rStreamProfile\022\017"
-  "\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 \001(\003\022\017\n\007sche"
-  "mas\030\003 \001(\003\"J\n\013MetricValue\022\021\n\tmetric_id\030\001 "
-  "\001(\005\022\022\n\nlong_value\030\002 \001(\003\022\024\n\014double_value\030"
-  "\003 \001(\001\")\n\010Registry\022\035\n\003jar\030\001 \003(\0132\020.exec.sh"
-  "ared.Jar\"/\n\003Jar\022\014\n\004name\030\001 \001(\t\022\032\n\022functio"
-  "n_signature\030\002 \003(\t\"W\n\013SaslMessage\022\021\n\tmech"
-  "anism\030\001 \001(\t\022\014\n\004data\030\002 \001(\014\022\'\n\006status\030\003 \001("
-  "\0162\027.exec.shared.SaslStatus*5\n\nRpcChannel"
-  "\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER\020"
-  "\002*V\n\tQueryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010"
-  "PHYSICAL\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPARED_ST"
-  "ATEMENT\020\005*\207\001\n\rFragmentState\022\013\n\007SENDING\020\000"
-  "\022\027\n\023AWAITING_ALLOCATION\020\001\022\013\n\007RUNNING\020\002\022\014"
-  "\n\010FINISHED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILED\020\005\022"
-  "\032\n\026CANCELLATION_REQUESTED\020\006*\236\013\n\020CoreOper"
-  "atorType\022\021\n\rSINGLE_SENDER\020\000\022\024\n\020BROADCAST"
-  "_SENDER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_AGGREGATE\020"
-  "\003\022\r\n\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN\020\005\022\031\n\025HASH"
-  "_PARTITION_SENDER\020\006\022\t\n\005LIMIT\020\007\022\024\n\020MERGIN"
-  "G_RECEIVER\020\010\022\034\n\030ORDERED_PARTITION_SENDER"
-  "\020\t\022\013\n\007PROJECT\020\n\022\026\n\022UNORDERED_RECEIVER\020\013\022"
-  "\032\n\026RANGE_PARTITION_SENDER\020\014\022\n\n\006SCREEN\020\r\022"
-  "\034\n\030SELECTION_VECTOR_REMOVER\020\016\022\027\n\023STREAMI"
-  "NG_AGGREGATE\020\017\022\016\n\nTOP_N_SORT\020\020\022\021\n\rEXTERN"
-  "AL_SORT\020\021\022\t\n\005TRACE\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_S"
-  "ORT\020\024\022\032\n\026PARQUET_ROW_GROUP_SCAN\020\025\022\021\n\rHIV"
-  "E_SUB_SCAN\020\026\022\025\n\021SYSTEM_TABLE_SCAN\020\027\022\021\n\rM"
-  "OCK_SUB_SCAN\020\030\022\022\n\016PARQUET_WRITER\020\031\022\023\n\017DI"
-  "RECT_SUB_SCAN\020\032\022\017\n\013TEXT_WRITER\020\033\022\021\n\rTEXT"
-  "_SUB_SCAN\020\034\022\021\n\rJSON_SUB_SCAN\020\035\022\030\n\024INFO_S"
-  "CHEMA_SUB_SCAN\020\036\022\023\n\017COMPLEX_TO_JSON\020\037\022\025\n"
-  "\021PRODUCER_CONSUMER\020 \022\022\n\016HBASE_SUB_SCAN\020!"
-  "\022\n\n\006WINDOW\020\"\022\024\n\020NESTED_LOOP_JOIN\020#\022\021\n\rAV"
-  "RO_SUB_SCAN\020$\022\021\n\rPCAP_SUB_SCAN\020%\022\022\n\016KAFK"
-  "A_SUB_SCAN\020&\022\021\n\rKUDU_SUB_SCAN\020\'\022\013\n\007FLATT"
-  "EN\020(\022\020\n\014LATERAL_JOIN\020)\022\n\n\006UNNEST\020*\022,\n(HI"
-  "VE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN\020+"
-  "\022\r\n\tJDBC_SCAN\020,\022\022\n\016REGEX_SUB_SCAN\020-\022\023\n\017M"
-  "APRDB_SUB_SCAN\020.\022\022\n\016MONGO_SUB_SCAN\020/\022\017\n\013"
-  "KUDU_WRITER\0200\022\026\n\022OPEN_TSDB_SUB_SCAN\0201\022\017\n"
-  "\013JSON_WRITER\0202\022\026\n\022HTPPD_LOG_SUB_SCAN\0203\022\022"
-  "\n\016IMAGE_SUB_SCAN\0204\022\025\n\021SEQUENCE_SUB_SCAN\020"
-  "5\022\023\n\017PARTITION_LIMIT\0206\022\023\n\017PCAPNG_SUB_SCA"
-  "N\0207\022\022\n\016RUNTIME_FILTER\0208\022\017\n\013ROWKEY_JOIN\0209"
-  "\022\023\n\017SYSLOG_SUB_SCAN\020:\022\030\n\024STATISTICS_AGGR"
-  "EGATE\020;\022\020\n\014UNPIVOT_MAPS\020<\022\024\n\020STATISTICS_"
-  "MERGE\020=\022\021\n\rLTSV_SUB_SCAN\020>\022\021\n\rHDF5_SUB_S"
-  "CAN\020\?\022\022\n\016EXCEL_SUB_SCAN\020@\022\020\n\014SHP_SUB_SCA"
-  "N\020A\022\024\n\020METADATA_HANDLER\020B\022\027\n\023METADATA_CO"
-  "NTROLLER\020C\022\022\n\016DRUID_SUB_SCAN\020D\022\021\n\rSPSS_S"
-  "UB_SCAN\020E\022\021\n\rHTTP_SUB_SCAN\020F*g\n\nSaslStat"
-  "us\022\020\n\014SASL_UNKNOWN\020\000\022\016\n\nSASL_START\020\001\022\024\n\020"
-  "SASL_IN_PROGRESS\020\002\022\020\n\014SASL_SUCCESS\020\003\022\017\n\013"
-  "SASL_FAILED\020\004B.\n\033org.apache.drill.exec.p"
-  "rotoB\rUserBitSharedH\001"
+  "erator_id\030\003 \001(\005\022\031\n\roperator_type\030\004 \001(\005B\002"
+  "\030\001\022\023\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess_nanos"
+  "\030\006 \001(\003\022#\n\033peak_local_memory_allocated\030\007 "
+  "\001(\003\022(\n\006metric\030\010 \003(\0132\030.exec.shared.Metric"
+  "Value\022\022\n\nwait_nanos\030\t \001(\003\022\032\n\022operator_ty"
+  "pe_name\030\n \001(\t\"B\n\rStreamProfile\022\017\n\007record"
+  "s\030\001 \001(\003\022\017\n\007batches\030\002 \001(\003\022\017\n\007schemas\030\003 \001("
+  "\003\"J\n\013MetricValue\022\021\n\tmetric_id\030\001 \001(\005\022\022\n\nl"
+  "ong_value\030\002 \001(\003\022\024\n\014double_value\030\003 \001(\001\")\n"
+  "\010Registry\022\035\n\003jar\030\001 \003(\0132\020.exec.shared.Jar"
+  "\"/\n\003Jar\022\014\n\004name\030\001 \001(\t\022\032\n\022function_signat"
+  "ure\030\002 \003(\t\"W\n\013SaslMessage\022\021\n\tmechanism\030\001 "
+  "\001(\t\022\014\n\004data\030\002 \001(\014\022\'\n\006status\030\003 \001(\0162\027.exec"
+  ".shared.SaslStatus*5\n\nRpcChannel\022\017\n\013BIT_"
+  "CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER\020\002*V\n\tQue"
+  "ryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010PHYSICAL"
+  "\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPARED_STATEMENT\020"
+  "\005*\207\001\n\rFragmentState\022\013\n\007SENDING\020\000\022\027\n\023AWAI"
+  "TING_ALLOCATION\020\001\022\013\n\007RUNNING\020\002\022\014\n\010FINISH"
+  "ED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILED\020\005\022\032\n\026CANCE"
+  "LLATION_REQUESTED\020\006*g\n\nSaslStatus\022\020\n\014SAS"
+  "L_UNKNOWN\020\000\022\016\n\nSASL_START\020\001\022\024\n\020SASL_IN_P"
+  "ROGRESS\020\002\022\020\n\014SASL_SUCCESS\020\003\022\017\n\013SASL_FAIL"
+  "ED\020\004B.\n\033org.apache.drill.exec.protoB\rUse"
+  "rBitSharedH\001"
   ;
 static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_UserBitShared_2eproto_deps[3] = {
   &::descriptor_table_Coordination_2eproto,
@@ -1030,7 +997,7 @@
 static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_UserBitShared_2eproto_once;
 static bool descriptor_table_UserBitShared_2eproto_initialized = false;
 const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_UserBitShared_2eproto = {
-  &descriptor_table_UserBitShared_2eproto_initialized, descriptor_table_protodef_UserBitShared_2eproto, "UserBitShared.proto", 5821,
+  &descriptor_table_UserBitShared_2eproto_initialized, descriptor_table_protodef_UserBitShared_2eproto, "UserBitShared.proto", 4412,
   &descriptor_table_UserBitShared_2eproto_once, descriptor_table_UserBitShared_2eproto_sccs, descriptor_table_UserBitShared_2eproto_deps, 22, 3,
   schemas, file_default_instances, TableStruct_UserBitShared_2eproto::offsets,
   file_level_metadata_UserBitShared_2eproto, 22, file_level_enum_descriptors_UserBitShared_2eproto, file_level_service_descriptors_UserBitShared_2eproto,
@@ -1192,92 +1159,9 @@
   }
 }
 
-const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* CoreOperatorType_descriptor() {
-  ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_UserBitShared_2eproto);
-  return file_level_enum_descriptors_UserBitShared_2eproto[6];
-}
-bool CoreOperatorType_IsValid(int value) {
-  switch (value) {
-    case 0:
-    case 1:
-    case 2:
-    case 3:
-    case 4:
-    case 5:
-    case 6:
-    case 7:
-    case 8:
-    case 9:
-    case 10:
-    case 11:
-    case 12:
-    case 13:
-    case 14:
-    case 15:
-    case 16:
-    case 17:
-    case 18:
-    case 19:
-    case 20:
-    case 21:
-    case 22:
-    case 23:
-    case 24:
-    case 25:
-    case 26:
-    case 27:
-    case 28:
-    case 29:
-    case 30:
-    case 31:
-    case 32:
-    case 33:
-    case 34:
-    case 35:
-    case 36:
-    case 37:
-    case 38:
-    case 39:
-    case 40:
-    case 41:
-    case 42:
-    case 43:
-    case 44:
-    case 45:
-    case 46:
-    case 47:
-    case 48:
-    case 49:
-    case 50:
-    case 51:
-    case 52:
-    case 53:
-    case 54:
-    case 55:
-    case 56:
-    case 57:
-    case 58:
-    case 59:
-    case 60:
-    case 61:
-    case 62:
-    case 63:
-    case 64:
-    case 65:
-    case 66:
-    case 67:
-    case 68:
-    case 69:
-    case 70:
-      return true;
-    default:
-      return false;
-  }
-}
-
 const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* SaslStatus_descriptor() {
   ::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_UserBitShared_2eproto);
-  return file_level_enum_descriptors_UserBitShared_2eproto[7];
+  return file_level_enum_descriptors_UserBitShared_2eproto[6];
 }
 bool SaslStatus_IsValid(int value) {
   switch (value) {
@@ -7453,23 +7337,26 @@
  public:
   using HasBits = decltype(std::declval<OperatorProfile>()._has_bits_);
   static void set_has_operator_id(HasBits* has_bits) {
-    (*has_bits)[0] |= 1u;
-  }
-  static void set_has_operator_type(HasBits* has_bits) {
     (*has_bits)[0] |= 2u;
   }
-  static void set_has_setup_nanos(HasBits* has_bits) {
+  static void set_has_operator_type(HasBits* has_bits) {
     (*has_bits)[0] |= 4u;
   }
-  static void set_has_process_nanos(HasBits* has_bits) {
+  static void set_has_setup_nanos(HasBits* has_bits) {
     (*has_bits)[0] |= 8u;
   }
-  static void set_has_peak_local_memory_allocated(HasBits* has_bits) {
+  static void set_has_process_nanos(HasBits* has_bits) {
     (*has_bits)[0] |= 16u;
   }
-  static void set_has_wait_nanos(HasBits* has_bits) {
+  static void set_has_peak_local_memory_allocated(HasBits* has_bits) {
     (*has_bits)[0] |= 32u;
   }
+  static void set_has_wait_nanos(HasBits* has_bits) {
+    (*has_bits)[0] |= 64u;
+  }
+  static void set_has_operator_type_name(HasBits* has_bits) {
+    (*has_bits)[0] |= 1u;
+  }
 };
 
 OperatorProfile::OperatorProfile()
@@ -7484,6 +7371,10 @@
       input_profile_(from.input_profile_),
       metric_(from.metric_) {
   _internal_metadata_.MergeFrom(from._internal_metadata_);
+  operator_type_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+  if (from._internal_has_operator_type_name()) {
+    operator_type_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.operator_type_name_);
+  }
   ::memcpy(&operator_id_, &from.operator_id_,
     static_cast<size_t>(reinterpret_cast<char*>(&wait_nanos_) -
     reinterpret_cast<char*>(&operator_id_)) + sizeof(wait_nanos_));
@@ -7492,6 +7383,7 @@
 
 void OperatorProfile::SharedCtor() {
   ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_OperatorProfile_UserBitShared_2eproto.base);
+  operator_type_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
   ::memset(&operator_id_, 0, static_cast<size_t>(
       reinterpret_cast<char*>(&wait_nanos_) -
       reinterpret_cast<char*>(&operator_id_)) + sizeof(wait_nanos_));
@@ -7503,6 +7395,7 @@
 }
 
 void OperatorProfile::SharedDtor() {
+  operator_type_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
 }
 
 void OperatorProfile::SetCachedSize(int size) const {
@@ -7523,7 +7416,10 @@
   input_profile_.Clear();
   metric_.Clear();
   cached_has_bits = _has_bits_[0];
-  if (cached_has_bits & 0x0000003fu) {
+  if (cached_has_bits & 0x00000001u) {
+    operator_type_name_.ClearNonDefaultToEmptyNoArena();
+  }
+  if (cached_has_bits & 0x0000007eu) {
     ::memset(&operator_id_, 0, static_cast<size_t>(
         reinterpret_cast<char*>(&wait_nanos_) -
         reinterpret_cast<char*>(&operator_id_)) + sizeof(wait_nanos_));
@@ -7560,7 +7456,7 @@
           CHK_(ptr);
         } else goto handle_unusual;
         continue;
-      // optional int32 operator_type = 4;
+      // optional int32 operator_type = 4 [deprecated = true];
       case 4:
         if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
           _Internal::set_has_operator_type(&has_bits);
@@ -7612,6 +7508,17 @@
           CHK_(ptr);
         } else goto handle_unusual;
         continue;
+      // optional string operator_type_name = 10;
+      case 10:
+        if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 82)) {
+          auto str = _internal_mutable_operator_type_name();
+          ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx);
+          #ifndef NDEBUG
+          ::PROTOBUF_NAMESPACE_ID::internal::VerifyUTF8(str, "exec.shared.OperatorProfile.operator_type_name");
+          #endif  // !NDEBUG
+          CHK_(ptr);
+        } else goto handle_unusual;
+        continue;
       default: {
       handle_unusual:
         if ((tag & 7) == 4 || tag == 0) {
@@ -7649,31 +7556,31 @@
 
   cached_has_bits = _has_bits_[0];
   // optional int32 operator_id = 3;
-  if (cached_has_bits & 0x00000001u) {
+  if (cached_has_bits & 0x00000002u) {
     target = stream->EnsureSpace(target);
     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(3, this->_internal_operator_id(), target);
   }
 
-  // optional int32 operator_type = 4;
-  if (cached_has_bits & 0x00000002u) {
+  // optional int32 operator_type = 4 [deprecated = true];
+  if (cached_has_bits & 0x00000004u) {
     target = stream->EnsureSpace(target);
     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt32ToArray(4, this->_internal_operator_type(), target);
   }
 
   // optional int64 setup_nanos = 5;
-  if (cached_has_bits & 0x00000004u) {
+  if (cached_has_bits & 0x00000008u) {
     target = stream->EnsureSpace(target);
     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(5, this->_internal_setup_nanos(), target);
   }
 
   // optional int64 process_nanos = 6;
-  if (cached_has_bits & 0x00000008u) {
+  if (cached_has_bits & 0x00000010u) {
     target = stream->EnsureSpace(target);
     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(6, this->_internal_process_nanos(), target);
   }
 
   // optional int64 peak_local_memory_allocated = 7;
-  if (cached_has_bits & 0x00000010u) {
+  if (cached_has_bits & 0x00000020u) {
     target = stream->EnsureSpace(target);
     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(7, this->_internal_peak_local_memory_allocated(), target);
   }
@@ -7687,11 +7594,21 @@
   }
 
   // optional int64 wait_nanos = 9;
-  if (cached_has_bits & 0x00000020u) {
+  if (cached_has_bits & 0x00000040u) {
     target = stream->EnsureSpace(target);
     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(9, this->_internal_wait_nanos(), target);
   }
 
+  // optional string operator_type_name = 10;
+  if (cached_has_bits & 0x00000001u) {
+    ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::VerifyUTF8StringNamedField(
+      this->_internal_operator_type_name().data(), static_cast<int>(this->_internal_operator_type_name().length()),
+      ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SERIALIZE,
+      "exec.shared.OperatorProfile.operator_type_name");
+    target = stream->WriteStringMaybeAliased(
+        10, this->_internal_operator_type_name(), target);
+  }
+
   if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
     target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray(
         _internal_metadata_.unknown_fields(), target, stream);
@@ -7723,44 +7640,51 @@
   }
 
   cached_has_bits = _has_bits_[0];
-  if (cached_has_bits & 0x0000003fu) {
-    // optional int32 operator_id = 3;
+  if (cached_has_bits & 0x0000007fu) {
+    // optional string operator_type_name = 10;
     if (cached_has_bits & 0x00000001u) {
       total_size += 1 +
+        ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
+          this->_internal_operator_type_name());
+    }
+
+    // optional int32 operator_id = 3;
+    if (cached_has_bits & 0x00000002u) {
+      total_size += 1 +
         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
           this->_internal_operator_id());
     }
 
-    // optional int32 operator_type = 4;
-    if (cached_has_bits & 0x00000002u) {
+    // optional int32 operator_type = 4 [deprecated = true];
+    if (cached_has_bits & 0x00000004u) {
       total_size += 1 +
         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int32Size(
           this->_internal_operator_type());
     }
 
     // optional int64 setup_nanos = 5;
-    if (cached_has_bits & 0x00000004u) {
+    if (cached_has_bits & 0x00000008u) {
       total_size += 1 +
         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
           this->_internal_setup_nanos());
     }
 
     // optional int64 process_nanos = 6;
-    if (cached_has_bits & 0x00000008u) {
+    if (cached_has_bits & 0x00000010u) {
       total_size += 1 +
         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
           this->_internal_process_nanos());
     }
 
     // optional int64 peak_local_memory_allocated = 7;
-    if (cached_has_bits & 0x00000010u) {
+    if (cached_has_bits & 0x00000020u) {
       total_size += 1 +
         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
           this->_internal_peak_local_memory_allocated());
     }
 
     // optional int64 wait_nanos = 9;
-    if (cached_has_bits & 0x00000020u) {
+    if (cached_has_bits & 0x00000040u) {
       total_size += 1 +
         ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
           this->_internal_wait_nanos());
@@ -7801,23 +7725,27 @@
   input_profile_.MergeFrom(from.input_profile_);
   metric_.MergeFrom(from.metric_);
   cached_has_bits = from._has_bits_[0];
-  if (cached_has_bits & 0x0000003fu) {
+  if (cached_has_bits & 0x0000007fu) {
     if (cached_has_bits & 0x00000001u) {
-      operator_id_ = from.operator_id_;
+      _has_bits_[0] |= 0x00000001u;
+      operator_type_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.operator_type_name_);
     }
     if (cached_has_bits & 0x00000002u) {
-      operator_type_ = from.operator_type_;
+      operator_id_ = from.operator_id_;
     }
     if (cached_has_bits & 0x00000004u) {
-      setup_nanos_ = from.setup_nanos_;
+      operator_type_ = from.operator_type_;
     }
     if (cached_has_bits & 0x00000008u) {
-      process_nanos_ = from.process_nanos_;
+      setup_nanos_ = from.setup_nanos_;
     }
     if (cached_has_bits & 0x00000010u) {
-      peak_local_memory_allocated_ = from.peak_local_memory_allocated_;
+      process_nanos_ = from.process_nanos_;
     }
     if (cached_has_bits & 0x00000020u) {
+      peak_local_memory_allocated_ = from.peak_local_memory_allocated_;
+    }
+    if (cached_has_bits & 0x00000040u) {
       wait_nanos_ = from.wait_nanos_;
     }
     _has_bits_[0] |= cached_has_bits;
@@ -7848,6 +7776,8 @@
   swap(_has_bits_[0], other->_has_bits_[0]);
   input_profile_.InternalSwap(&other->input_profile_);
   metric_.InternalSwap(&other->metric_);
+  operator_type_name_.Swap(&other->operator_type_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
+    GetArenaNoVirtual());
   swap(operator_id_, other->operator_id_);
   swap(operator_type_, other->operator_type_);
   swap(setup_nanos_, other->setup_nanos_);
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.h b/contrib/native/client/src/protobuf/UserBitShared.pb.h
index ae87641..1e7ac8a 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.h
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.h
@@ -321,98 +321,6 @@
   return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum<FragmentState>(
     FragmentState_descriptor(), name, value);
 }
-enum CoreOperatorType : int {
-  SINGLE_SENDER = 0,
-  BROADCAST_SENDER = 1,
-  FILTER = 2,
-  HASH_AGGREGATE = 3,
-  HASH_JOIN = 4,
-  MERGE_JOIN = 5,
-  HASH_PARTITION_SENDER = 6,
-  LIMIT = 7,
-  MERGING_RECEIVER = 8,
-  ORDERED_PARTITION_SENDER = 9,
-  PROJECT = 10,
-  UNORDERED_RECEIVER = 11,
-  RANGE_PARTITION_SENDER = 12,
-  SCREEN = 13,
-  SELECTION_VECTOR_REMOVER = 14,
-  STREAMING_AGGREGATE = 15,
-  TOP_N_SORT = 16,
-  EXTERNAL_SORT = 17,
-  TRACE = 18,
-  UNION = 19,
-  OLD_SORT = 20,
-  PARQUET_ROW_GROUP_SCAN = 21,
-  HIVE_SUB_SCAN = 22,
-  SYSTEM_TABLE_SCAN = 23,
-  MOCK_SUB_SCAN = 24,
-  PARQUET_WRITER = 25,
-  DIRECT_SUB_SCAN = 26,
-  TEXT_WRITER = 27,
-  TEXT_SUB_SCAN = 28,
-  JSON_SUB_SCAN = 29,
-  INFO_SCHEMA_SUB_SCAN = 30,
-  COMPLEX_TO_JSON = 31,
-  PRODUCER_CONSUMER = 32,
-  HBASE_SUB_SCAN = 33,
-  WINDOW = 34,
-  NESTED_LOOP_JOIN = 35,
-  AVRO_SUB_SCAN = 36,
-  PCAP_SUB_SCAN = 37,
-  KAFKA_SUB_SCAN = 38,
-  KUDU_SUB_SCAN = 39,
-  FLATTEN = 40,
-  LATERAL_JOIN = 41,
-  UNNEST = 42,
-  HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN = 43,
-  JDBC_SCAN = 44,
-  REGEX_SUB_SCAN = 45,
-  MAPRDB_SUB_SCAN = 46,
-  MONGO_SUB_SCAN = 47,
-  KUDU_WRITER = 48,
-  OPEN_TSDB_SUB_SCAN = 49,
-  JSON_WRITER = 50,
-  HTPPD_LOG_SUB_SCAN = 51,
-  IMAGE_SUB_SCAN = 52,
-  SEQUENCE_SUB_SCAN = 53,
-  PARTITION_LIMIT = 54,
-  PCAPNG_SUB_SCAN = 55,
-  RUNTIME_FILTER = 56,
-  ROWKEY_JOIN = 57,
-  SYSLOG_SUB_SCAN = 58,
-  STATISTICS_AGGREGATE = 59,
-  UNPIVOT_MAPS = 60,
-  STATISTICS_MERGE = 61,
-  LTSV_SUB_SCAN = 62,
-  HDF5_SUB_SCAN = 63,
-  EXCEL_SUB_SCAN = 64,
-  SHP_SUB_SCAN = 65,
-  METADATA_HANDLER = 66,
-  METADATA_CONTROLLER = 67,
-  DRUID_SUB_SCAN = 68,
-  SPSS_SUB_SCAN = 69,
-  HTTP_SUB_SCAN = 70
-};
-bool CoreOperatorType_IsValid(int value);
-constexpr CoreOperatorType CoreOperatorType_MIN = SINGLE_SENDER;
-constexpr CoreOperatorType CoreOperatorType_MAX = HTTP_SUB_SCAN;
-constexpr int CoreOperatorType_ARRAYSIZE = CoreOperatorType_MAX + 1;
-
-const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* CoreOperatorType_descriptor();
-template<typename T>
-inline const std::string& CoreOperatorType_Name(T enum_t_value) {
-  static_assert(::std::is_same<T, CoreOperatorType>::value ||
-    ::std::is_integral<T>::value,
-    "Incorrect type passed to function CoreOperatorType_Name.");
-  return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum(
-    CoreOperatorType_descriptor(), enum_t_value);
-}
-inline bool CoreOperatorType_Parse(
-    const std::string& name, CoreOperatorType* value) {
-  return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum<CoreOperatorType>(
-    CoreOperatorType_descriptor(), name, value);
-}
 enum SaslStatus : int {
   SASL_UNKNOWN = 0,
   SASL_START = 1,
@@ -4245,6 +4153,7 @@
   enum : int {
     kInputProfileFieldNumber = 1,
     kMetricFieldNumber = 8,
+    kOperatorTypeNameFieldNumber = 10,
     kOperatorIdFieldNumber = 3,
     kOperatorTypeFieldNumber = 4,
     kSetupNanosFieldNumber = 5,
@@ -4288,6 +4197,26 @@
   const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::exec::shared::MetricValue >&
       metric() const;
 
+  // optional string operator_type_name = 10;
+  bool has_operator_type_name() const;
+  private:
+  bool _internal_has_operator_type_name() const;
+  public:
+  void clear_operator_type_name();
+  const std::string& operator_type_name() const;
+  void set_operator_type_name(const std::string& value);
+  void set_operator_type_name(std::string&& value);
+  void set_operator_type_name(const char* value);
+  void set_operator_type_name(const char* value, size_t size);
+  std::string* mutable_operator_type_name();
+  std::string* release_operator_type_name();
+  void set_allocated_operator_type_name(std::string* operator_type_name);
+  private:
+  const std::string& _internal_operator_type_name() const;
+  void _internal_set_operator_type_name(const std::string& value);
+  std::string* _internal_mutable_operator_type_name();
+  public:
+
   // optional int32 operator_id = 3;
   bool has_operator_id() const;
   private:
@@ -4301,14 +4230,14 @@
   void _internal_set_operator_id(::PROTOBUF_NAMESPACE_ID::int32 value);
   public:
 
-  // optional int32 operator_type = 4;
-  bool has_operator_type() const;
+  // optional int32 operator_type = 4 [deprecated = true];
+  PROTOBUF_DEPRECATED bool has_operator_type() const;
   private:
   bool _internal_has_operator_type() const;
   public:
-  void clear_operator_type();
-  ::PROTOBUF_NAMESPACE_ID::int32 operator_type() const;
-  void set_operator_type(::PROTOBUF_NAMESPACE_ID::int32 value);
+  PROTOBUF_DEPRECATED void clear_operator_type();
+  PROTOBUF_DEPRECATED ::PROTOBUF_NAMESPACE_ID::int32 operator_type() const;
+  PROTOBUF_DEPRECATED void set_operator_type(::PROTOBUF_NAMESPACE_ID::int32 value);
   private:
   ::PROTOBUF_NAMESPACE_ID::int32 _internal_operator_type() const;
   void _internal_set_operator_type(::PROTOBUF_NAMESPACE_ID::int32 value);
@@ -4375,6 +4304,7 @@
   mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
   ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::exec::shared::StreamProfile > input_profile_;
   ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::exec::shared::MetricValue > metric_;
+  ::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr operator_type_name_;
   ::PROTOBUF_NAMESPACE_ID::int32 operator_id_;
   ::PROTOBUF_NAMESPACE_ID::int32 operator_type_;
   ::PROTOBUF_NAMESPACE_ID::int64 setup_nanos_;
@@ -9317,7 +9247,7 @@
 
 // optional int32 operator_id = 3;
 inline bool OperatorProfile::_internal_has_operator_id() const {
-  bool value = (_has_bits_[0] & 0x00000001u) != 0;
+  bool value = (_has_bits_[0] & 0x00000002u) != 0;
   return value;
 }
 inline bool OperatorProfile::has_operator_id() const {
@@ -9325,7 +9255,7 @@
 }
 inline void OperatorProfile::clear_operator_id() {
   operator_id_ = 0;
-  _has_bits_[0] &= ~0x00000001u;
+  _has_bits_[0] &= ~0x00000002u;
 }
 inline ::PROTOBUF_NAMESPACE_ID::int32 OperatorProfile::_internal_operator_id() const {
   return operator_id_;
@@ -9335,7 +9265,7 @@
   return _internal_operator_id();
 }
 inline void OperatorProfile::_internal_set_operator_id(::PROTOBUF_NAMESPACE_ID::int32 value) {
-  _has_bits_[0] |= 0x00000001u;
+  _has_bits_[0] |= 0x00000002u;
   operator_id_ = value;
 }
 inline void OperatorProfile::set_operator_id(::PROTOBUF_NAMESPACE_ID::int32 value) {
@@ -9343,9 +9273,9 @@
   // @@protoc_insertion_point(field_set:exec.shared.OperatorProfile.operator_id)
 }
 
-// optional int32 operator_type = 4;
+// optional int32 operator_type = 4 [deprecated = true];
 inline bool OperatorProfile::_internal_has_operator_type() const {
-  bool value = (_has_bits_[0] & 0x00000002u) != 0;
+  bool value = (_has_bits_[0] & 0x00000004u) != 0;
   return value;
 }
 inline bool OperatorProfile::has_operator_type() const {
@@ -9353,7 +9283,7 @@
 }
 inline void OperatorProfile::clear_operator_type() {
   operator_type_ = 0;
-  _has_bits_[0] &= ~0x00000002u;
+  _has_bits_[0] &= ~0x00000004u;
 }
 inline ::PROTOBUF_NAMESPACE_ID::int32 OperatorProfile::_internal_operator_type() const {
   return operator_type_;
@@ -9363,7 +9293,7 @@
   return _internal_operator_type();
 }
 inline void OperatorProfile::_internal_set_operator_type(::PROTOBUF_NAMESPACE_ID::int32 value) {
-  _has_bits_[0] |= 0x00000002u;
+  _has_bits_[0] |= 0x00000004u;
   operator_type_ = value;
 }
 inline void OperatorProfile::set_operator_type(::PROTOBUF_NAMESPACE_ID::int32 value) {
@@ -9373,7 +9303,7 @@
 
 // optional int64 setup_nanos = 5;
 inline bool OperatorProfile::_internal_has_setup_nanos() const {
-  bool value = (_has_bits_[0] & 0x00000004u) != 0;
+  bool value = (_has_bits_[0] & 0x00000008u) != 0;
   return value;
 }
 inline bool OperatorProfile::has_setup_nanos() const {
@@ -9381,7 +9311,7 @@
 }
 inline void OperatorProfile::clear_setup_nanos() {
   setup_nanos_ = PROTOBUF_LONGLONG(0);
-  _has_bits_[0] &= ~0x00000004u;
+  _has_bits_[0] &= ~0x00000008u;
 }
 inline ::PROTOBUF_NAMESPACE_ID::int64 OperatorProfile::_internal_setup_nanos() const {
   return setup_nanos_;
@@ -9391,7 +9321,7 @@
   return _internal_setup_nanos();
 }
 inline void OperatorProfile::_internal_set_setup_nanos(::PROTOBUF_NAMESPACE_ID::int64 value) {
-  _has_bits_[0] |= 0x00000004u;
+  _has_bits_[0] |= 0x00000008u;
   setup_nanos_ = value;
 }
 inline void OperatorProfile::set_setup_nanos(::PROTOBUF_NAMESPACE_ID::int64 value) {
@@ -9401,7 +9331,7 @@
 
 // optional int64 process_nanos = 6;
 inline bool OperatorProfile::_internal_has_process_nanos() const {
-  bool value = (_has_bits_[0] & 0x00000008u) != 0;
+  bool value = (_has_bits_[0] & 0x00000010u) != 0;
   return value;
 }
 inline bool OperatorProfile::has_process_nanos() const {
@@ -9409,7 +9339,7 @@
 }
 inline void OperatorProfile::clear_process_nanos() {
   process_nanos_ = PROTOBUF_LONGLONG(0);
-  _has_bits_[0] &= ~0x00000008u;
+  _has_bits_[0] &= ~0x00000010u;
 }
 inline ::PROTOBUF_NAMESPACE_ID::int64 OperatorProfile::_internal_process_nanos() const {
   return process_nanos_;
@@ -9419,7 +9349,7 @@
   return _internal_process_nanos();
 }
 inline void OperatorProfile::_internal_set_process_nanos(::PROTOBUF_NAMESPACE_ID::int64 value) {
-  _has_bits_[0] |= 0x00000008u;
+  _has_bits_[0] |= 0x00000010u;
   process_nanos_ = value;
 }
 inline void OperatorProfile::set_process_nanos(::PROTOBUF_NAMESPACE_ID::int64 value) {
@@ -9429,7 +9359,7 @@
 
 // optional int64 peak_local_memory_allocated = 7;
 inline bool OperatorProfile::_internal_has_peak_local_memory_allocated() const {
-  bool value = (_has_bits_[0] & 0x00000010u) != 0;
+  bool value = (_has_bits_[0] & 0x00000020u) != 0;
   return value;
 }
 inline bool OperatorProfile::has_peak_local_memory_allocated() const {
@@ -9437,7 +9367,7 @@
 }
 inline void OperatorProfile::clear_peak_local_memory_allocated() {
   peak_local_memory_allocated_ = PROTOBUF_LONGLONG(0);
-  _has_bits_[0] &= ~0x00000010u;
+  _has_bits_[0] &= ~0x00000020u;
 }
 inline ::PROTOBUF_NAMESPACE_ID::int64 OperatorProfile::_internal_peak_local_memory_allocated() const {
   return peak_local_memory_allocated_;
@@ -9447,7 +9377,7 @@
   return _internal_peak_local_memory_allocated();
 }
 inline void OperatorProfile::_internal_set_peak_local_memory_allocated(::PROTOBUF_NAMESPACE_ID::int64 value) {
-  _has_bits_[0] |= 0x00000010u;
+  _has_bits_[0] |= 0x00000020u;
   peak_local_memory_allocated_ = value;
 }
 inline void OperatorProfile::set_peak_local_memory_allocated(::PROTOBUF_NAMESPACE_ID::int64 value) {
@@ -9496,7 +9426,7 @@
 
 // optional int64 wait_nanos = 9;
 inline bool OperatorProfile::_internal_has_wait_nanos() const {
-  bool value = (_has_bits_[0] & 0x00000020u) != 0;
+  bool value = (_has_bits_[0] & 0x00000040u) != 0;
   return value;
 }
 inline bool OperatorProfile::has_wait_nanos() const {
@@ -9504,7 +9434,7 @@
 }
 inline void OperatorProfile::clear_wait_nanos() {
   wait_nanos_ = PROTOBUF_LONGLONG(0);
-  _has_bits_[0] &= ~0x00000020u;
+  _has_bits_[0] &= ~0x00000040u;
 }
 inline ::PROTOBUF_NAMESPACE_ID::int64 OperatorProfile::_internal_wait_nanos() const {
   return wait_nanos_;
@@ -9514,7 +9444,7 @@
   return _internal_wait_nanos();
 }
 inline void OperatorProfile::_internal_set_wait_nanos(::PROTOBUF_NAMESPACE_ID::int64 value) {
-  _has_bits_[0] |= 0x00000020u;
+  _has_bits_[0] |= 0x00000040u;
   wait_nanos_ = value;
 }
 inline void OperatorProfile::set_wait_nanos(::PROTOBUF_NAMESPACE_ID::int64 value) {
@@ -9522,6 +9452,77 @@
   // @@protoc_insertion_point(field_set:exec.shared.OperatorProfile.wait_nanos)
 }
 
+// optional string operator_type_name = 10;
+inline bool OperatorProfile::_internal_has_operator_type_name() const {
+  bool value = (_has_bits_[0] & 0x00000001u) != 0;
+  return value;
+}
+inline bool OperatorProfile::has_operator_type_name() const {
+  return _internal_has_operator_type_name();
+}
+inline void OperatorProfile::clear_operator_type_name() {
+  operator_type_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline const std::string& OperatorProfile::operator_type_name() const {
+  // @@protoc_insertion_point(field_get:exec.shared.OperatorProfile.operator_type_name)
+  return _internal_operator_type_name();
+}
+inline void OperatorProfile::set_operator_type_name(const std::string& value) {
+  _internal_set_operator_type_name(value);
+  // @@protoc_insertion_point(field_set:exec.shared.OperatorProfile.operator_type_name)
+}
+inline std::string* OperatorProfile::mutable_operator_type_name() {
+  // @@protoc_insertion_point(field_mutable:exec.shared.OperatorProfile.operator_type_name)
+  return _internal_mutable_operator_type_name();
+}
+inline const std::string& OperatorProfile::_internal_operator_type_name() const {
+  return operator_type_name_.GetNoArena();
+}
+inline void OperatorProfile::_internal_set_operator_type_name(const std::string& value) {
+  _has_bits_[0] |= 0x00000001u;
+  operator_type_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value);
+}
+inline void OperatorProfile::set_operator_type_name(std::string&& value) {
+  _has_bits_[0] |= 0x00000001u;
+  operator_type_name_.SetNoArena(
+    &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value));
+  // @@protoc_insertion_point(field_set_rvalue:exec.shared.OperatorProfile.operator_type_name)
+}
+inline void OperatorProfile::set_operator_type_name(const char* value) {
+  GOOGLE_DCHECK(value != nullptr);
+  _has_bits_[0] |= 0x00000001u;
+  operator_type_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+  // @@protoc_insertion_point(field_set_char:exec.shared.OperatorProfile.operator_type_name)
+}
+inline void OperatorProfile::set_operator_type_name(const char* value, size_t size) {
+  _has_bits_[0] |= 0x00000001u;
+  operator_type_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
+      ::std::string(reinterpret_cast<const char*>(value), size));
+  // @@protoc_insertion_point(field_set_pointer:exec.shared.OperatorProfile.operator_type_name)
+}
+inline std::string* OperatorProfile::_internal_mutable_operator_type_name() {
+  _has_bits_[0] |= 0x00000001u;
+  return operator_type_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+inline std::string* OperatorProfile::release_operator_type_name() {
+  // @@protoc_insertion_point(field_release:exec.shared.OperatorProfile.operator_type_name)
+  if (!_internal_has_operator_type_name()) {
+    return nullptr;
+  }
+  _has_bits_[0] &= ~0x00000001u;
+  return operator_type_name_.ReleaseNonDefaultNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+inline void OperatorProfile::set_allocated_operator_type_name(std::string* operator_type_name) {
+  if (operator_type_name != nullptr) {
+    _has_bits_[0] |= 0x00000001u;
+  } else {
+    _has_bits_[0] &= ~0x00000001u;
+  }
+  operator_type_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), operator_type_name);
+  // @@protoc_insertion_point(field_set_allocated:exec.shared.OperatorProfile.operator_type_name)
+}
+
 // -------------------------------------------------------------------
 
 // StreamProfile
@@ -10148,11 +10149,6 @@
 inline const EnumDescriptor* GetEnumDescriptor< ::exec::shared::FragmentState>() {
   return ::exec::shared::FragmentState_descriptor();
 }
-template <> struct is_proto_enum< ::exec::shared::CoreOperatorType> : ::std::true_type {};
-template <>
-inline const EnumDescriptor* GetEnumDescriptor< ::exec::shared::CoreOperatorType>() {
-  return ::exec::shared::CoreOperatorType_descriptor();
-}
 template <> struct is_proto_enum< ::exec::shared::SaslStatus> : ::std::true_type {};
 template <>
 inline const EnumDescriptor* GetEnumDescriptor< ::exec::shared::SaslStatus>() {
diff --git a/contrib/pom.xml b/contrib/pom.xml
index 22393e0..40c66f1 100644
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -28,7 +28,7 @@
 
   <groupId>org.apache.drill.contrib</groupId>
   <artifactId>drill-contrib-parent</artifactId>
-  <name>contrib/Parent Pom</name>
+  <name>Drill : Contrib : </name>
   <packaging>pom</packaging>
 
   <properties>
@@ -46,9 +46,12 @@
     <module>format-syslog</module>
     <module>format-ltsv</module>
     <module>format-excel</module>
+    <module>format-httpd</module>
     <module>format-esri</module>
     <module>format-hdf5</module>
     <module>format-spss</module>
+    <module>format-xml</module>
+    <module>format-image</module>
     <module>storage-hive</module>
     <module>storage-mongo</module>
     <module>storage-jdbc</module>
@@ -57,6 +60,7 @@
     <module>storage-opentsdb</module>
     <module>storage-http</module>
     <module>storage-druid</module>
+    <module>storage-elasticsearch</module>
   </modules>
 
 </project>
diff --git a/contrib/storage-druid/pom.xml b/contrib/storage-druid/pom.xml
index e647641..eb12d22 100755
--- a/contrib/storage-druid/pom.xml
+++ b/contrib/storage-druid/pom.xml
@@ -27,7 +27,7 @@
     <modelVersion>4.0.0</modelVersion>
 
     <artifactId>drill-druid-storage</artifactId>
-    <name>contrib/druid-storage-plugin</name>
+    <name>Drill : Contrib : Storage : Druid</name>
     <properties>
         <druid.TestSuite>**/DruidTestSuit.class</druid.TestSuite>
     </properties>
diff --git a/contrib/storage-druid/src/main/java/org/apache/drill/exec/store/druid/DruidSubScan.java b/contrib/storage-druid/src/main/java/org/apache/drill/exec/store/druid/DruidSubScan.java
old mode 100755
new mode 100644
index e8beb3d..6eff2ea
--- a/contrib/storage-druid/src/main/java/org/apache/drill/exec/store/druid/DruidSubScan.java
+++ b/contrib/storage-druid/src/main/java/org/apache/drill/exec/store/druid/DruidSubScan.java
@@ -29,7 +29,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.store.druid.common.DruidFilter;
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
@@ -45,6 +44,9 @@
  */
 @JsonTypeName("druid-datasource-scan")
 public class DruidSubScan extends AbstractBase implements SubScan {
+
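+  // Operator type name returned by getOperatorType(); replaces the removed CoreOperatorType.DRUID_SUB_SCAN enum value.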
+  public static final String OPERATOR_TYPE = "DRUID_SUB_SCAN";
+
   @JsonIgnore
   private final DruidStoragePlugin druidStoragePlugin;
   private final List<DruidSubScanSpec> scanSpec;
@@ -112,8 +114,8 @@
 
   @JsonIgnore
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.DRUID_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/contrib/storage-druid/src/test/resources/druid/docker-compose.yaml b/contrib/storage-druid/src/test/resources/druid/docker-compose.yaml
index ab81225..6efb68e 100644
--- a/contrib/storage-druid/src/test/resources/druid/docker-compose.yaml
+++ b/contrib/storage-druid/src/test/resources/druid/docker-compose.yaml
@@ -7,14 +7,13 @@
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
 #
-#   http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 #
 
 version: "2.2"
diff --git a/contrib/storage-elasticsearch/README.md b/contrib/storage-elasticsearch/README.md
new file mode 100644
index 0000000..2ef77ad
--- /dev/null
+++ b/contrib/storage-elasticsearch/README.md
@@ -0,0 +1,51 @@
+# Drill ElasticSearch Plugin
+
+The Drill ElasticSearch storage plugin allows you to run SQL queries against ElasticSearch indices.
+This storage plugin implementation is based on the [Apache Calcite adapter for ElasticSearch](https://calcite.apache.org/docs/elasticsearch_adapter.html).
+
+For more details about supported versions, please refer to the [Supported versions](https://calcite.apache.org/docs/elasticsearch_adapter.html#supported-versions) page.
+
+### Supported optimizations and features
+
+This storage plugin supports the following optimizations:
+
+- Project pushdown
+- Filter pushdown (only for expressions supported by the Calcite adapter for ElasticSearch; filters with
+  unsupported expressions are not pushed down to ElasticSearch and are processed by Drill instead)
+- Limit pushdown
+- Aggregation pushdown
+- Sort pushdown
+
+Besides these optimizations, the ElasticSearch storage plugin supports the schema provisioning feature.
+For more details, please refer to [Specifying the Schema as Table Function Parameter](https://drill.apache.org/docs/plugin-configuration-basics/#specifying-the-schema-as-table-function-parameter).
+
+### Plugin registration
+
+The plugin can be registered in Apache Drill using the Drill web interface by navigating to the `storage` page.
+The default registration configuration is shown below.
+
+```json
+{
+  "type": "elastic",
+  "hosts": [
+    "http://localhost:9200"
+  ],
+  "username": null,
+  "password": null,
+  "enabled": false
+}
+```
+
+### Developer notes
+
+Most of the common classes required for creating storage plugins based on Calcite adapters are placed in the 
+`java-exec` module, so they can be reused in future plugin implementations.
+
+Here is a list of classes that may be useful (a minimal wiring sketch follows the list):
+
+- `VertexDrelConverterRule` with `VertexDrel` - used to hold the plugin-specific part of the plan at the end of the
+  `LOGICAL` planning phase.
+- `EnumerableIntermediatePrelConverterRule` with `EnumerableIntermediatePrel` - the same as above, but for the
+  `PHYSICAL` planning phase.
+- `EnumerablePrel` - responsible for generating the Java code that is executed to query the storage plugin data source.
+- `EnumerableRecordReader` - executes the Java code generated in `EnumerablePrel` and transforms the obtained results into Drill's internal representation.
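+
+A minimal wiring sketch, assuming a plugin modeled on this module (a method fragment simplified from
+`ElasticsearchStoragePlugin`, not a complete plugin):
+
+```java
+// Sketch: expose the Calcite-adapter rules to Drill's planner.
+// CalciteUtils.elasticSearchRules() already contains the VertexDrelConverterRule and
+// EnumerableIntermediatePrelConverterRule instances created for this plugin.
+@Override
+public Set<? extends RelOptRule> getOptimizerRules(OptimizerRulesContext optimizerContext, PlannerPhase phase) {
+  switch (phase) {
+    case LOGICAL:
+    case PHYSICAL:
+      return CalciteUtils.elasticSearchRules();
+    default:
+      return Collections.emptySet();
+  }
+}
+```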
diff --git a/contrib/storage-elasticsearch/pom.xml b/contrib/storage-elasticsearch/pom.xml
new file mode 100644
index 0000000..e793e80
--- /dev/null
+++ b/contrib/storage-elasticsearch/pom.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <properties>
+    <test.elasticsearch.version>7.10.1</test.elasticsearch.version>
+  </properties>
+  <parent>
+    <artifactId>drill-contrib-parent</artifactId>
+    <groupId>org.apache.drill.contrib</groupId>
+    <version>1.19.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>drill-storage-elasticsearch</artifactId>
+
+  <name>Drill : Contrib : Storage : ElasticSearch</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>${calcite.groupId}</groupId>
+      <artifactId>calcite-elasticsearch</artifactId>
+      <version>${calcite.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill</groupId>
+      <artifactId>drill-common</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.elasticsearch.client</groupId>
+      <artifactId>elasticsearch-rest-high-level-client</artifactId>
+      <version>7.0.1</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>net.hydromatic</groupId>
+      <artifactId>foodmart-data-json</artifactId>
+      <version>0.4</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <forkCount combine.self="override">1</forkCount>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>com.github.alexcojocaru</groupId>
+        <artifactId>elasticsearch-maven-plugin</artifactId>
+        <version>6.19</version>
+        <configuration>
+          <version>${test.elasticsearch.version}</version>
+          <clusterName>test</clusterName>
+          <transportPort>9300</transportPort>
+          <httpPort>9200</httpPort>
+          <skip>${skipTests}</skip>
+        </configuration>
+        <executions>
+          <execution>
+            <id>start-elasticsearch</id>
+            <phase>process-test-classes</phase>
+            <goals>
+              <goal>runforked</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>stop-elasticsearch</id>
+            <phase>post-integration-test</phase>
+            <goals>
+              <goal>stop</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/calcite/adapter/elasticsearch/CalciteUtils.java b/contrib/storage-elasticsearch/src/main/java/org/apache/calcite/adapter/elasticsearch/CalciteUtils.java
new file mode 100644
index 0000000..4c3d6ec
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/calcite/adapter/elasticsearch/CalciteUtils.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.elasticsearch;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.convert.ConverterRule;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexNode;
+import org.apache.drill.exec.store.elasticsearch.ElasticsearchStorageConfig;
+import org.apache.drill.exec.store.elasticsearch.plan.ElasticSearchEnumerablePrelContext;
+import org.apache.drill.exec.store.elasticsearch.plan.ElasticsearchFilterRule;
+import org.apache.drill.exec.store.elasticsearch.plan.ElasticsearchProjectRule;
+import org.apache.drill.exec.store.enumerable.plan.EnumerableIntermediatePrelConverterRule;
+import org.apache.drill.exec.store.enumerable.plan.VertexDrelConverterRule;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+public class CalciteUtils {
+
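+  // Calcite rules whose names start with these prefixes are filtered out and replaced by the Drill-specific rules registered below.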
+  private static final List<String> BANNED_RULES =
+      Arrays.asList("ElasticsearchProjectRule", "ElasticsearchFilterRule");
+
+  public static final Predicate<RelOptRule> RULE_PREDICATE =
+      relOptRule -> BANNED_RULES.stream()
+          .noneMatch(banned -> relOptRule.toString().startsWith(banned));
+
+  public static final VertexDrelConverterRule ELASTIC_DREL_CONVERTER_RULE =
+      new VertexDrelConverterRule(ElasticsearchRel.CONVENTION);
+
+  public static final EnumerableIntermediatePrelConverterRule ENUMERABLE_INTERMEDIATE_PREL_CONVERTER_RULE =
+      new EnumerableIntermediatePrelConverterRule(
+          new ElasticSearchEnumerablePrelContext(ElasticsearchStorageConfig.NAME));
+
+  public static Set<RelOptRule> elasticSearchRules() {
+    // filters out Calcite's implementations of some rules and adds alternative versions specific to Drill
+    Set<RelOptRule> rules = Arrays.stream(ElasticsearchRules.RULES)
+        .filter(RULE_PREDICATE)
+        .collect(Collectors.toSet());
+    rules.add(ENUMERABLE_INTERMEDIATE_PREL_CONVERTER_RULE);
+    rules.add(ELASTIC_DREL_CONVERTER_RULE);
+    rules.add(ElasticsearchProjectRule.INSTANCE);
+    rules.add(ElasticsearchFilterRule.INSTANCE);
+    return rules;
+  }
+
+  public static ConverterRule getElasticsearchToEnumerableConverterRule() {
+    return ElasticsearchToEnumerableConverterRule.INSTANCE;
+  }
+
+  public static ElasticsearchProject createProject(RelTraitSet traitSet, RelNode input,
+      List<? extends RexNode> projects, RelDataType rowType) {
+    return new ElasticsearchProject(input.getCluster(), traitSet, input, projects, rowType);
+  }
+
+  public static ElasticsearchFilter createFilter(RelTraitSet traitSet, RelNode input,
+      RexNode condition) {
+    return new ElasticsearchFilter(input.getCluster(), traitSet, input, condition);
+  }
+
+  public static void analyzePredicate(RexNode condition) throws PredicateAnalyzer.ExpressionNotAnalyzableException {
+    PredicateAnalyzer.analyze(condition);
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/calcite/adapter/elasticsearch/DrillElasticsearchTableScan.java b/contrib/storage-elasticsearch/src/main/java/org/apache/calcite/adapter/elasticsearch/DrillElasticsearchTableScan.java
new file mode 100644
index 0000000..6b12d94
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/calcite/adapter/elasticsearch/DrillElasticsearchTableScan.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.elasticsearch;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.type.RelDataType;
+
+public class DrillElasticsearchTableScan extends ElasticsearchTableScan {
+
+  public DrillElasticsearchTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table, ElasticsearchTable elasticsearchTable, RelDataType projectRowType) {
+    super(cluster, traitSet, table, elasticsearchTable, projectRowType);
+  }
+
+  @Override
+  public void register(RelOptPlanner planner) {
+    // noop
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/ElasticsearchStorageConfig.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/ElasticsearchStorageConfig.java
new file mode 100644
index 0000000..a5a5be4
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/ElasticsearchStorageConfig.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import org.apache.drill.common.logical.StoragePluginConfig;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+@JsonTypeName(ElasticsearchStorageConfig.NAME)
+public class ElasticsearchStorageConfig extends StoragePluginConfig {
+  public static final String NAME = "elastic";
+
+  private static final ObjectWriter OBJECT_WRITER = new ObjectMapper().writerFor(List.class);
+
+  private final List<String> hosts;
+  private final String username;
+  private final String password;
+
+  @JsonCreator
+  public ElasticsearchStorageConfig(
+      @JsonProperty("hosts") List<String> hosts,
+      @JsonProperty("username") String username,
+      @JsonProperty("password") String password) {
+    this.hosts = hosts;
+    this.username = username;
+    this.password = password;
+  }
+
+  public List<String> getHosts() {
+    return hosts;
+  }
+
+  public String getUsername() {
+    return username;
+  }
+
+  public String getPassword() {
+    return password;
+  }
+
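+  /**
+   * Converts this configuration into a properties map: hosts are serialized as a JSON string,
+   * and username/password are included only when a username is set.
+   */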
+  @JsonIgnore
+  public Map<String, Object> toConfigMap()
+      throws JsonProcessingException {
+    ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
+    builder.put("hosts", OBJECT_WRITER.writeValueAsString(hosts));
+    if (username != null) {
+      builder.put("username", username)
+          .put("password", password);
+    }
+    return builder.build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ElasticsearchStorageConfig that = (ElasticsearchStorageConfig) o;
+    return Objects.equals(hosts, that.hosts)
+        && Objects.equals(username, that.username)
+        && Objects.equals(password, that.password);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(hosts, username, password);
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/ElasticsearchStoragePlugin.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/ElasticsearchStoragePlugin.java
new file mode 100644
index 0000000..95cdbee
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/ElasticsearchStoragePlugin.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.apache.calcite.adapter.elasticsearch.CalciteUtils;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.drill.exec.ops.OptimizerRulesContext;
+import org.apache.drill.exec.planner.PlannerPhase;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.store.AbstractStoragePlugin;
+import org.apache.drill.exec.store.SchemaConfig;
+import org.apache.drill.exec.store.elasticsearch.schema.ElasticsearchDrillSchemaFactory;
+
+import java.util.Collections;
+import java.util.Set;
+
+public class ElasticsearchStoragePlugin extends AbstractStoragePlugin {
+  private final ElasticsearchStorageConfig config;
+  private final ElasticsearchDrillSchemaFactory schemaFactory;
+
+  public ElasticsearchStoragePlugin(
+      ElasticsearchStorageConfig config, DrillbitContext context, String name) {
+    super(context, name);
+    this.config = config;
+    this.schemaFactory = new ElasticsearchDrillSchemaFactory(name, this);
+  }
+
+  @Override
+  public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws JsonProcessingException {
+    schemaFactory.registerSchemas(schemaConfig, parent);
+  }
+
+  @Override
+  public ElasticsearchStorageConfig getConfig() {
+    return config;
+  }
+
+  @Override
+  public boolean supportsRead() {
+    return true;
+  }
+
+  @Override
+  public Set<? extends RelOptRule> getOptimizerRules(OptimizerRulesContext optimizerContext, PlannerPhase phase) {
+    switch (phase) {
+      case PHYSICAL:
+      case LOGICAL:
+        return CalciteUtils.elasticSearchRules();
+      case LOGICAL_PRUNE_AND_JOIN:
+      case LOGICAL_PRUNE:
+      case PARTITION_PRUNING:
+      case JOIN_PLANNING:
+      default:
+        return Collections.emptySet();
+    }
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticPlanTransformer.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticPlanTransformer.java
new file mode 100644
index 0000000..1063462
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticPlanTransformer.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.plan;
+
+import org.apache.calcite.adapter.elasticsearch.CalciteUtils;
+import org.apache.calcite.adapter.elasticsearch.DrillElasticsearchTableScan;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchAggregate;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchFilter;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchProject;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchSort;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchTable;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.prepare.RelOptTableImpl;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelShuttleImpl;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelRecordType;
+import org.apache.calcite.rel.type.StructKind;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+/**
+ * Implementation of RelShuttleImpl that transforms the plan to fit the Calcite ElasticSearch rel implementor.
+ */
+public class ElasticPlanTransformer extends RelShuttleImpl {
+
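+  // Tracks whether the project closest to the scan has already been rewritten.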
+  private boolean hasProject = false;
+
+  private RelDataTypeField mapField;
+
+  /**
+   * Replaces the rowType of the RelOptTable with the rowType obtained from the ElasticsearchTable.
+   */
+  @Override
+  public RelNode visit(TableScan other) {
+    RelOptTableImpl table = (RelOptTableImpl) other.getTable();
+    ElasticsearchTable elasticsearchTable = Objects.requireNonNull(
+        table.unwrap(ElasticsearchTable.class), "ElasticSearch table cannot be null");
+    RelDataType rowType = elasticsearchTable.getRowType(other.getCluster().getTypeFactory());
+    mapField = rowType.getFieldList().get(0);
+    return new DrillElasticsearchTableScan(other.getCluster(), other.getTraitSet(), table.copy(rowType), elasticsearchTable, rowType);
+  }
+
+  @Override
+  public RelNode visit(RelNode other) {
+    // replaces project expressions with ITEM calls, since Calcite returns a single map column `_MAP`
+    // that holds the actual table fields
+    if (other instanceof ElasticsearchProject) {
+      ElasticsearchProject project = (ElasticsearchProject) other;
+      RelNode input = project.getInput().accept(this);
+      List<RexNode> convertedExpressions = project.getProjects();
+      // only the project closest to the scan should be rewritten
+      if (!this.hasProject) {
+        ElasticExpressionMapper expressionMapper =
+            new ElasticExpressionMapper(project.getCluster().getRexBuilder(),
+                project.getInput().getRowType(), mapField);
+        convertedExpressions = convertedExpressions.stream()
+            .map(expression -> expression.accept(expressionMapper))
+            .collect(Collectors.toList());
+
+        RelRecordType relDataType = getRelRecordType(other.getRowType());
+        this.hasProject = true;
+        return CalciteUtils.createProject(project.getTraitSet(), input,
+            convertedExpressions, relDataType);
+      } else {
+        return input;
+      }
+    } else if (other instanceof ElasticsearchFilter) {
+      ElasticsearchFilter filter = (ElasticsearchFilter) other;
+      RexNode convertedCondition = filter.getCondition().accept(
+          new ElasticExpressionMapper(other.getCluster().getRexBuilder(), filter.getInput().getRowType(), mapField));
+      return filter.copy(other.getTraitSet(), filter.getInput().accept(this), convertedCondition);
+    } else if (other instanceof ElasticsearchSort) {
+      ElasticsearchSort sort = (ElasticsearchSort) other;
+      RelNode input = getMappedInput(sort.getInput());
+      return sort.copy(other.getTraitSet(), input, sort.getCollation(), sort.offset, sort.fetch);
+    } else if (other instanceof ElasticsearchAggregate) {
+      ElasticsearchAggregate aggregate = (ElasticsearchAggregate) other;
+      RelNode input = getMappedInput(aggregate.getInput());
+      return aggregate.copy(other.getTraitSet(), input, aggregate.getGroupSet(),
+          aggregate.getGroupSets(), aggregate.getAggCallList());
+    }
+
+    return super.visit(other);
+  }
+
+  /**
+   * Generates a project with mapped expressions above the specified rel node
+   * if there is no other project in the tree.
+   */
+  private RelNode getMappedInput(RelNode relNode) {
+    boolean hasProject = this.hasProject;
+    this.hasProject = false;
+    RelNode input = relNode.accept(this);
+    if (!this.hasProject) {
+      this.hasProject = hasProject;
+      RelOptCluster cluster = relNode.getCluster();
+      List<RexNode> projections = IntStream.range(0, relNode.getRowType().getFieldCount())
+          .mapToObj(i -> cluster.getRexBuilder().makeInputRef(relNode, i))
+          .collect(Collectors.toList());
+
+      return CalciteUtils.createProject(relNode.getTraitSet(), relNode,
+          projections, relNode.getRowType()).accept(this);
+    } else {
+      return input;
+    }
+  }
+
+  private RelRecordType getRelRecordType(RelDataType rowType) {
+    List<RelDataTypeField> fields = new ArrayList<>();
+    for (RelDataTypeField relDataTypeField : rowType.getFieldList()) {
+      if (relDataTypeField.isDynamicStar()) {
+        fields.add(mapField);
+      } else {
+        fields.add(relDataTypeField);
+      }
+    }
+
+    return new RelRecordType(StructKind.FULLY_QUALIFIED, fields, false);
+  }
+
+  /**
+   * Implementation of RexShuttle that replaces RexInputRef expressions with ITEM calls to the `_MAP` field.
+   */
+  public static class ElasticExpressionMapper extends RexShuttle {
+    private final RexBuilder rexBuilder;
+    private final RelDataType relDataType;
+    private final RelDataTypeField mapField;
+
+    public ElasticExpressionMapper(RexBuilder rexBuilder, RelDataType relDataType, RelDataTypeField mapField) {
+      this.rexBuilder = rexBuilder;
+      this.relDataType = relDataType;
+      this.mapField = mapField;
+    }
+
+    @Override
+    public RexNode visitInputRef(RexInputRef inputRef) {
+      if (inputRef.getType().getSqlTypeName() == SqlTypeName.DYNAMIC_STAR) {
+        return rexBuilder.makeInputRef(mapField.getType(), 0);
+      }
+      return rexBuilder.makeCall(SqlStdOperatorTable.ITEM, rexBuilder.makeInputRef(relDataType, 0),
+          rexBuilder.makeLiteral(relDataType.getFieldNames().get(inputRef.getIndex())));
+    }
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticSearchEnumerablePrelContext.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticSearchEnumerablePrelContext.java
new file mode 100644
index 0000000..29db1ca
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticSearchEnumerablePrelContext.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.plan;
+
+import org.apache.calcite.adapter.elasticsearch.CalciteUtils;
+import org.apache.calcite.adapter.enumerable.EnumerableRel;
+import org.apache.calcite.adapter.enumerable.EnumerableRelImplementor;
+import org.apache.calcite.linq4j.tree.ClassDeclaration;
+import org.apache.calcite.linq4j.tree.Expressions;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.store.SubsetRemover;
+import org.apache.drill.exec.store.enumerable.plan.EnumerablePrelContext;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public class ElasticSearchEnumerablePrelContext implements EnumerablePrelContext {
+
+  private final String planPrefix;
+
+  public ElasticSearchEnumerablePrelContext(String planPrefix) {
+    this.planPrefix = planPrefix;
+  }
+
+  @Override
+  public String generateCode(RelOptCluster cluster, RelNode elasticNode) {
+    RelNode enumerableRel =
+        CalciteUtils.getElasticsearchToEnumerableConverterRule().convert(elasticNode);
+
+    ClassDeclaration classDeclaration = new EnumerableRelImplementor(cluster.getRexBuilder(), Collections.emptyMap())
+        .implementRoot((EnumerableRel) enumerableRel, EnumerableRel.Prefer.ARRAY);
+    return Expressions.toString(Collections.singletonList(classDeclaration), "\n", false);
+  }
+
+  @Override
+  public RelNode transformNode(RelNode input) {
+    return input.accept(SubsetRemover.INSTANCE).accept(new ElasticPlanTransformer());
+  }
+
+  @Override
+  public Map<String, Integer> getFieldsMap(RelNode transformedNode) {
+    return transformedNode.getRowType().getFieldList().stream()
+        .collect(Collectors.toMap(
+            relDataTypeField -> relDataTypeField.getName().equals("_MAP")
+                ? SchemaPath.DYNAMIC_STAR
+                : relDataTypeField.getName(),
+            RelDataTypeField::getIndex
+        ));
+  }
+
+  @Override
+  public String getPlanPrefix() {
+    return planPrefix;
+  }
+
+  @Override
+  public String getTablePath(RelNode input) {
+    return null;
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticsearchFilterRule.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticsearchFilterRule.java
new file mode 100644
index 0000000..794f2e3
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticsearchFilterRule.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.plan;
+
+import org.apache.calcite.adapter.elasticsearch.CalciteUtils;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchFilter;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchRel;
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.convert.ConverterRule;
+import org.apache.calcite.rel.core.Filter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ElasticsearchFilterRule extends ConverterRule {
+  private static final Logger logger = LoggerFactory.getLogger(ElasticsearchFilterRule.class);
+
+  public static final ElasticsearchFilterRule INSTANCE = new ElasticsearchFilterRule();
+
+  private final Convention out;
+
+  private ElasticsearchFilterRule() {
+    super(Filter.class, Convention.NONE, ElasticsearchRel.CONVENTION,
+        "DrillElasticsearchFilterRule");
+    this.out = ElasticsearchRel.CONVENTION;
+  }
+
+  @Override
+  public RelNode convert(RelNode relNode) {
+    Filter filter = (Filter) relNode;
+    NodeTypeFinder filterFinder = new NodeTypeFinder(ElasticsearchFilter.class);
+    filter.getInput().accept(filterFinder);
+    if (filterFinder.containsNode) {
+      return null;
+    }
+    RelTraitSet traitSet = filter.getTraitSet().replace(out);
+
+    try {
+      CalciteUtils.analyzePredicate(filter.getCondition());
+    } catch (Exception e) {
+      logger.info("Unable to push filter into ElasticSearch :{}", e.getMessage(), e);
+      return null;
+    }
+
+    return CalciteUtils.createFilter(traitSet,
+        convert(filter.getInput(), out), filter.getCondition());
+  }
+
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticsearchProjectRule.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticsearchProjectRule.java
new file mode 100644
index 0000000..1995262
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/ElasticsearchProjectRule.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.plan;
+
+import org.apache.calcite.adapter.elasticsearch.CalciteUtils;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchProject;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchRel;
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.convert.ConverterRule;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.drill.exec.planner.common.DrillRelOptUtil;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Rule for converting a Drill project into an ElasticSearch project.
+ * If the project contains expressions, this rule splits it so that only a simple
+ * (input-reference) project is converted into an ElasticSearch project.
+ */
+public class ElasticsearchProjectRule extends ConverterRule {
+  private final Convention out;
+
+  public static final ElasticsearchProjectRule INSTANCE = new ElasticsearchProjectRule();
+
+  private ElasticsearchProjectRule() {
+    super(Project.class, Convention.NONE, ElasticsearchRel.CONVENTION,
+        "DrillElasticsearchProjectRule");
+    this.out = ElasticsearchRel.CONVENTION;
+  }
+
+  @Override
+  public RelNode convert(RelNode relNode) {
+    Project project = (Project) relNode;
+    NodeTypeFinder projectFinder = new NodeTypeFinder(ElasticsearchProject.class);
+    project.getInput().accept(projectFinder);
+    if (projectFinder.containsNode) {
+      // Calcite adapter allows only a single Elasticsearch project per tree
+      return null;
+    }
+    RelTraitSet traitSet = project.getTraitSet().replace(out);
+    List<RexNode> innerProjections = new ArrayList<>();
+    RelDataType rowType = project.getInput().getRowType();
+
+    // Collect input references to detect a project that contains only literals (no input expressions)
+    DrillRelOptUtil.InputRefVisitor collectRefs = new DrillRelOptUtil.InputRefVisitor();
+    project.getChildExps().forEach(exp -> exp.accept(collectRefs));
+
+    if (!collectRefs.getInputRefs().isEmpty()) {
+      for (RelDataTypeField relDataTypeField : rowType.getFieldList()) {
+        innerProjections.add(project.getCluster().getRexBuilder().makeInputRef(project.getInput(), relDataTypeField.getIndex()));
+      }
+    }
+
+    boolean allExprsInputRefs = project.getChildExps().stream().allMatch(rexNode -> rexNode instanceof RexInputRef);
+    if (collectRefs.getInputRefs().isEmpty() || allExprsInputRefs) {
+      return CalciteUtils.createProject(traitSet,
+          convert(project.getInput(), out), project.getProjects(), project.getRowType());
+    } else {
+      Project elasticsearchProject = CalciteUtils.createProject(traitSet,
+          convert(project.getInput(), out), innerProjections, project.getInput().getRowType());
+      return project.copy(project.getTraitSet(), elasticsearchProject,
+          project.getProjects(), project.getRowType());
+    }
+  }
+
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/NodeTypeFinder.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/NodeTypeFinder.java
new file mode 100644
index 0000000..68125fa
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/plan/NodeTypeFinder.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.plan;
+
+import org.apache.calcite.plan.volcano.RelSubset;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelShuttleImpl;
+import org.apache.calcite.util.Util;
+
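+/**
+ * Rel shuttle that checks whether a relational tree contains a node of the
+ * specified class, looking through {@link RelSubset} nodes via their best or
+ * original rel.
+ */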
+public class NodeTypeFinder extends RelShuttleImpl {
+  public boolean containsNode = false;
+
+  private final Class<?> clazz;
+
+  public NodeTypeFinder(Class<?> clazz) {
+    this.clazz = clazz;
+  }
+
+  @Override
+  public RelNode visit(RelNode other) {
+    if (other.getClass().isAssignableFrom(clazz)) {
+      this.containsNode = true;
+      return other;
+    } else if (other instanceof RelSubset) {
+      RelSubset relSubset = (RelSubset) other;
+      return Util.first(relSubset.getBest(), relSubset.getOriginal()).accept(this);
+    }
+    return super.visit(other);
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDrillSchema.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDrillSchema.java
new file mode 100644
index 0000000..ca1be74
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDrillSchema.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.schema;
+
+import org.apache.calcite.linq4j.tree.Expression;
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.schema.Table;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.apache.drill.exec.store.StoragePlugin;
+import org.apache.drill.exec.store.elasticsearch.ElasticsearchStorageConfig;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
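+/**
+ * Drill schema that delegates table lookups to the underlying Calcite
+ * ElasticSearch schema and wraps the returned tables in
+ * {@link ElasticsearchDynamicTable} instances.
+ */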
+public class ElasticsearchDrillSchema extends AbstractSchema {
+  private final Schema delegatingSchema;
+  private final StoragePlugin plugin;
+  private final Map<String, Table> tables = new ConcurrentHashMap<>();
+
+  public ElasticsearchDrillSchema(String name, StoragePlugin plugin, Schema delegatingSchema) {
+    super(Collections.emptyList(), name);
+    this.plugin = plugin;
+    this.delegatingSchema = delegatingSchema;
+  }
+
+  @Override
+  public String getTypeName() {
+    return ElasticsearchStorageConfig.NAME;
+  }
+
+  @Override
+  public Table getTable(String tableName) {
+    return tables.computeIfAbsent(tableName, this::getDrillTable);
+  }
+
+  private DrillTable getDrillTable(String tableName) {
+    Table table = delegatingSchema.getTable(tableName);
+    return table == null ? null
+        : new ElasticsearchDynamicTable(plugin, tableName, null, table);
+  }
+
+  @Override
+  public Set<String> getTableNames() {
+    return delegatingSchema.getTableNames();
+  }
+
+  @Override
+  public Expression getExpression(SchemaPlus parentSchema, String name) {
+    return delegatingSchema.getExpression(parentSchema, name);
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDrillSchemaFactory.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDrillSchemaFactory.java
new file mode 100644
index 0000000..5430743
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDrillSchemaFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.schema;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchSchemaFactory;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.drill.exec.store.AbstractSchemaFactory;
+import org.apache.drill.exec.store.SchemaConfig;
+import org.apache.drill.exec.store.elasticsearch.ElasticsearchStoragePlugin;
+
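+/**
+ * Schema factory that registers an {@link ElasticsearchDrillSchema} backed by
+ * the Calcite {@link ElasticsearchSchemaFactory} delegate.
+ */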
+public class ElasticsearchDrillSchemaFactory extends AbstractSchemaFactory {
+
+  private final ElasticsearchStoragePlugin plugin;
+  private final ElasticsearchSchemaFactory delegate;
+
+  public ElasticsearchDrillSchemaFactory(String name, ElasticsearchStoragePlugin plugin) {
+    super(name);
+    this.plugin = plugin;
+    this.delegate = new ElasticsearchSchemaFactory();
+  }
+
+  @Override
+  public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws JsonProcessingException {
+    ElasticsearchDrillSchema schema = new ElasticsearchDrillSchema(getName(), plugin,
+        delegate.create(parent, getName(), plugin.getConfig().toConfigMap()));
+    parent.add(getName(), schema);
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDynamicTable.java b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDynamicTable.java
new file mode 100644
index 0000000..a1919f5
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/java/org/apache/drill/exec/store/elasticsearch/schema/ElasticsearchDynamicTable.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch.schema;
+
+import org.apache.calcite.adapter.elasticsearch.DrillElasticsearchTableScan;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchRel;
+import org.apache.calcite.adapter.elasticsearch.ElasticsearchTable;
+import org.apache.calcite.linq4j.QueryProvider;
+import org.apache.calcite.linq4j.Queryable;
+import org.apache.calcite.linq4j.tree.Expression;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.schema.QueryableTable;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.schema.Table;
+import org.apache.calcite.schema.TranslatableTable;
+import org.apache.calcite.schema.Wrapper;
+import org.apache.drill.exec.planner.logical.DynamicDrillTable;
+import org.apache.drill.exec.store.StoragePlugin;
+
+import java.lang.reflect.Type;
+
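+/**
+ * Dynamic Drill table that wraps the Calcite {@link ElasticsearchTable},
+ * converting scans of this table into {@link DrillElasticsearchTableScan}
+ * nodes and delegating queryable behavior to the wrapped table.
+ */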
+public class ElasticsearchDynamicTable extends DynamicDrillTable implements TranslatableTable, QueryableTable, Wrapper {
+
+  private final ElasticsearchTable table;
+
+  public ElasticsearchDynamicTable(StoragePlugin plugin, String storageEngineName, Object selection, Table table) {
+    super(plugin, storageEngineName, selection);
+    this.table = (ElasticsearchTable) table;
+  }
+
+  @Override
+  public RelNode toRel(RelOptTable.ToRelContext context, RelOptTable relOptTable) {
+    RelOptCluster cluster = context.getCluster();
+    return new DrillElasticsearchTableScan(cluster,
+        cluster.traitSetOf(ElasticsearchRel.CONVENTION), relOptTable, table, relOptTable.getRowType());
+  }
+
+  @Override
+  public <T> Queryable<T> asQueryable(QueryProvider queryProvider, SchemaPlus schema, String tableName) {
+    return table.asQueryable(queryProvider, schema, tableName);
+  }
+
+  @Override
+  public Type getElementType() {
+    return table.getElementType();
+  }
+
+  @Override
+  public <C> C unwrap(Class<C> aClass) {
+    if (aClass.isInstance(this)) {
+      return aClass.cast(this);
+    } else if (aClass.isInstance(table)) {
+      return aClass.cast(table);
+    }
+    return null;
+  }
+
+  @Override
+  public Expression getExpression(SchemaPlus schema, String tableName, Class clazz) {
+    return table.getExpression(schema, tableName, clazz);
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/resources/bootstrap-storage-plugins.json b/contrib/storage-elasticsearch/src/main/resources/bootstrap-storage-plugins.json
new file mode 100644
index 0000000..ded7efa
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/resources/bootstrap-storage-plugins.json
@@ -0,0 +1,10 @@
+{
+  "storage":{
+    "elastic" : {
+      "type" : "elastic",
+      "hosts": ["http://localhost:9200"],
+      "username" : null,
+      "password" : null
+    }
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/main/resources/drill-module.conf b/contrib/storage-elasticsearch/src/main/resources/drill-module.conf
new file mode 100755
index 0000000..f0836fe
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/main/resources/drill-module.conf
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#  This file tells Drill to consider this module when class path scanning.
+#  This file can also include any supplementary configuration information.
+#  This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md for more information.
+drill.classpath.scanning: {
+  packages += "org.apache.drill.exec.store.elasticsearch"
+}
diff --git a/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticComplexTypesTest.java b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticComplexTypesTest.java
new file mode 100644
index 0000000..75192e5
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticComplexTypesTest.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch;
+
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.http.HttpHost;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.indices.CreateIndexRequest;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.drill.test.TestBuilder.listOf;
+import static org.apache.drill.test.TestBuilder.mapOf;
+
+public class ElasticComplexTypesTest extends ClusterTest {
+
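+  // Tests assume an ElasticSearch instance is running and reachable at this address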
+  private static final String HOST = "http://localhost:9200";
+
+  private static final List<String> indexNames = new ArrayList<>();
+
+  public static RestHighLevelClient restHighLevelClient;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    ElasticsearchStorageConfig config = new ElasticsearchStorageConfig(
+        Collections.singletonList(HOST), null, null);
+    config.setEnabled(true);
+    cluster.defineStoragePlugin("elastic", config);
+
+    prepareData();
+  }
+
+  @AfterClass
+  public static void cleanUp() throws IOException {
+    for (String indexName : indexNames) {
+      restHighLevelClient.indices().delete(new DeleteIndexRequest(indexName), RequestOptions.DEFAULT);
+    }
+  }
+
+  private static void prepareData() throws IOException {
+    restHighLevelClient = new RestHighLevelClient(RestClient.builder(HttpHost.create(HOST)));
+
+    String indexName = "arr";
+    indexNames.add(indexName);
+    CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
+
+    restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
+
+    XContentBuilder builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("string_arr", Arrays.asList("a", "b", "c", "d"));
+    builder.field("int_arr", Arrays.asList(1, 2, 3, 4, 0));
+    builder.field("nest_int_arr", Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4, 0)));
+    builder.endObject();
+    IndexRequest indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+    restHighLevelClient.indices().refresh(new RefreshRequest(indexName), RequestOptions.DEFAULT);
+
+    indexName = "map";
+    indexNames.add(indexName);
+    createIndexRequest = new CreateIndexRequest(indexName);
+
+    restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("prim_field", 321);
+    builder.field("nest_field", ImmutableMap.of("a", 123, "b", "abc"));
+    builder.field("more_nest_field", ImmutableMap.of("a", 123, "b", ImmutableMap.of("c", "abc")));
+    builder.field("map_arr", Collections.singletonList(ImmutableMap.of("a", 123, "b", ImmutableMap.of("c", "abc"))));
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+    restHighLevelClient.indices().refresh(new RefreshRequest(indexName), RequestOptions.DEFAULT);
+  }
+
+  @Test
+  public void testSelectStarWithArray() throws Exception {
+    testBuilder()
+        .sqlQuery("select * from elastic.arr")
+        .unOrdered()
+        .baselineColumns("string_arr", "int_arr", "nest_int_arr")
+        .baselineValues(listOf("a", "b", "c", "d"), listOf(1, 2, 3, 4, 0),
+            listOf(listOf(1, 2), listOf(3, 4, 0)))
+        .go();
+  }
+
+  @Test
+  public void testSelectArrayElem() throws Exception {
+    testBuilder()
+        .sqlQuery("select string_arr[0] c1, int_arr[1] c2, nest_int_arr[0][1] c3 from elastic.arr")
+        .unOrdered()
+        .baselineColumns("c1", "c2", "c3")
+        .baselineValues("a", 2, 2)
+        .go();
+  }
+
+  @Test
+  public void testSelectStarWithJson() throws Exception {
+    testBuilder()
+        .sqlQuery("select * from elastic.map")
+        .unOrdered()
+        .baselineColumns("prim_field", "nest_field", "more_nest_field", "map_arr")
+        .baselineValues(321, mapOf("a", 123, "b", "abc"),
+            mapOf("a", 123, "b", mapOf("c", "abc")),
+            listOf(mapOf("a", 123, "b", mapOf("c", "abc"))))
+        .go();
+  }
+
+  @Test
+  public void testSelectNestedFields() throws Exception {
+    testBuilder()
+        .sqlQuery("select m.nest_field.a a, m.nest_field.b b, m.more_nest_field.b.c c, map_arr[0].b.c d from elastic.map m")
+        .unOrdered()
+        .baselineColumns("a", "b", "c", "d")
+        .baselineValues(123, "abc", "abc", "abc")
+        .go();
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticInfoSchemaTest.java b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticInfoSchemaTest.java
new file mode 100644
index 0000000..2bafc11
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticInfoSchemaTest.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch;
+
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.http.HttpHost;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.indices.CreateIndexRequest;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class ElasticInfoSchemaTest extends ClusterTest {
+
+  private static final String HOST = "http://localhost:9200";
+
+  private static final List<String> indexNames = new ArrayList<>();
+
+  public static RestHighLevelClient restHighLevelClient;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    ElasticsearchStorageConfig config = new ElasticsearchStorageConfig(
+        Collections.singletonList(HOST), null, null);
+    config.setEnabled(true);
+    cluster.defineStoragePlugin("elastic", config);
+
+    prepareData();
+  }
+
+  @AfterClass
+  public static void cleanUp() throws IOException {
+    for (String indexName : indexNames) {
+      restHighLevelClient.indices().delete(new DeleteIndexRequest(indexName), RequestOptions.DEFAULT);
+    }
+  }
+
+  private static void prepareData() throws IOException {
+    restHighLevelClient = new RestHighLevelClient(RestClient.builder(HttpHost.create(HOST)));
+
+    String indexName = "t1";
+    indexNames.add(indexName);
+    CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
+
+    restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
+
+    XContentBuilder builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("string_field", "a");
+    builder.field("int_field", 123);
+    builder.endObject();
+    IndexRequest indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+    restHighLevelClient.indices().refresh(new RefreshRequest(indexName), RequestOptions.DEFAULT);
+
+    indexName = "t2";
+    indexNames.add(indexName);
+    createIndexRequest = new CreateIndexRequest(indexName);
+
+    restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("another_int_field", 321);
+    builder.field("another_string_field", "b");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+    restHighLevelClient.indices().refresh(new RefreshRequest(indexName), RequestOptions.DEFAULT);
+  }
+
+  @Test
+  public void testShowTables() throws Exception {
+    testBuilder()
+        .sqlQuery("show tables in elastic")
+        .unOrdered()
+        .baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
+        .baselineValues("elastic", "t1")
+        .baselineValues("elastic", "t2")
+        .go();
+  }
+
+  @Test
+  public void testShowTablesLike() throws Exception {
+    testBuilder()
+        .sqlQuery("show tables in elastic like '%2%'")
+        .unOrdered()
+        .baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
+        .baselineValues("elastic", "t2")
+        .go();
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticSearchPlanTest.java b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticSearchPlanTest.java
new file mode 100644
index 0000000..89c0410
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticSearchPlanTest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch;
+
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.http.HttpHost;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.indices.CreateIndexRequest;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Collections;
+
+public class ElasticSearchPlanTest extends ClusterTest {
+
+  private static final String HOST = "http://localhost:9200";
+
+  public static RestHighLevelClient restHighLevelClient;
+
+  private static String indexName;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    ElasticsearchStorageConfig config = new ElasticsearchStorageConfig(
+        Collections.singletonList(HOST), null, null);
+    config.setEnabled(true);
+    cluster.defineStoragePlugin("elastic", config);
+
+    prepareData();
+  }
+
+  @AfterClass
+  public static void cleanUp() throws IOException {
+    restHighLevelClient.indices().delete(new DeleteIndexRequest(indexName), RequestOptions.DEFAULT);
+  }
+
+  private static void prepareData() throws IOException {
+    restHighLevelClient = new RestHighLevelClient(RestClient.builder(HttpHost.create(HOST)));
+
+    indexName = "nation";
+    CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
+
+    restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
+
+    XContentBuilder builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("n_nationkey", 0);
+    builder.field("n_name", "ALGERIA");
+    builder.field("n_regionkey", 1);
+    builder.endObject();
+    IndexRequest indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    restHighLevelClient.indices().refresh(new RefreshRequest(indexName), RequestOptions.DEFAULT);
+  }
+
+  @Test
+  public void testProjectPushDown() throws Exception {
+    queryBuilder()
+        .sql("select n_name, n_nationkey from elastic.`nation`")
+        .planMatcher()
+        .include("ElasticsearchProject.*n_name.*n_nationkey")
+        .exclude("\\*\\*")
+        .match();
+  }
+
+  @Test
+  public void testFilterPushDown() throws Exception {
+    queryBuilder()
+        .sql("select n_name, n_nationkey from elastic.`nation` where n_nationkey = 0")
+        .planMatcher()
+        .include("ElasticsearchFilter")
+        .match();
+  }
+
+  @Test
+  public void testFilterPushDownWithJoin() throws Exception {
+    String query = "select * from elastic.`nation` e\n" +
+        "join elastic.`nation` s on e.n_nationkey = s.n_nationkey where e.n_name = 'algeria'";
+
+    queryBuilder()
+        .sql(query)
+        .planMatcher()
+        .include("ElasticsearchFilter")
+        .match();
+  }
+
+  @Test
+  public void testAggregationPushDown() throws Exception {
+    queryBuilder()
+        .sql("select count(*) from elastic.`nation`")
+        .planMatcher()
+        .include("ElasticsearchAggregate.*COUNT")
+        .match();
+  }
+
+  @Test
+  public void testLimitWithSortPushDown() throws Exception {
+    queryBuilder()
+        .sql("select n_nationkey from elastic.`nation` order by n_name limit 3")
+        .planMatcher()
+        .include("ElasticsearchSort.*sort.*fetch")
+        .match();
+  }
+
+  @Test
+  public void testAggregationWithGroupByPushDown() throws Exception {
+    queryBuilder()
+        .sql("select sum(n_nationkey) from elastic.`nation` group by n_regionkey")
+        .planMatcher()
+        .include("ElasticsearchAggregate.*SUM")
+        .match();
+  }
+}
diff --git a/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticSearchQueryTest.java b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticSearchQueryTest.java
new file mode 100644
index 0000000..f7b1738
--- /dev/null
+++ b/contrib/storage-elasticsearch/src/test/java/org/apache/drill/exec/store/elasticsearch/ElasticSearchQueryTest.java
@@ -0,0 +1,597 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.elasticsearch;
+
+import org.apache.drill.common.exceptions.UserRemoteException;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.http.HttpHost;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.indices.CreateIndexRequest;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.time.LocalDate;
+import java.util.Collections;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.fail;
+
+public class ElasticSearchQueryTest extends ClusterTest {
+
+  private static final String HOST = "http://localhost:9200";
+
+  public static RestHighLevelClient restHighLevelClient;
+
+  private static String indexName;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    startCluster(ClusterFixture.builder(dirTestWatcher));
+
+    ElasticsearchStorageConfig config = new ElasticsearchStorageConfig(
+        Collections.singletonList(HOST), null, null);
+    config.setEnabled(true);
+    cluster.defineStoragePlugin("elastic", config);
+
+    prepareData();
+  }
+
+  @AfterClass
+  public static void cleanUp() throws IOException {
+    restHighLevelClient.indices().delete(new DeleteIndexRequest(indexName), RequestOptions.DEFAULT);
+  }
+
+  private static void prepareData() throws IOException {
+    restHighLevelClient = new RestHighLevelClient(RestClient.builder(HttpHost.create(HOST)));
+
+    indexName = "employee";
+    CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
+
+    restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT);
+
+    XContentBuilder builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 1);
+    builder.field("full_name", "Sheri Nowmer");
+    builder.field("first_name", "Sheri");
+    builder.field("last_name", "Nowmer");
+    builder.field("position_id", 1);
+    builder.field("position_title", "President");
+    builder.field("store_id", 0);
+    builder.field("department_id", 1);
+    builder.field("birth_date", "1961-08-26");
+    builder.field("hire_date", "1994-12-01 00:00:00.0");
+    builder.field("salary", 80000.0);
+    builder.field("supervisor_id", 0);
+    builder.field("education_level", "Graduate Degree");
+    builder.field("marital_status", "S");
+    builder.field("gender", "F");
+    builder.field("management_role", "Senior Management");
+    builder.endObject();
+    IndexRequest indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 2);
+    builder.field("full_name", "Derrick Whelply");
+    builder.field("first_name", "Derrick");
+    builder.field("last_name", "Whelply");
+    builder.field("position_id", 2);
+    builder.field("position_title", "VP Country Manager");
+    builder.field("store_id", 0);
+    builder.field("department_id", 1);
+    builder.field("birth_date", "1915-07-03");
+    builder.field("hire_date", "1994-12-01 00:00:00.0");
+    builder.field("salary", 40000.0);
+    builder.field("supervisor_id", 1);
+    builder.field("education_level", "Graduate Degree");
+    builder.field("marital_status", "M");
+    builder.field("gender", "M");
+    builder.field("management_role", "Senior Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 4);
+    builder.field("full_name", "Michael Spence");
+    builder.field("first_name", "Michael");
+    builder.field("last_name", "Spence");
+    builder.field("position_id", 2);
+    builder.field("position_title", "VP Country Manager");
+    builder.field("store_id", 0);
+    builder.field("department_id", 1);
+    builder.field("birth_date", "1969-06-20");
+    builder.field("hire_date", "1998-01-01 00:00:00.0");
+    builder.field("salary", 40000.0);
+    builder.field("supervisor_id", 1);
+    builder.field("education_level", "Graduate Degree");
+    builder.field("marital_status", "S");
+    builder.field("gender", "M");
+    builder.field("management_role", "Senior Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 5);
+    builder.field("full_name", "Maya Gutierrez");
+    builder.field("first_name", "Maya");
+    builder.field("last_name", "Gutierrez");
+    builder.field("position_id", 2);
+    builder.field("position_title", "VP Country Manager");
+    builder.field("store_id", 0);
+    builder.field("department_id", 1);
+    builder.field("birth_date", "1951-05-10");
+    builder.field("hire_date", "1998-01-01 00:00:00.0");
+    builder.field("salary", 35000.0);
+    builder.field("supervisor_id", 1);
+    builder.field("education_level", "Bachelors Degree");
+    builder.field("marital_status", "M");
+    builder.field("gender", "F");
+    builder.field("management_role", "Senior Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 6);
+    builder.field("full_name", "Roberta Damstra");
+    builder.field("first_name", "Roberta");
+    builder.field("last_name", "Damstra");
+    builder.field("position_id", 3);
+    builder.field("position_title", "VP Information Systems");
+    builder.field("store_id", 0);
+    builder.field("department_id", 2);
+    builder.field("birth_date", "1942-10-08");
+    builder.field("hire_date", "1994-12-01 00:00:00.0");
+    builder.field("salary", 25000.0);
+    builder.field("supervisor_id", 1);
+    builder.field("education_level", "Bachelors Degree");
+    builder.field("marital_status", "M");
+    builder.field("gender", "F");
+    builder.field("management_role", "Senior Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 7);
+    builder.field("full_name", "Rebecca Kanagaki");
+    builder.field("first_name", "Rebecca");
+    builder.field("last_name", "Kanagaki");
+    builder.field("position_id", 4);
+    builder.field("position_title", "VP Human Resources");
+    builder.field("store_id", 0);
+    builder.field("department_id", 3);
+    builder.field("birth_date", "1949-03-27");
+    builder.field("hire_date", "1994-12-01 00:00:00.0");
+    builder.field("salary", 15000.0);
+    builder.field("supervisor_id", 1);
+    builder.field("education_level", "Bachelors Degree");
+    builder.field("marital_status", "M");
+    builder.field("gender", "F");
+    builder.field("management_role", "Senior Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 8);
+    builder.field("full_name", "Kim Brunner");
+    builder.field("first_name", "Kim");
+    builder.field("last_name", "Brunner");
+    builder.field("position_id", 11);
+    builder.field("position_title", "Store Manager");
+    builder.field("store_id", 9);
+    builder.field("department_id", 11);
+    builder.field("birth_date", "1922-08-10");
+    builder.field("hire_date", "1998-01-01 00:00:00.0");
+    builder.field("salary", 10000.0);
+    builder.field("supervisor_id", 5);
+    builder.field("education_level", "Bachelors Degree");
+    builder.field("marital_status", "S");
+    builder.field("gender", "F");
+    builder.field("management_role", "Store Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 9);
+    builder.field("full_name", "Brenda Blumberg");
+    builder.field("first_name", "Brenda");
+    builder.field("last_name", "Blumberg");
+    builder.field("position_id", 11);
+    builder.field("position_title", "Store Manager");
+    builder.field("store_id", 21);
+    builder.field("department_id", 11);
+    builder.field("birth_date", "1979-06-23");
+    builder.field("hire_date", "1998-01-01 00:00:00.0");
+    builder.field("salary", 17000.0);
+    builder.field("supervisor_id", 5);
+    builder.field("education_level", "Graduate Degree");
+    builder.field("marital_status", "M");
+    builder.field("gender", "F");
+    builder.field("management_role", "Store Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 10);
+    builder.field("full_name", "Darren Stanz");
+    builder.field("first_name", "Darren");
+    builder.field("last_name", "Stanz");
+    builder.field("position_id", 5);
+    builder.field("position_title", "VP Finance");
+    builder.field("store_id", 0);
+    builder.field("department_id", 5);
+    builder.field("birth_date", "1949-08-26");
+    builder.field("hire_date", "1994-12-01 00:00:00.0");
+    builder.field("salary", 50000.0);
+    builder.field("supervisor_id", 1);
+    builder.field("education_level", "Partial College");
+    builder.field("marital_status", "M");
+    builder.field("gender", "M");
+    builder.field("management_role", "Senior Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    builder = XContentFactory.jsonBuilder();
+    builder.startObject();
+    builder.field("employee_id", 11);
+    builder.field("full_name", "Jonathan Murraiin");
+    builder.field("first_name", "Jonathan");
+    builder.field("last_name", "Murraiin");
+    builder.field("position_id", 11);
+    builder.field("position_title", "Store Manager");
+    builder.field("store_id", 1);
+    builder.field("department_id", 11);
+    builder.field("birth_date", "1967-06-20");
+    builder.field("hire_date", "1998-01-01 00:00:00.0");
+    builder.field("salary", 15000.0);
+    builder.field("supervisor_id", 5);
+    builder.field("education_level", "Graduate Degree");
+    builder.field("marital_status", "S");
+    builder.field("gender", "M");
+    builder.field("management_role", "Store Management");
+    builder.endObject();
+    indexRequest = new IndexRequest(indexName).source(builder);
+    restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT);
+
+    restHighLevelClient.indices().refresh(new RefreshRequest(indexName), RequestOptions.DEFAULT);
+  }
+
+  @Test
+  public void testSelectAll() throws Exception {
+    testBuilder()
+        .sqlQuery("select * from elastic.`employee`")
+        .unOrdered()
+        .baselineColumns("employee_id", "full_name", "first_name", "last_name", "position_id",
+            "position_title", "store_id", "department_id", "birth_date", "hire_date", "salary",
+            "supervisor_id", "education_level", "marital_status", "gender", "management_role")
+        .baselineValues(1, "Sheri Nowmer", "Sheri", "Nowmer", 1, "President", 0, 1, "1961-08-26", "1994-12-01 00:00:00.0", 80000.0, 0, "Graduate Degree", "S", "F", "Senior Management")
+        .baselineValues(2, "Derrick Whelply", "Derrick", "Whelply", 2, "VP Country Manager", 0, 1, "1915-07-03", "1994-12-01 00:00:00.0", 40000.0, 1, "Graduate Degree", "M", "M", "Senior Management")
+        .baselineValues(4, "Michael Spence", "Michael", "Spence", 2, "VP Country Manager", 0, 1, "1969-06-20", "1998-01-01 00:00:00.0", 40000.0, 1, "Graduate Degree", "S", "M", "Senior Management")
+        .baselineValues(5, "Maya Gutierrez", "Maya", "Gutierrez", 2, "VP Country Manager", 0, 1, "1951-05-10", "1998-01-01 00:00:00.0", 35000.0, 1, "Bachelors Degree", "M", "F", "Senior Management")
+        .baselineValues(6, "Roberta Damstra", "Roberta", "Damstra", 3, "VP Information Systems", 0, 2, "1942-10-08", "1994-12-01 00:00:00.0", 25000.0, 1, "Bachelors Degree", "M", "F", "Senior Management")
+        .baselineValues(7, "Rebecca Kanagaki", "Rebecca", "Kanagaki", 4, "VP Human Resources", 0, 3, "1949-03-27", "1994-12-01 00:00:00.0", 15000.0, 1, "Bachelors Degree", "M", "F", "Senior Management")
+        .baselineValues(8, "Kim Brunner", "Kim", "Brunner", 11, "Store Manager", 9, 11, "1922-08-10", "1998-01-01 00:00:00.0", 10000.0, 5, "Bachelors Degree", "S", "F", "Store Management")
+        .baselineValues(9, "Brenda Blumberg", "Brenda", "Blumberg", 11, "Store Manager", 21, 11, "1979-06-23", "1998-01-01 00:00:00.0", 17000.0, 5, "Graduate Degree", "M", "F", "Store Management")
+        .baselineValues(10, "Darren Stanz", "Darren", "Stanz", 5, "VP Finance", 0, 5, "1949-08-26", "1994-12-01 00:00:00.0", 50000.0, 1, "Partial College", "M", "M", "Senior Management")
+        .baselineValues(11, "Jonathan Murraiin", "Jonathan", "Murraiin", 11, "Store Manager", 1, 11, "1967-06-20", "1998-01-01 00:00:00.0", 15000.0, 5, "Graduate Degree", "S", "M", "Store Management")
+        .go();
+  }
+
+  @Test
+  public void testSelectColumns() throws Exception {
+    testBuilder()
+        .sqlQuery("select full_name, birth_date from elastic.`employee`")
+        .unOrdered()
+        .baselineColumns("full_name", "birth_date")
+        .baselineValues("Sheri Nowmer", "1961-08-26")
+        .baselineValues("Derrick Whelply", "1915-07-03")
+        .baselineValues("Michael Spence", "1969-06-20")
+        .baselineValues("Maya Gutierrez", "1951-05-10")
+        .baselineValues("Roberta Damstra", "1942-10-08")
+        .baselineValues("Rebecca Kanagaki", "1949-03-27")
+        .baselineValues("Kim Brunner", "1922-08-10")
+        .baselineValues("Brenda Blumberg", "1979-06-23")
+        .baselineValues("Darren Stanz", "1949-08-26")
+        .baselineValues("Jonathan Murraiin", "1967-06-20")
+        .go();
+  }
+
+  @Test
+  public void testSelectAllFiltered() throws Exception {
+    testBuilder()
+        .sqlQuery("select * from elastic.`employee` where employee_id = 1 and first_name = 'sheri'")
+        .unOrdered()
+        .baselineColumns("employee_id", "full_name", "first_name", "last_name", "position_id",
+            "position_title", "store_id", "department_id", "birth_date", "hire_date", "salary",
+            "supervisor_id", "education_level", "marital_status", "gender", "management_role")
+        .baselineValues(1, "Sheri Nowmer", "Sheri", "Nowmer", 1, "President", 0, 1, "1961-08-26",
+            "1994-12-01 00:00:00.0", 80000.0, 0, "Graduate Degree", "S", "F", "Senior Management")
+        .go();
+  }
+
+  @Test
+  public void testSelectColumnsFiltered() throws Exception {
+    testBuilder()
+        .sqlQuery("select full_name, birth_date from elastic.`employee` where employee_id = 1 and first_name = 'sheri'")
+        .unOrdered()
+        .baselineColumns("full_name", "birth_date")
+        .baselineValues("Sheri Nowmer", "1961-08-26")
+        .go();
+  }
+
+  @Test
+  public void testSelectColumnsUnsupportedFilter() throws Exception {
+    // Calcite doesn't support LIKE and similar functions yet, so ensure the Drill filter is used here
+    testBuilder()
+        .sqlQuery("select full_name, birth_date from elastic.`employee` where first_name like 'Sh%'")
+        .unOrdered()
+        .baselineColumns("full_name", "birth_date")
+        .baselineValues("Sheri Nowmer", "1961-08-26")
+        .go();
+  }
+
+  @Test
+  public void testSelectColumnsUnsupportedProject() throws Exception {
+    // Calcite doesn't support LIKE and similar functions yet, so ensure the Drill project is used here
+    testBuilder()
+        .sqlQuery("select first_name like 'Sh%' as c, birth_date from elastic.`employee` where first_name like 'Sh%'")
+        .unOrdered()
+        .baselineColumns("c", "birth_date")
+        .baselineValues(true, "1961-08-26")
+        .go();
+  }
+
+  @Test
+  public void testSelectColumnsUnsupportedAggregate() throws Exception {
+    // Calcite doesn't support stddev_samp and similar functions yet, so ensure the Drill aggregate is used here
+    testBuilder()
+        .sqlQuery("select stddev_samp(salary) as standard_deviation from elastic.`employee`")
+        .unOrdered()
+        .baselineColumns("standard_deviation")
+        .baselineValues(21333.593748410563)
+        .go();
+  }
+
+  @Test
+  public void testLimitWithSort() throws Exception {
+    testBuilder()
+        .sqlQuery("select full_name, birth_date from elastic.`employee` order by employee_id limit 3")
+        .ordered()
+        .baselineColumns("full_name", "birth_date")
+        .baselineValues("Sheri Nowmer", "1961-08-26")
+        .baselineValues("Derrick Whelply", "1915-07-03")
+        .baselineValues("Michael Spence", "1969-06-20")
+        .go();
+  }
+
+  @Test
+  public void testSelectAllWithLimitAndSort() throws Exception {
+    testBuilder()
+        .sqlQuery("select * from elastic.`employee` order by employee_id limit 3")
+        .ordered()
+        .baselineColumns("employee_id", "full_name", "first_name", "last_name", "position_id",
+            "position_title", "store_id", "department_id", "birth_date", "hire_date", "salary",
+            "supervisor_id", "education_level", "marital_status", "gender", "management_role")
+        .baselineValues(1, "Sheri Nowmer", "Sheri", "Nowmer", 1, "President", 0, 1, "1961-08-26", "1994-12-01 00:00:00.0", 80000.0, 0, "Graduate Degree", "S", "F", "Senior Management")
+        .baselineValues(2, "Derrick Whelply", "Derrick", "Whelply", 2, "VP Country Manager", 0, 1, "1915-07-03", "1994-12-01 00:00:00.0", 40000.0, 1, "Graduate Degree", "M", "M", "Senior Management")
+        .baselineValues(4, "Michael Spence", "Michael", "Spence", 2, "VP Country Manager", 0, 1, "1969-06-20", "1998-01-01 00:00:00.0", 40000.0, 1, "Graduate Degree", "S", "M", "Senior Management")
+        .go();
+  }
+
+  @Test
+  public void testSingleColumn() throws Exception {
+    testBuilder()
+        .sqlQuery("select full_name from elastic.`employee` order by employee_id limit 3")
+        .ordered()
+        .baselineColumns("full_name")
+        .baselineValues("Sheri Nowmer")
+        .baselineValues("Derrick Whelply")
+        .baselineValues("Michael Spence")
+        .go();
+  }
+
+  @Test
+  public void testJoin() throws Exception {
+    testBuilder()
+        .sqlQuery("select t1.full_name, t2.birth_date from elastic.`employee` t1 join elastic.`employee` t2 on t1.employee_id = t2.employee_id")
+        .unOrdered()
+        .baselineColumns("full_name", "birth_date")
+        .baselineValues("Sheri Nowmer", "1961-08-26")
+        .baselineValues("Derrick Whelply", "1915-07-03")
+        .baselineValues("Michael Spence", "1969-06-20")
+        .baselineValues("Maya Gutierrez", "1951-05-10")
+        .baselineValues("Roberta Damstra", "1942-10-08")
+        .baselineValues("Rebecca Kanagaki", "1949-03-27")
+        .baselineValues("Kim Brunner", "1922-08-10")
+        .baselineValues("Brenda Blumberg", "1979-06-23")
+        .baselineValues("Darren Stanz", "1949-08-26")
+        .baselineValues("Jonathan Murraiin", "1967-06-20")
+        .go();
+  }
+
+  @Test
+  public void testJoinWithFileTable() throws Exception {
+    testBuilder()
+        .sqlQuery("select t1.full_name, t2.birth_date from elastic.`employee` t1 join cp.`employee.json` t2 on t1.employee_id = t2.employee_id")
+        .unOrdered()
+        .baselineColumns("full_name", "birth_date")
+        .baselineValues("Sheri Nowmer", "1961-08-26")
+        .baselineValues("Derrick Whelply", "1915-07-03")
+        .baselineValues("Michael Spence", "1969-06-20")
+        .baselineValues("Maya Gutierrez", "1951-05-10")
+        .baselineValues("Roberta Damstra", "1942-10-08")
+        .baselineValues("Rebecca Kanagaki", "1949-03-27")
+        .baselineValues("Kim Brunner", "1922-08-10")
+        .baselineValues("Brenda Blumberg", "1979-06-23")
+        .baselineValues("Darren Stanz", "1949-08-26")
+        .baselineValues("Jonathan Murraiin", "1967-06-20")
+        .go();
+  }
+
+  @Test
+  public void testAggregate() throws Exception {
+    testBuilder()
+        .sqlQuery("select count(*) c from elastic.`employee`")
+        .ordered()
+        .baselineColumns("c")
+        .baselineValues(10L)
+        .go();
+  }
+
+  @Test
+  public void testAggregateWithGroupBy() throws Exception {
+    testBuilder()
+        .sqlQuery("select sum(`salary`) sal, department_id from elastic.`employee` e group by e.`department_id`")
+        .unOrdered()
+        .baselineColumns("sal", "department_id")
+        .baselineValues(195000.0, 1)
+        .baselineValues(42000.0, 11)
+        .baselineValues(25000.0, 2)
+        .baselineValues(15000.0, 3)
+        .baselineValues(50000.0, 5)
+        .go();
+  }
+
+  @Test
+  public void testSelectNonExistingColumn() throws Exception {
+    testBuilder()
+        .sqlQuery("select full_name123 from elastic.`employee` order by employee_id limit 3")
+        .unOrdered()
+        .baselineColumns("full_name123")
+        .baselineValuesForSingleColumn(null, null, null)
+        .go();
+  }
+
+  @Test
+  public void testSelectLiterals() throws Exception {
+    testBuilder()
+        .sqlQuery("select 'abc' as full_name, 123 as id from elastic.`employee` limit 3")
+        .unOrdered()
+        .baselineColumns("full_name", "id")
+        .baselineValues("abc", 123)
+        .baselineValues("abc", 123)
+        .baselineValues("abc", 123)
+        .go();
+  }
+
+  @Test
+  public void testSelectIntLiterals() throws Exception {
+    testBuilder()
+        .sqlQuery("select 333 as full_name, 123 as id from elastic.`employee` limit 3")
+        .unOrdered()
+        .baselineColumns("full_name", "id")
+        .baselineValues(333, 123)
+        .baselineValues(333, 123)
+        .baselineValues(333, 123)
+        .go();
+  }
+
+  @Test
+  public void testSelectLiteralWithStar() throws Exception {
+    testBuilder()
+        .sqlQuery("select *, 123 as full_name from elastic.`employee` order by employee_id limit 3")
+        .unOrdered()
+        .baselineColumns("full_name")
+        .baselineColumns("employee_id", "full_name", "first_name", "last_name", "position_id",
+            "position_title", "store_id", "department_id", "birth_date", "hire_date", "salary",
+            "supervisor_id", "education_level", "marital_status", "gender", "management_role", "full_name0")
+        .baselineValues(1, "Sheri Nowmer", "Sheri", "Nowmer", 1, "President", 0, 1, "1961-08-26", "1994-12-01 00:00:00.0", 80000.0, 0, "Graduate Degree", "S", "F", "Senior Management", 123)
+        .baselineValues(2, "Derrick Whelply", "Derrick", "Whelply", 2, "VP Country Manager", 0, 1, "1915-07-03", "1994-12-01 00:00:00.0", 40000.0, 1, "Graduate Degree", "M", "M", "Senior Management", 123)
+        .baselineValues(4, "Michael Spence", "Michael", "Spence", 2, "VP Country Manager", 0, 1, "1969-06-20", "1998-01-01 00:00:00.0", 40000.0, 1, "Graduate Degree", "S", "M", "Senior Management", 123)
+        .go();
+  }
+
+  @Test
+  public void testLiteralWithAggregateAndGroupBy() throws Exception {
+    testBuilder()
+        .sqlQuery("select sum(`salary`) sal, 1 as department_id from elastic.`employee` e group by e.`department_id`")
+        .unOrdered()
+        .baselineColumns("sal", "department_id")
+        .baselineValues(195000.0, 1)
+        .baselineValues(42000.0, 1)
+        .baselineValues(25000.0, 1)
+        .baselineValues(15000.0, 1)
+        .baselineValues(50000.0, 1)
+        .go();
+  }
+
+  @Test
+  public void testSelectNonExistingTable() throws Exception {
+    try {
+      queryBuilder().sql("select full_name from elastic.`non-existing`").run();
+      fail("Query didn't fail");
+    } catch (UserRemoteException e) {
+      assertThat(e.getMessage(), containsString("VALIDATION ERROR"));
+      assertThat(e.getMessage(), containsString("Object 'non-existing' not found within 'elastic'"));
+    }
+  }
+
+  @Test
+  public void testSelectNonExistingSubSchema() throws Exception {
+    try {
+      queryBuilder().sql("select full_name from elastic.`non-existing`.employee").run();
+      fail("Query didn't fail");
+    } catch (UserRemoteException e) {
+      assertThat(e.getMessage(), containsString("VALIDATION ERROR"));
+      assertThat(e.getMessage(), containsString("Schema [[elastic, non-existing]] is not valid with respect to either root schema or current default schema"));
+    }
+  }
+
+  @Test
+  public void testWithProvidedSchema() throws Exception {
+    testBuilder()
+        .sqlQuery("select * from " +
+                "table(elastic.`employee`(schema=>'inline=(birth_date date not null, salary decimal(10, 2))')) " +
+                "where employee_id = 1")
+        .ordered()
+        .baselineColumns("employee_id", "full_name", "first_name", "last_name", "position_id",
+            "position_title", "store_id", "department_id", "birth_date", "hire_date", "salary",
+            "supervisor_id", "education_level", "marital_status", "gender", "management_role")
+        .baselineValues(1, "Sheri Nowmer", "Sheri", "Nowmer", 1, "President", 0, 1, LocalDate.parse("1961-08-26"),
+            "1994-12-01 00:00:00.0", new BigDecimal("80000.00"), 0, "Graduate Degree", "S", "F", "Senior Management")
+        .go();
+  }
+}
diff --git a/contrib/storage-hbase/pom.xml b/contrib/storage-hbase/pom.xml
index 27c0ed9..d4259e8 100644
--- a/contrib/storage-hbase/pom.xml
+++ b/contrib/storage-hbase/pom.xml
@@ -28,7 +28,7 @@
 
   <artifactId>drill-storage-hbase</artifactId>
 
-  <name>contrib/hbase-storage-plugin</name>
+  <name>Drill : Contrib : Storage : HBase</name>
 
   <properties>
     <hbase.TestSuite>**/HBaseTestsSuite.class</hbase.TestSuite>
diff --git a/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/HBaseSubScan.java b/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/HBaseSubScan.java
index d4ea999..75bf85f 100644
--- a/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/HBaseSubScan.java
+++ b/contrib/storage-hbase/src/main/java/org/apache/drill/exec/store/hbase/HBaseSubScan.java
@@ -28,7 +28,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -46,6 +45,8 @@
 @JsonTypeName("hbase-region-scan")
 public class HBaseSubScan extends AbstractBase implements SubScan {
 
+  public static final String OPERATOR_TYPE = "HBASE_SUB_SCAN";
+
   private final HBaseStoragePlugin hbaseStoragePlugin;
   private final List<HBaseSubScanSpec> regionScanSpecList;
   private final List<SchemaPath> columns;
@@ -212,7 +213,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.HBASE_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/contrib/storage-hive/core/pom.xml b/contrib/storage-hive/core/pom.xml
index 7f87bb6..0157f8e 100644
--- a/contrib/storage-hive/core/pom.xml
+++ b/contrib/storage-hive/core/pom.xml
@@ -29,7 +29,7 @@
 
   <artifactId>drill-storage-hive-core</artifactId>
   <packaging>jar</packaging>
-  <name>contrib/hive-storage-plugin/core</name>
+  <name>Drill : Contrib : Storage : Hive : Core</name>
   <properties>
     <freemarker.conf.file>src/main/codegen/configHive3.fmpp</freemarker.conf.file>
   </properties>
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetRowGroupScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetRowGroupScan.java
index 6e6d469..6de82a2 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetRowGroupScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveDrillNativeParquetRowGroupScan.java
@@ -29,7 +29,6 @@
 import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.store.parquet.AbstractParquetRowGroupScan;
 import org.apache.drill.exec.store.parquet.RowGroupReadEntry;
@@ -45,6 +44,8 @@
 @JsonTypeName("hive-drill-native-parquet-row-group-scan")
 public class HiveDrillNativeParquetRowGroupScan extends AbstractParquetRowGroupScan {
 
+  public static final String OPERATOR_TYPE = "HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN";
+
   private final HiveStoragePlugin hiveStoragePlugin;
   private final HiveStoragePluginConfig hiveStoragePluginConfig;
   private final HivePartitionHolder hivePartitionHolder;
@@ -116,8 +117,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
index 5efa505..ba3a04a 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
@@ -33,7 +33,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.mapred.InputSplit;
@@ -48,6 +47,8 @@
 @JsonTypeName("hive-sub-scan")
 public class HiveSubScan extends AbstractBase implements SubScan {
 
+  public static final String OPERATOR_TYPE = "HIVE_SUB_SCAN";
+
   private final HiveReadEntry hiveReadEntry;
   private final List<List<InputSplit>> inputSplits = new ArrayList<>();
   private final HiveStoragePlugin hiveStoragePlugin;
@@ -175,8 +176,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.HIVE_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   private static List<InputSplit> deserializeInputSplit(List<String> base64, String className)
diff --git a/contrib/storage-hive/hive-exec-shade/pom.xml b/contrib/storage-hive/hive-exec-shade/pom.xml
index 53ca60a..775e0be 100644
--- a/contrib/storage-hive/hive-exec-shade/pom.xml
+++ b/contrib/storage-hive/hive-exec-shade/pom.xml
@@ -29,7 +29,7 @@
 
   <artifactId>drill-hive-exec-shaded</artifactId>
   <packaging>jar</packaging>
-  <name>contrib/hive-storage-plugin/hive-exec-shaded</name>
+  <name>Drill : Contrib : Storage : Hive : Exec Shaded</name>
 
   <properties>
     <hive.parquet.version>1.8.3</hive.parquet.version>
diff --git a/contrib/storage-hive/pom.xml b/contrib/storage-hive/pom.xml
index c2b43b6..7095cb0 100644
--- a/contrib/storage-hive/pom.xml
+++ b/contrib/storage-hive/pom.xml
@@ -28,7 +28,7 @@
 
   <groupId>org.apache.drill.contrib.storage-hive</groupId>
   <artifactId>drill-contrib-storage-hive-parent</artifactId>
-  <name>contrib/hive-storage-plugin/Parent Pom</name>
+  <name>Drill : Contrib : Storage : Hive : </name>
   <packaging>pom</packaging>
 
   <dependencies>
diff --git a/contrib/storage-http/README.md b/contrib/storage-http/README.md
index 19da0d1..6cff6ca 100644
--- a/contrib/storage-http/README.md
+++ b/contrib/storage-http/README.md
@@ -215,6 +215,11 @@
 as that shown above. Drill assumes that the server will use HTTP status codes to
 indicate a bad request or other error.
 
+### Input Type
+The REST plugin accepts three different types of input: `json`, `csv` and `xml`. The default is `json`. If you use `xml` as the input type, there is an additional
+configuration option called `xmlDataLevel` which reduces the level of unneeded nesting found in XML files. You can find more information in the documentation for Drill's XML
+format plugin.
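+
+For example, a connection that returns XML might be configured roughly as follows (a sketch; the `plants` connection name, URL and data level are placeholders):
+
+    "plants": {
+      "url": "https://example.com/catalog.xml",
+      "method": "GET",
+      "inputType": "xml",
+      "xmlDataLevel": 2
+    }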
+
 #### Authorization
 
 `authType`: If your API requires authentication, specify the authentication
@@ -364,7 +369,8 @@
       "authType": "none",
       "userName": null,
       "password": null,
-      "postBody": null
+      "postBody": null, 
+      "inputType": "json",
     }
   }
 
@@ -500,7 +506,7 @@
    supported). Join pushdown has the potential to improve performance if you use the HTTP service
    joined to another table.
 
-4. This plugin only reads JSON and CSV responses.
+~~4. This plugin only reads JSON and CSV responses.~~ (No longer applies: XML responses are now supported; see the *Input Type* section above.)
 
 5. `POST` bodies can only be in the format of key/value pairs. Some APIs accept
     JSON based `POST` bodies but this is not currently supported.
diff --git a/contrib/storage-http/pom.xml b/contrib/storage-http/pom.xml
index 2c59469..894ccf8 100644
--- a/contrib/storage-http/pom.xml
+++ b/contrib/storage-http/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-storage-http</artifactId>
-  <name>contrib/http-storage-plugin</name>
+  <name>Drill : Contrib : Storage : HTTP</name>
 
   <properties>
     <okhttp.version>4.5.0</okhttp.version>
@@ -45,6 +45,13 @@
       <artifactId>okhttp</artifactId>
       <version>${okhttp.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.drill.contrib</groupId>
+      <artifactId>drill-format-xml</artifactId>
+      <version>${project.version}</version>
+      <scope>compile</scope>
+    </dependency>
+
     <!-- Test dependencies -->
     <dependency>
       <groupId>org.apache.drill.exec</groupId>
@@ -68,7 +75,6 @@
       <version>${okhttp.version}</version>
       <scope>test</scope>
     </dependency>
-
   </dependencies>
   <build>
     <plugins>
diff --git a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpApiConfig.java b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpApiConfig.java
index d32821a..86bd2ee 100644
--- a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpApiConfig.java
+++ b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpApiConfig.java
@@ -34,6 +34,10 @@
 public class HttpApiConfig {
   private static final Logger logger = LoggerFactory.getLogger(HttpApiConfig.class);
 
+  protected static final String DEFAULT_INPUT_FORMAT = "json";
+  protected static final String CSV_INPUT_FORMAT = "csv";
+  protected static final String XML_INPUT_FORMAT = "xml";
+
   private final String url;
 
   /**
@@ -68,6 +72,7 @@
   private final String userName;
   private final String password;
   private final String inputType;
+  private final int xmlDataLevel;
 
 
   public enum HttpMethod {
@@ -91,7 +96,8 @@
                        @JsonProperty("params") List<String> params,
                        @JsonProperty("dataPath") String dataPath,
                        @JsonProperty("requireTail") Boolean requireTail,
-                       @JsonProperty("inputType") String inputType) {
+                       @JsonProperty("inputType") String inputType,
+                       @JsonProperty("xmlDataLevel") int xmlDataLevel) {
 
     this.headers = headers;
     this.method = Strings.isNullOrEmpty(method)
@@ -130,7 +136,9 @@
     this.requireTail = requireTail == null ? true : requireTail;
 
     this.inputType = inputType == null
-      ? "json" : inputType.trim().toLowerCase();
+      ? DEFAULT_INPUT_FORMAT : inputType.trim().toLowerCase();
+
+    this.xmlDataLevel = Math.max(1, xmlDataLevel);
   }
 
   @JsonProperty("url")
@@ -160,6 +168,9 @@
   @JsonProperty("dataPath")
   public String dataPath() { return dataPath; }
 
+  @JsonProperty("xmlDataLevel")
+  public int xmlDataLevel() { return xmlDataLevel; }
+
   @JsonProperty("requireTail")
   public boolean requireTail() { return requireTail; }
 
@@ -174,7 +185,7 @@
   @Override
   public int hashCode() {
     return Objects.hash(url, method, requireTail, params, headers,
-        authType, userName, password, postBody, inputType);
+        authType, userName, password, postBody, inputType, xmlDataLevel);
   }
 
   @Override
@@ -191,6 +202,7 @@
       .field("postBody", postBody)
       .field("filterFields", params)
       .field("inputType", inputType)
+      .field("xmlDataLevel", xmlDataLevel)
       .toString();
   }
 
@@ -213,6 +225,7 @@
       && Objects.equals(params, other.params)
       && Objects.equals(dataPath, other.dataPath)
       && Objects.equals(requireTail, other.requireTail)
-      && Objects.equals(inputType, other.inputType);
+      && Objects.equals(inputType, other.inputType)
+      && Objects.equals(xmlDataLevel, other.xmlDataLevel);
   }
 }
diff --git a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java
index 0f93495..9ef55fc 100644
--- a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java
+++ b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpScanBatchCreator.java
@@ -78,7 +78,6 @@
   }
 
   private static class HttpReaderFactory implements ReaderFactory {
-
     private final HttpSubScan subScan;
     private int count;
 
@@ -97,8 +96,10 @@
 
       // Only a single scan (in a single thread)
       if (count++ == 0) {
-        if (inputType.equalsIgnoreCase("csv")) {
+        if (inputType.equalsIgnoreCase(HttpApiConfig.CSV_INPUT_FORMAT)) {
           return new HttpCSVBatchReader(subScan);
+        } else if (inputType.equalsIgnoreCase(HttpApiConfig.XML_INPUT_FORMAT)) {
+          return new HttpXMLBatchReader(subScan);
         } else {
           return new HttpBatchReader(subScan);
         }
diff --git a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpSubScan.java b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpSubScan.java
index d818824..e517259 100644
--- a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpSubScan.java
+++ b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpSubScan.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.store.http;
 
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -32,12 +33,12 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
-import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableSet;
 
 @JsonTypeName("http-sub-scan")
 public class HttpSubScan extends AbstractBase implements SubScan {
 
+  public static final String OPERATOR_TYPE = "HTTP_SUB_SCAN";
+
   private final HttpScanSpec tableSpec;
   private final List<SchemaPath> columns;
   private final Map<String, String> filters;
@@ -90,13 +91,13 @@
 
   @Override
   @JsonIgnore
-  public int getOperatorType() {
-    return CoreOperatorType.HTTP_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
   public Iterator<PhysicalOperator> iterator() {
-    return ImmutableSet.<PhysicalOperator>of().iterator();
+    return Collections.emptyIterator();
   }
 
   @Override
diff --git a/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpXMLBatchReader.java b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpXMLBatchReader.java
new file mode 100644
index 0000000..2f1b38e
--- /dev/null
+++ b/contrib/storage-http/src/main/java/org/apache/drill/exec/store/http/HttpXMLBatchReader.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.http;
+
+
+import okhttp3.HttpUrl;
+import org.apache.drill.common.AutoCloseables;
+import org.apache.drill.common.exceptions.ChildErrorContext;
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.physical.impl.scan.framework.SchemaNegotiator;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.store.http.util.SimpleHttp;
+import org.apache.drill.exec.store.xml.XMLReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.xml.stream.XMLStreamException;
+import java.io.File;
+import java.io.InputStream;
+
+public class HttpXMLBatchReader extends HttpBatchReader {
+  private static final Logger logger = LoggerFactory.getLogger(HttpXMLBatchReader.class);
+  private final HttpSubScan subScan;
+  private final int maxRecords;
+  private final int dataLevel;
+  private InputStream inStream;
+  private XMLReader xmlReader;
+  private CustomErrorContext errorContext;
+
+  public HttpXMLBatchReader(HttpSubScan subScan) {
+    super(subScan);
+    this.subScan = subScan;
+    this.maxRecords = subScan.maxRecords();
+    this.dataLevel = subScan.tableSpec().connectionConfig().xmlDataLevel();
+  }
+
+  @Override
+  public boolean open(SchemaNegotiator negotiator) {
+
+    HttpUrl url = buildUrl();
+
+    // Result set loader setup
+    String tempDirPath = negotiator.drillConfig().getString(ExecConstants.DRILL_TMP_DIR);
+
+    // Create user-friendly error context
+    errorContext = new ChildErrorContext(negotiator.parentErrorContext()) {
+      @Override
+      public void addContext(UserException.Builder builder) {
+        super.addContext(builder);
+        builder.addContext("URL", url.toString());
+      }
+    };
+    negotiator.setErrorContext(errorContext);
+
+    // Http client setup
+    SimpleHttp http = new SimpleHttp(subScan, url, new File(tempDirPath), proxySettings(negotiator.drillConfig(), url), errorContext);
+
+    // Get the input stream
+    inStream = http.getInputStream();
+    // Initialize the XMLReader
+    try {
+      xmlReader = new XMLReader(inStream, dataLevel, maxRecords);
+      ResultSetLoader resultLoader = negotiator.build();
+      RowSetLoader rootRowWriter = resultLoader.writer();
+      xmlReader.open(rootRowWriter, errorContext);
+    } catch (XMLStreamException e) {
+      throw UserException
+        .dataReadError(e)
+        .message("Error opening XML stream: " + e.getMessage())
+        .addContext(errorContext)
+        .build(logger);
+    }
+    return true;
+  }
+
+  @Override
+  public boolean next() {
+    return xmlReader.next();
+  }
+
+  @Override
+  public void close() {
+    AutoCloseables.closeSilently(inStream);
+    xmlReader.close();
+  }
+}
diff --git a/contrib/storage-http/src/test/java/org/apache/drill/exec/store/http/TestHttpPlugin.java b/contrib/storage-http/src/test/java/org/apache/drill/exec/store/http/TestHttpPlugin.java
index 442aa5f..fe45c41 100644
--- a/contrib/storage-http/src/test/java/org/apache/drill/exec/store/http/TestHttpPlugin.java
+++ b/contrib/storage-http/src/test/java/org/apache/drill/exec/store/http/TestHttpPlugin.java
@@ -22,6 +22,7 @@
 import okhttp3.mockwebserver.MockWebServer;
 import okhttp3.mockwebserver.RecordedRequest;
 import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.util.DrillFileUtils;
 import org.apache.drill.exec.physical.rowSet.RowSet;
 import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
@@ -43,6 +44,7 @@
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.drill.test.rowSet.RowSetUtilities.mapArray;
 import static org.apache.drill.test.rowSet.RowSetUtilities.mapValue;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -64,6 +66,7 @@
   private static final int MOCK_SERVER_PORT = 8091;
   private static String TEST_JSON_RESPONSE;
   private static String TEST_CSV_RESPONSE;
+  private static String TEST_XML_RESPONSE;
 
   @BeforeClass
   public static void setup() throws Exception {
@@ -71,6 +74,7 @@
 
     TEST_JSON_RESPONSE = Files.asCharSource(DrillFileUtils.getResourceAsFile("/data/response.json"), Charsets.UTF_8).read();
     TEST_CSV_RESPONSE = Files.asCharSource(DrillFileUtils.getResourceAsFile("/data/response.csv"), Charsets.UTF_8).read();
+    TEST_XML_RESPONSE = Files.asCharSource(DrillFileUtils.getResourceAsFile("/data/response.xml"), Charsets.UTF_8).read();
 
     dirTestWatcher.copyResourceToRoot(Paths.get("data/"));
     makeLiveConfig();
@@ -84,12 +88,12 @@
    */
   private static void makeLiveConfig() {
 
-    HttpApiConfig sunriseConfig = new HttpApiConfig("https://api.sunrise-sunset.org/json", "GET", null, null, null, null, null, null, null, null, null);
+    HttpApiConfig sunriseConfig = new HttpApiConfig("https://api.sunrise-sunset.org/json", "GET", null, null, null, null, null, null, null, null, null, 0);
     HttpApiConfig sunriseWithParamsConfig = new HttpApiConfig("https://api.sunrise-sunset.org/json", "GET", null, null, null, null, null,
-        Arrays.asList("lat", "lng", "date"), "results", false, null);
+        Arrays.asList("lat", "lng", "date"), "results", false, null, 0);
 
     HttpApiConfig stockConfig = new HttpApiConfig("https://api.worldtradingdata.com/api/v1/stock?symbol=SNAP,TWTR,VOD" +
-      ".L&api_token=zuHlu2vZaehdZN6GmJdTiVlp7xgZn6gl6sfgmI4G6TY4ej0NLOzvy0TUl4D4", "get", null, null, null, null, null, null, null, null, null);
+      ".L&api_token=zuHlu2vZaehdZN6GmJdTiVlp7xgZn6gl6sfgmI4G6TY4ej0NLOzvy0TUl4D4", "get", null, null, null, null, null, null, null, null, null, 0);
 
     Map<String, HttpApiConfig> configs = new HashMap<>();
     configs.put("stock", stockConfig);
@@ -116,7 +120,7 @@
     // The connection acts like a schema.
     // Ignores the message body except for data.
     HttpApiConfig mockSchema = new HttpApiConfig("http://localhost:8091/json", "GET", headers,
-        "basic", "user", "pass", null, null, "results", null, null);
+        "basic", "user", "pass", null, null, "results", null, null, 0);
 
     // Use the mock server with the HTTP parameters passed as WHERE
     // clause filters. The connection acts like a table.
@@ -124,18 +128,22 @@
     // This is the preferred approach, the base URL contains as much info as possible;
     // all other parameters are specified in SQL. See README for an example.
     HttpApiConfig mockTable = new HttpApiConfig("http://localhost:8091/json", "GET", headers,
-        "basic", "user", "pass", null, Arrays.asList("lat", "lng", "date"), "results", false, null);
+        "basic", "user", "pass", null, Arrays.asList("lat", "lng", "date"), "results", false, null, 0);
 
-    HttpApiConfig mockPostConfig = new HttpApiConfig("http://localhost:8091/", "POST", headers, null, null, null, "key1=value1\nkey2=value2", null, null, null, null);
+    HttpApiConfig mockPostConfig = new HttpApiConfig("http://localhost:8091/", "POST", headers, null, null, null, "key1=value1\nkey2=value2", null, null, null, null, 0);
 
     HttpApiConfig mockCsvConfig = new HttpApiConfig("http://localhost:8091/csv", "GET", headers,
-      "basic", "user", "pass", null, null, "results", null, "csv");
+      "basic", "user", "pass", null, null, "results", null, "csv", 0);
+
+    HttpApiConfig mockXmlConfig = new HttpApiConfig("http://localhost:8091/xml", "GET", headers,
+      "basic", "user", "pass", null, null, "results", null, "xml", 2);
 
     Map<String, HttpApiConfig> configs = new HashMap<>();
     configs.put("sunrise", mockSchema);
     configs.put("mocktable", mockTable);
     configs.put("mockpost", mockPostConfig);
     configs.put("mockcsv", mockCsvConfig);
+    configs.put("mockxml", mockXmlConfig);
 
     HttpStoragePluginConfig mockStorageConfigWithWorkspace = new HttpStoragePluginConfig(false, configs, 2, "", 80, "", "", "");
     mockStorageConfigWithWorkspace.setEnabled(true);
@@ -162,6 +170,7 @@
         .addRow("local", "http")
         .addRow("local.mockcsv", "http")
         .addRow("local.mockpost", "http")
+        .addRow("local.mockxml", "http")
         .addRow("local.sunrise", "http")
         .build();
 
@@ -351,6 +360,36 @@
     }
   }
 
+  @Test
+  public void testXmlResponse() throws Exception {
+    String sql = "SELECT * FROM local.mockxml.`?arg1=4` LIMIT 5";
+    try (MockWebServer server = startServer()) {
+
+      server.enqueue(new MockResponse().setResponseCode(200).setBody(TEST_XML_RESPONSE));
+
+      RowSet results = client.queryBuilder().sql(sql).rowSet();
+
+      TupleMetadata expectedSchema = new SchemaBuilder()
+        .add("attributes", MinorType.MAP)
+        .addNullable("COMMON", MinorType.VARCHAR)
+        .addNullable("BOTANICAL", MinorType.VARCHAR)
+        .addNullable("ZONE", MinorType.VARCHAR)
+        .addNullable("LIGHT", MinorType.VARCHAR)
+        .addNullable("PRICE", MinorType.VARCHAR)
+        .addNullable("AVAILABILITY", MinorType.VARCHAR)
+        .buildSchema();
+
+      RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
+        .addRow(mapArray(), "Bloodroot", "Sanguinaria canadensis", "4", "Mostly Shady", "$2.44", "031599")
+        .addRow(mapArray(),"Columbine", "Aquilegia canadensis", "3", "Mostly Shady", "$9.37", "030699")
+        .addRow(mapArray(),"Marsh Marigold", "Caltha palustris", "4", "Mostly Sunny", "$6.81", "051799")
+        .addRow(mapArray(), "Cowslip", "Caltha palustris", "4", "Mostly Shady", "$9.90", "030699")
+        .addRow(mapArray(), "Dutchman's-Breeches", "Dicentra cucullaria", "3", "Mostly Shady", "$6.44", "012099")
+        .build();
+
+      RowSetUtilities.verify(expected, results);
+    }
+  }
 
   private void doSimpleTestWithMockServer(String sql) throws Exception {
     try (MockWebServer server = startServer()) {
diff --git a/contrib/storage-http/src/test/resources/data/response.xml b/contrib/storage-http/src/test/resources/data/response.xml
new file mode 100644
index 0000000..d9dc3f5
--- /dev/null
+++ b/contrib/storage-http/src/test/resources/data/response.xml
@@ -0,0 +1,309 @@
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<CATALOG>
+  <PLANT>
+    <COMMON>Bloodroot</COMMON>
+    <BOTANICAL>Sanguinaria canadensis</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$2.44</PRICE>
+    <AVAILABILITY>031599</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Columbine</COMMON>
+    <BOTANICAL>Aquilegia canadensis</BOTANICAL>
+    <ZONE>3</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$9.37</PRICE>
+    <AVAILABILITY>030699</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Marsh Marigold</COMMON>
+    <BOTANICAL>Caltha palustris</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Mostly Sunny</LIGHT>
+    <PRICE>$6.81</PRICE>
+    <AVAILABILITY>051799</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Cowslip</COMMON>
+    <BOTANICAL>Caltha palustris</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$9.90</PRICE>
+    <AVAILABILITY>030699</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Dutchman's-Breeches</COMMON>
+    <BOTANICAL>Dicentra cucullaria</BOTANICAL>
+    <ZONE>3</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$6.44</PRICE>
+    <AVAILABILITY>012099</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Ginger, Wild</COMMON>
+    <BOTANICAL>Asarum canadense</BOTANICAL>
+    <ZONE>3</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$9.03</PRICE>
+    <AVAILABILITY>041899</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Hepatica</COMMON>
+    <BOTANICAL>Hepatica americana</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$4.45</PRICE>
+    <AVAILABILITY>012699</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Liverleaf</COMMON>
+    <BOTANICAL>Hepatica americana</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$3.99</PRICE>
+    <AVAILABILITY>010299</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Jack-In-The-Pulpit</COMMON>
+    <BOTANICAL>Arisaema triphyllum</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$3.23</PRICE>
+    <AVAILABILITY>020199</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Mayapple</COMMON>
+    <BOTANICAL>Podophyllum peltatum</BOTANICAL>
+    <ZONE>3</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$2.98</PRICE>
+    <AVAILABILITY>060599</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Phlox, Woodland</COMMON>
+    <BOTANICAL>Phlox divaricata</BOTANICAL>
+    <ZONE>3</ZONE>
+    <LIGHT>Sun or Shade</LIGHT>
+    <PRICE>$2.80</PRICE>
+    <AVAILABILITY>012299</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Phlox, Blue</COMMON>
+    <BOTANICAL>Phlox divaricata</BOTANICAL>
+    <ZONE>3</ZONE>
+    <LIGHT>Sun or Shade</LIGHT>
+    <PRICE>$5.59</PRICE>
+    <AVAILABILITY>021699</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Spring-Beauty</COMMON>
+    <BOTANICAL>Claytonia Virginica</BOTANICAL>
+    <ZONE>7</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$6.59</PRICE>
+    <AVAILABILITY>020199</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Trillium</COMMON>
+    <BOTANICAL>Trillium grandiflorum</BOTANICAL>
+    <ZONE>5</ZONE>
+    <LIGHT>Sun or Shade</LIGHT>
+    <PRICE>$3.90</PRICE>
+    <AVAILABILITY>042999</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Wake Robin</COMMON>
+    <BOTANICAL>Trillium grandiflorum</BOTANICAL>
+    <ZONE>5</ZONE>
+    <LIGHT>Sun or Shade</LIGHT>
+    <PRICE>$3.20</PRICE>
+    <AVAILABILITY>022199</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Violet, Dog-Tooth</COMMON>
+    <BOTANICAL>Erythronium americanum</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$9.04</PRICE>
+    <AVAILABILITY>020199</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Trout Lily</COMMON>
+    <BOTANICAL>Erythronium americanum</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$6.94</PRICE>
+    <AVAILABILITY>032499</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Adder's-Tongue</COMMON>
+    <BOTANICAL>Erythronium americanum</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$9.58</PRICE>
+    <AVAILABILITY>041399</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Anemone</COMMON>
+    <BOTANICAL>Anemone blanda</BOTANICAL>
+    <ZONE>6</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$8.86</PRICE>
+    <AVAILABILITY>122698</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Grecian Windflower</COMMON>
+    <BOTANICAL>Anemone blanda</BOTANICAL>
+    <ZONE>6</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$9.16</PRICE>
+    <AVAILABILITY>071099</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Bee Balm</COMMON>
+    <BOTANICAL>Monarda didyma</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$4.59</PRICE>
+    <AVAILABILITY>050399</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Bergamot</COMMON>
+    <BOTANICAL>Monarda didyma</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$7.16</PRICE>
+    <AVAILABILITY>042799</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Black-Eyed Susan</COMMON>
+    <BOTANICAL>Rudbeckia hirta</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Sunny</LIGHT>
+    <PRICE>$9.80</PRICE>
+    <AVAILABILITY>061899</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Buttercup</COMMON>
+    <BOTANICAL>Ranunculus</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$2.57</PRICE>
+    <AVAILABILITY>061099</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Crowfoot</COMMON>
+    <BOTANICAL>Ranunculus</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$9.34</PRICE>
+    <AVAILABILITY>040399</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Butterfly Weed</COMMON>
+    <BOTANICAL>Asclepias tuberosa</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Sunny</LIGHT>
+    <PRICE>$2.78</PRICE>
+    <AVAILABILITY>063099</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Cinquefoil</COMMON>
+    <BOTANICAL>Potentilla</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$7.06</PRICE>
+    <AVAILABILITY>052599</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Primrose</COMMON>
+    <BOTANICAL>Oenothera</BOTANICAL>
+    <ZONE>3 - 5</ZONE>
+    <LIGHT>Sunny</LIGHT>
+    <PRICE>$6.56</PRICE>
+    <AVAILABILITY>013099</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Gentian</COMMON>
+    <BOTANICAL>Gentiana</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Sun or Shade</LIGHT>
+    <PRICE>$7.81</PRICE>
+    <AVAILABILITY>051899</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Blue Gentian</COMMON>
+    <BOTANICAL>Gentiana</BOTANICAL>
+    <ZONE>4</ZONE>
+    <LIGHT>Sun or Shade</LIGHT>
+    <PRICE>$8.56</PRICE>
+    <AVAILABILITY>050299</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Jacob's Ladder</COMMON>
+    <BOTANICAL>Polemonium caeruleum</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$9.26</PRICE>
+    <AVAILABILITY>022199</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Greek Valerian</COMMON>
+    <BOTANICAL>Polemonium caeruleum</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$4.36</PRICE>
+    <AVAILABILITY>071499</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>California Poppy</COMMON>
+    <BOTANICAL>Eschscholzia californica</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Sun</LIGHT>
+    <PRICE>$7.89</PRICE>
+    <AVAILABILITY>032799</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Shooting Star</COMMON>
+    <BOTANICAL>Dodecatheon</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Mostly Shady</LIGHT>
+    <PRICE>$8.60</PRICE>
+    <AVAILABILITY>051399</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Snakeroot</COMMON>
+    <BOTANICAL>Cimicifuga</BOTANICAL>
+    <ZONE>Annual</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$5.63</PRICE>
+    <AVAILABILITY>071199</AVAILABILITY>
+  </PLANT>
+  <PLANT>
+    <COMMON>Cardinal Flower</COMMON>
+    <BOTANICAL>Lobelia cardinalis</BOTANICAL>
+    <ZONE>2</ZONE>
+    <LIGHT>Shade</LIGHT>
+    <PRICE>$3.02</PRICE>
+    <AVAILABILITY>022299</AVAILABILITY>
+  </PLANT>
+</CATALOG>
\ No newline at end of file
diff --git a/contrib/storage-jdbc/pom.xml b/contrib/storage-jdbc/pom.xml
index 579d2de..c709337 100755
--- a/contrib/storage-jdbc/pom.xml
+++ b/contrib/storage-jdbc/pom.xml
@@ -28,7 +28,7 @@
 
   <artifactId>drill-jdbc-storage</artifactId>
 
-  <name>contrib/jdbc-storage-plugin</name>
+  <name>Drill : Contrib : Storage : JDBC</name>
 
   <properties>
     <mysql.connector.version>8.0.19</mysql.connector.version>
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcConvention.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcConvention.java
index d8ea6bf..401cf51 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcConvention.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcConvention.java
@@ -25,13 +25,17 @@
 import org.apache.calcite.adapter.jdbc.JdbcRules;
 import org.apache.calcite.adapter.jdbc.JdbcRules.JdbcFilterRule;
 import org.apache.calcite.adapter.jdbc.JdbcRules.JdbcProjectRule;
+import org.apache.calcite.adapter.jdbc.JdbcRules.JdbcSortRule;
 import org.apache.calcite.adapter.jdbc.JdbcToEnumerableConverterRule;
 import org.apache.calcite.linq4j.tree.ConstantUntypedNull;
+import org.apache.calcite.plan.Convention;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.sql.SqlDialect;
 import org.apache.drill.exec.planner.RuleInstance;
+import org.apache.drill.exec.planner.logical.DrillRel;
 import org.apache.drill.exec.planner.logical.DrillRelFactories;
+import org.apache.drill.exec.store.enumerable.plan.VertexDrelConverterRule;
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableSet;
 
 /**
@@ -43,7 +47,7 @@
    * Unwanted Calcite's JdbcRules are filtered out using this set
    */
   private static final Set<Class<? extends RelOptRule>> EXCLUDED_CALCITE_RULES = ImmutableSet.of(
-      JdbcToEnumerableConverterRule.class, JdbcFilterRule.class, JdbcProjectRule.class);
+      JdbcToEnumerableConverterRule.class, JdbcFilterRule.class, JdbcProjectRule.class, JdbcSortRule.class);
 
   private final ImmutableSet<RelOptRule> rules;
   private final JdbcStoragePlugin plugin;
@@ -57,9 +61,15 @@
     this.rules = ImmutableSet.<RelOptRule>builder()
         .addAll(calciteJdbcRules)
         .add(JdbcIntermediatePrelConverterRule.INSTANCE)
-        .add(new JdbcDrelConverterRule(this))
-        .add(new DrillJdbcRuleBase.DrillJdbcProjectRule(this))
-        .add(new DrillJdbcRuleBase.DrillJdbcFilterRule(this))
+        .add(new VertexDrelConverterRule(this))
+        .add(new DrillJdbcRuleBase.DrillJdbcProjectRule(Convention.NONE, this))
+        .add(new DrillJdbcRuleBase.DrillJdbcProjectRule(DrillRel.DRILL_LOGICAL, this))
+        .add(new DrillJdbcRuleBase.DrillJdbcFilterRule(Convention.NONE, this))
+        .add(new DrillJdbcRuleBase.DrillJdbcFilterRule(DrillRel.DRILL_LOGICAL, this))
+        .add(new DrillJdbcRuleBase.DrillJdbcSortRule(Convention.NONE, this))
+        .add(new DrillJdbcRuleBase.DrillJdbcSortRule(DrillRel.DRILL_LOGICAL, this))
+        .add(new DrillJdbcRuleBase.DrillJdbcLimitRule(Convention.NONE, this))
+        .add(new DrillJdbcRuleBase.DrillJdbcLimitRule(DrillRel.DRILL_LOGICAL, this))
         .add(RuleInstance.FILTER_SET_OP_TRANSPOSE_RULE)
         .add(RuleInstance.PROJECT_REMOVE_RULE)
         .build();
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java
index c0b28bf..dca8a99 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java
@@ -23,15 +23,17 @@
 
 import org.apache.calcite.adapter.jdbc.JdbcConvention;
 import org.apache.calcite.adapter.jdbc.JdbcRules;
-import org.apache.calcite.plan.Convention;
 import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.plan.RelTrait;
+import org.apache.calcite.rel.RelCollations;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.convert.ConverterRule;
+import org.apache.calcite.rel.core.Sort;
 import org.apache.calcite.rel.logical.LogicalFilter;
 import org.apache.calcite.rel.logical.LogicalProject;
 import org.apache.calcite.rex.RexNode;
 
+import org.apache.drill.exec.planner.common.DrillLimitRelBase;
 import org.apache.drill.shaded.guava.com.google.common.cache.CacheBuilder;
 import org.apache.drill.shaded.guava.com.google.common.cache.CacheLoader;
 import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;
@@ -58,8 +60,8 @@
 
   static class DrillJdbcProjectRule extends DrillJdbcRuleBase {
 
-    DrillJdbcProjectRule(JdbcConvention out) {
-      super(LogicalProject.class, Convention.NONE, out, "DrillJdbcProjectRule");
+    DrillJdbcProjectRule(RelTrait in, JdbcConvention out) {
+      super(LogicalProject.class, in, out, "DrillJdbcProjectRule");
     }
 
     public RelNode convert(RelNode rel) {
@@ -89,8 +91,8 @@
 
   static class DrillJdbcFilterRule extends DrillJdbcRuleBase {
 
-    DrillJdbcFilterRule(JdbcConvention out) {
-      super(LogicalFilter.class, Convention.NONE, out, "DrillJdbcFilterRule");
+    DrillJdbcFilterRule(RelTrait in, JdbcConvention out) {
+      super(LogicalFilter.class, in, out, "DrillJdbcFilterRule");
     }
 
     public RelNode convert(RelNode rel) {
@@ -117,4 +119,36 @@
       }
     }
   }
+
+  static class DrillJdbcSortRule extends DrillJdbcRuleBase {
+
+    DrillJdbcSortRule(RelTrait in, JdbcConvention out) {
+      super(Sort.class, in, out, "DrillJdbcSortRule");
+    }
+
+    @Override
+    public RelNode convert(RelNode rel) {
+      Sort sort = (Sort) rel;
+
+      return new DrillJdbcSort(sort.getCluster(), sort.getTraitSet().replace(this.out),
+          convert(sort.getInput(), sort.getInput().getTraitSet().replace(this.out).simplify()),
+          sort.collation, sort.offset, sort.fetch);
+    }
+  }
+
+  static class DrillJdbcLimitRule extends DrillJdbcRuleBase {
+
+    DrillJdbcLimitRule(RelTrait in, JdbcConvention out) {
+      super(DrillLimitRelBase.class, in, out, "DrillJdbcLimitRule");
+    }
+
+    @Override
+    public RelNode convert(RelNode rel) {
+      DrillLimitRelBase limit = (DrillLimitRelBase) rel;
+
+      return new DrillJdbcSort(limit.getCluster(), limit.getTraitSet().plus(RelCollations.EMPTY).replace(this.out).simplify(),
+          convert(limit.getInput(), limit.getInput().getTraitSet().replace(this.out).simplify()),
+          RelCollations.EMPTY, limit.getOffset(), limit.getFetch());
+    }
+  }
 }
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcSort.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcSort.java
new file mode 100644
index 0000000..447f19d
--- /dev/null
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcSort.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.jdbc;
+
+import org.apache.calcite.adapter.jdbc.JdbcRules;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rex.RexNode;
+import org.apache.drill.exec.planner.cost.DrillCostBase;
+
+public class DrillJdbcSort extends JdbcRules.JdbcSort {
+  public DrillJdbcSort(RelOptCluster cluster, RelTraitSet traitSet, RelNode input, RelCollation collation, RexNode offset, RexNode fetch) {
+    super(cluster, traitSet, input, collation, offset, fetch);
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
+    if (collation.getFieldCollations().isEmpty()) {
+      // The case when sort node represents a regular limit without actual sort.
+      // Drill separates limit from sort, and they have different formulas for cost calculation.
+      // The cost for the case of the JDBC limit operator should correspond to the cost of the Drill's one.
+      double numRows = mq.getRowCount(this);
+      double cpuCost = DrillCostBase.COMPARE_CPU_COST * numRows;
+      DrillCostBase.DrillCostFactory costFactory = (DrillCostBase.DrillCostFactory) planner.getCostFactory();
+      return costFactory.makeCost(numRows, cpuCost, 0, 0);
+    }
+    return super.computeSelfCost(planner, mq);
+  }
+}
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcIntermediatePrelConverterRule.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcIntermediatePrelConverterRule.java
index 1fc58b4..5147cfe 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcIntermediatePrelConverterRule.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcIntermediatePrelConverterRule.java
@@ -24,13 +24,14 @@
 import org.apache.drill.exec.planner.logical.DrillRel;
 import org.apache.drill.exec.planner.logical.DrillRelFactories;
 import org.apache.drill.exec.planner.physical.Prel;
+import org.apache.drill.exec.store.enumerable.plan.VertexDrel;
 
 final class JdbcIntermediatePrelConverterRule extends ConverterRule {
 
   static final JdbcIntermediatePrelConverterRule INSTANCE = new JdbcIntermediatePrelConverterRule();
 
   private JdbcIntermediatePrelConverterRule() {
-    super(JdbcDrel.class, (Predicate<RelNode>) input -> true, DrillRel.DRILL_LOGICAL,
+    super(VertexDrel.class, (Predicate<RelNode>) input -> true, DrillRel.DRILL_LOGICAL,
         Prel.DRILL_PHYSICAL, DrillRelFactories.LOGICAL_BUILDER, "JDBC_PREL_Converter");
   }
 
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java
index d30feb0..815e43f 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java
@@ -27,10 +27,8 @@
 import org.apache.calcite.plan.ConventionTraitDef;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.plan.volcano.RelSubset;
 import org.apache.calcite.rel.AbstractRelNode;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.RelShuttleImpl;
 import org.apache.calcite.rel.RelWriter;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.sql.SqlDialect;
@@ -40,6 +38,7 @@
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.visitor.PrelVisitor;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
+import org.apache.drill.exec.store.SubsetRemover;
 
 /**
  * Represents a JDBC Plan once the children nodes have been rewritten into SQL.
@@ -61,7 +60,7 @@
         dialect,
         (JavaTypeFactory) getCluster().getTypeFactory());
     final JdbcImplementor.Result result =
-        jdbcImplementor.visitChild(0, input.accept(new SubsetRemover()));
+        jdbcImplementor.visitChild(0, input.accept(SubsetRemover.INSTANCE));
     sql = result.asStatement().toSqlString(dialect).getSql();
     rowType = input.getRowType();
   }
@@ -78,19 +77,6 @@
     return strippedSqlTextBldr.toString();
   }
 
-  private static class SubsetRemover extends RelShuttleImpl {
-
-    @Override
-    public RelNode visit(RelNode other) {
-      if (other instanceof RelSubset) {
-        return ((RelSubset) other).getBest().accept(this);
-      } else {
-        return super.visit(other);
-      }
-    }
-
-  }
-
   @Override
   public PhysicalOperator getPhysicalOperator(PhysicalPlanCreator creator) {
     List<SchemaPath> columns = new ArrayList<>();
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcSubScan.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcSubScan.java
old mode 100755
new mode 100644
index 9991f6f..def31fa
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcSubScan.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcSubScan.java
@@ -21,7 +21,6 @@
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.exec.physical.base.AbstractSubScan;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
 import com.fasterxml.jackson.annotation.JacksonInject;
@@ -35,6 +34,8 @@
 @JsonTypeName("jdbc-sub-scan")
 public class JdbcSubScan extends AbstractSubScan {
 
+  public static final String OPERATOR_TYPE = "JDBC_SCAN";
+
   private final String sql;
   private final JdbcStoragePlugin plugin;
   private final List<SchemaPath> columns;
@@ -59,8 +60,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return UserBitShared.CoreOperatorType.JDBC_SCAN.getNumber();
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   public String getSql() {
diff --git a/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json b/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json
index 35a5c00..8d04485 100644
--- a/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json
+++ b/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json
@@ -8,7 +8,7 @@
       "password": "xxx",
       "caseInsensitiveTableNames": false,
       "sourceParameters" : {
-        "maxIdle" : 8
+        "maximumPoolSize": 10
       },
       "enabled": false
     }
diff --git a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java
index 73b8f1c..6b7ae90 100644
--- a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java
+++ b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java
@@ -324,11 +324,44 @@
 
   @Test
   public void testLimitPushDown() throws Exception {
-    String query = "select person_id from mysql.`drill_mysql_test`.person limit 10";
+    String query = "select person_id, first_name, last_name from mysql.`drill_mysql_test`.person limit 100";
     queryBuilder()
         .sql(query)
         .planMatcher()
-        .include("Jdbc\\(.*LIMIT 10")
+        .include("Jdbc\\(.*LIMIT 100")
+        .exclude("Limit\\(")
+        .match();
+  }
+
+  @Test
+  public void testLimitPushDownWithOrderBy() throws Exception {
+    String query = "select person_id from mysql.`drill_mysql_test`.person order by first_name limit 100";
+    queryBuilder()
+        .sql(query)
+        .planMatcher()
+        .include("Jdbc\\(.*ORDER BY `first_name`.*LIMIT 100")
+        .exclude("Limit\\(")
+        .match();
+  }
+
+  @Test
+  public void testLimitPushDownWithOffset() throws Exception {
+    String query = "select person_id, first_name from mysql.`drill_mysql_test`.person limit 100 offset 10";
+    queryBuilder()
+        .sql(query)
+        .planMatcher()
+        .include("Jdbc\\(.*LIMIT 100 OFFSET 10")
+        .exclude("Limit\\(")
+        .match();
+  }
+
+  @Test
+  public void testLimitPushDownWithConvertFromJson() throws Exception {
+    String query = "select convert_fromJSON(first_name)['ppid'] from mysql.`drill_mysql_test`.person LIMIT 100";
+    queryBuilder()
+        .sql(query)
+        .planMatcher()
+        .include("Jdbc\\(.*LIMIT 100")
         .exclude("Limit\\(")
         .match();
   }
diff --git a/contrib/storage-kafka/pom.xml b/contrib/storage-kafka/pom.xml
index 02afb36..6b9c059 100644
--- a/contrib/storage-kafka/pom.xml
+++ b/contrib/storage-kafka/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-storage-kafka</artifactId>
-  <name>contrib/kafka-storage-plugin</name>
+  <name>Drill : Contrib : Storage : Kafka</name>
 
   <properties>
     <kafka.version>2.3.1</kafka.version>
diff --git a/contrib/storage-kafka/src/main/java/org/apache/drill/exec/store/kafka/KafkaSubScan.java b/contrib/storage-kafka/src/main/java/org/apache/drill/exec/store/kafka/KafkaSubScan.java
index 9341aa6..c55f1e7 100644
--- a/contrib/storage-kafka/src/main/java/org/apache/drill/exec/store/kafka/KafkaSubScan.java
+++ b/contrib/storage-kafka/src/main/java/org/apache/drill/exec/store/kafka/KafkaSubScan.java
@@ -28,7 +28,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
 import com.fasterxml.jackson.annotation.JacksonInject;
@@ -41,6 +40,8 @@
 @JsonTypeName("kafka-partition-scan")
 public class KafkaSubScan extends AbstractBase implements SubScan {
 
+  public static final String OPERATOR_TYPE = "KAFKA_SUB_SCAN";
+
   private final KafkaStoragePlugin kafkaStoragePlugin;
   private final List<SchemaPath> columns;
   private final List<KafkaPartitionScanSpec> partitionSubScanSpecList;
@@ -105,7 +106,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.KAFKA_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/contrib/storage-kudu/pom.xml b/contrib/storage-kudu/pom.xml
index 0f61f2b..cef8989 100644
--- a/contrib/storage-kudu/pom.xml
+++ b/contrib/storage-kudu/pom.xml
@@ -27,7 +27,7 @@
   </parent>
 
   <artifactId>drill-kudu-storage</artifactId>
-  <name>contrib/kudu-storage-plugin</name>
+  <name>Drill : Contrib : Storage : Kudu</name>
 
 
   <dependencies>
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSubScan.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSubScan.java
index 22e13a6..a4f99bc 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSubScan.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSubScan.java
@@ -28,7 +28,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
 import com.fasterxml.jackson.annotation.JacksonInject;
@@ -44,6 +43,8 @@
 @JsonTypeName("kudu-sub-scan")
 public class KuduSubScan extends AbstractBase implements SubScan {
 
+  public static final String OPERATOR_TYPE = "KUDU_SUB_SCAN";
+
   private final KuduStoragePlugin kuduStoragePlugin;
   private final List<KuduSubScanSpec> tabletScanSpecList;
   private final List<SchemaPath> columns;
@@ -127,7 +128,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.KUDU_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduWriter.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduWriter.java
index 95f286e..c456058 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduWriter.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduWriter.java
@@ -23,7 +23,6 @@
 import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.exec.physical.base.AbstractWriter;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
 import com.fasterxml.jackson.annotation.JacksonInject;
@@ -33,6 +32,8 @@
 
 public class KuduWriter extends AbstractWriter {
 
+  public static final String OPERATOR_TYPE = "KUDU_WRITER";
+
   private final KuduStoragePlugin plugin;
   private final String name;
 
@@ -55,8 +56,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.KUDU_WRITER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/contrib/storage-mongo/pom.xml b/contrib/storage-mongo/pom.xml
index 343e92b..c1f2613 100644
--- a/contrib/storage-mongo/pom.xml
+++ b/contrib/storage-mongo/pom.xml
@@ -28,7 +28,7 @@
 
   <artifactId>drill-mongo-storage</artifactId>
 
-  <name>contrib/mongo-storage-plugin</name>
+  <name>Drill : Contrib : Storage : MongoDB</name>
 
   <properties>
      <mongo.TestSuite>**/MongoTestSuite.class</mongo.TestSuite>
@@ -45,7 +45,7 @@
   <dependency>
     <groupId>org.mongodb</groupId>
     <artifactId>mongo-java-driver</artifactId>
-    <version>3.8.0</version>
+    <version>3.12.7</version>
   </dependency>
 
     <!-- Test dependencie -->
diff --git a/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/MongoSubScan.java b/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/MongoSubScan.java
index c85dc61..551886f 100644
--- a/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/MongoSubScan.java
+++ b/contrib/storage-mongo/src/main/java/org/apache/drill/exec/store/mongo/MongoSubScan.java
@@ -30,11 +30,8 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.bson.Document;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.fasterxml.jackson.annotation.JacksonInject;
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -45,7 +42,8 @@
 
 @JsonTypeName("mongo-shard-read")
 public class MongoSubScan extends AbstractBase implements SubScan {
-  static final Logger logger = LoggerFactory.getLogger(MongoSubScan.class);
+
+  public static final String OPERATOR_TYPE = "MONGO_SUB_SCAN";
 
   @JsonProperty
   private final MongoStoragePluginConfig mongoPluginConfig;
@@ -114,8 +112,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.MONGO_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/contrib/storage-opentsdb/pom.xml b/contrib/storage-opentsdb/pom.xml
index 3692526..9469396 100644
--- a/contrib/storage-opentsdb/pom.xml
+++ b/contrib/storage-opentsdb/pom.xml
@@ -28,7 +28,7 @@
 
     <artifactId>drill-opentsdb-storage</artifactId>
 
-    <name>contrib/opentsdb-storage-plugin</name>
+    <name>Drill : Contrib : Storage : OpenTSDB</name>
 
     <properties>
         <retrofit.version>2.8.1</retrofit.version>
diff --git a/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBSubScan.java b/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBSubScan.java
index 01e418b..5544942 100644
--- a/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBSubScan.java
+++ b/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBSubScan.java
@@ -29,7 +29,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
 import java.util.Collections;
@@ -40,6 +39,8 @@
 @JsonTypeName("openTSDB-sub-scan")
 public class OpenTSDBSubScan extends AbstractBase implements SubScan {
 
+  public static final String OPERATOR_TYPE = "OPEN_TSDB_SUB_SCAN";
+
   public final OpenTSDBStoragePluginConfig storage;
 
   private final List<SchemaPath> columns;
@@ -68,8 +69,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.OPEN_TSDB_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/contrib/udfs/pom.xml b/contrib/udfs/pom.xml
index f41d35b..c170fb9 100644
--- a/contrib/udfs/pom.xml
+++ b/contrib/udfs/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-udfs</artifactId>
-  <name>contrib/drill-udfs</name>
+  <name>Drill : Contrib : UDFs</name>
 
   <dependencies>
     <dependency>
@@ -63,10 +63,11 @@
       <artifactId>proj4j</artifactId>
       <version>0.1.0</version>
     </dependency>
+
     <dependency>
       <groupId>nl.basjes.parse.useragent</groupId>
       <artifactId>yauaa</artifactId>
-      <version>5.16</version>
+      <version>${yauaa.version}</version>
     </dependency>
 
     <!-- Test dependencies -->
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
new file mode 100644
index 0000000..44b1d45
--- /dev/null
+++ b/dev-support/docker/Dockerfile
@@ -0,0 +1,155 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Dockerfile for installing the necessary dependencies for building Drill.
+# See BUILDING.txt.
+
+FROM ubuntu:20.04
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+WORKDIR /root
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#####
+# Disable suggests/recommends
+#####
+RUN echo APT::Install-Recommends "0"\; > /etc/apt/apt.conf.d/10disableextras
+RUN echo APT::Install-Suggests "0"\; >>  /etc/apt/apt.conf.d/10disableextras
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV DEBCONF_TERSE true
+
+
+# hadolint ignore=DL3008
+RUN apt -q update \
+    && apt install -y software-properties-common apt-utils apt-transport-https \
+    # Repo for different Python versions
+    && add-apt-repository -y ppa:deadsnakes/ppa \
+    && apt-get -q install -y --no-install-recommends \
+        ant \
+        bats \
+        bash-completion \
+        build-essential \
+        bzip2 \
+        ca-certificates \
+        clang \
+        cmake \
+        curl \
+        docker.io \
+        doxygen \
+        findbugs \
+        fuse \
+        g++ \
+        gcc \
+        git \
+        gnupg-agent \
+        libaio1 \
+        libbcprov-java \
+        libbz2-dev \
+        libcurl4-openssl-dev \
+        libfuse-dev \
+        libnuma-dev \
+        libncurses5 \
+        libprotobuf-dev \
+        libprotoc-dev \
+        libsasl2-dev \
+        libsnappy-dev \
+        libssl-dev \
+        libtool \
+        libzstd-dev \
+        locales \
+        make \
+        maven \
+#        openjdk-11-jdk \
+        openjdk-8-jdk \
+        pinentry-curses \
+        pkg-config \
+        python \
+        python2.7 \
+#        python-pip \
+        python-pkg-resources \
+        python-setuptools \
+#        python-wheel \
+        python3-setuptools \
+        python3-pip \
+        python3.5 \
+        python3.6 \
+        python3.7 \
+        python2.7 \
+        virtualenv \
+        tox \
+        rsync \
+        shellcheck \
+        software-properties-common \
+        sudo \
+        valgrind \
+        vim \
+        wget \
+        zlib1g-dev \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+###
+# Install grpcio-tools mypy-protobuf for `python3 sdks/python/setup.py sdist` to work
+###
+RUN pip3 install grpcio-tools mypy-protobuf
+
+###
+# Install Go
+###
+RUN mkdir -p /goroot \
+    && curl https://dl.google.com/go/go1.15.2.linux-amd64.tar.gz | tar xvzf - -C /goroot --strip-components=1 \
+    && chmod a+rwX -R /goroot
+
+# Set environment variables for Go
+ENV GOROOT /goroot
+ENV GOPATH /gopath
+ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH
+CMD go get github.com/linkedin/goavro
+
+###
+# Miscellaneous fixes...
+###
+# Turns out some build tools use 'time' and in this docker image this is no longer
+# an executable but ONLY an internal command of bash
+COPY time.sh /usr/bin/time
+RUN chmod 755 /usr/bin/time
+
+# Force the complete use of Java 8
+RUN apt remove -y openjdk-11-jre openjdk-11-jre-headless
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
+
+
+###
+# Avoid out of memory errors in builds
+###
+ENV MAVEN_OPTS -Xmx4g -XX:MaxPermSize=512m
+
+###
+# Add a welcome message and environment checks.
+###
+RUN mkdir /scripts
+COPY drill_env_checks.sh /scripts/drill_env_checks.sh
+COPY bashcolors.sh      /scripts/bashcolors.sh
+RUN chmod 755 /scripts /scripts/drill_env_checks.sh /scripts/bashcolors.sh
+
+# hadolint ignore=SC2016
+RUN echo '. /etc/bash_completion'        >> /root/.bash_aliases
+RUN echo '. /scripts/drill_env_checks.sh' >> /root/.bash_aliases
diff --git a/dev-support/docker/bashcolors.sh b/dev-support/docker/bashcolors.sh
new file mode 100755
index 0000000..0f0e05c
--- /dev/null
+++ b/dev-support/docker/bashcolors.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Based upon info on https://wiki.archlinux.org/index.php/Color_Bash_Prompt
+
+# Reset
+export Color_Off='\e[0m'      # Text Reset
+
+# Regular Colors
+export Black='\e[0;30m'       # Black
+export Red='\e[0;31m'         # Red
+export Green='\e[0;32m'       # Green
+export Yellow='\e[0;33m'      # Yellow
+export Blue='\e[0;34m'        # Blue
+export Purple='\e[0;35m'      # Purple
+export Cyan='\e[0;36m'        # Cyan
+export White='\e[0;37m'       # White
+
+# Bold
+export BBlack='\e[1;30m'      # Black
+export BRed='\e[1;31m'        # Red
+export BGreen='\e[1;32m'      # Green
+export BYellow='\e[1;33m'     # Yellow
+export BBlue='\e[1;34m'       # Blue
+export BPurple='\e[1;35m'     # Purple
+export BCyan='\e[1;36m'       # Cyan
+export BWhite='\e[1;37m'      # White
+
+# Underline
+export UBlack='\e[4;30m'      # Black
+export URed='\e[4;31m'        # Red
+export UGreen='\e[4;32m'      # Green
+export UYellow='\e[4;33m'     # Yellow
+export UBlue='\e[4;34m'       # Blue
+export UPurple='\e[4;35m'     # Purple
+export UCyan='\e[4;36m'       # Cyan
+export UWhite='\e[4;37m'      # White
+
+# Background
+export On_Black='\e[40m'      # Black
+export On_Red='\e[41m'        # Red
+export On_Green='\e[42m'      # Green
+export On_Yellow='\e[43m'     # Yellow
+export On_Blue='\e[44m'       # Blue
+export On_Purple='\e[45m'     # Purple
+export On_Cyan='\e[46m'       # Cyan
+export On_White='\e[47m'      # White
+
+# High Intensity
+export IBlack='\e[0;90m'      # Black
+export IRed='\e[0;91m'        # Red
+export IGreen='\e[0;92m'      # Green
+export IYellow='\e[0;93m'     # Yellow
+export IBlue='\e[0;94m'       # Blue
+export IPurple='\e[0;95m'     # Purple
+export ICyan='\e[0;96m'       # Cyan
+export IWhite='\e[0;97m'      # White
+
+# Bold High Intensity
+export BIBlack='\e[1;90m'     # Black
+export BIRed='\e[1;91m'       # Red
+export BIGreen='\e[1;92m'     # Green
+export BIYellow='\e[1;93m'    # Yellow
+export BIBlue='\e[1;94m'      # Blue
+export BIPurple='\e[1;95m'    # Purple
+export BICyan='\e[1;96m'      # Cyan
+export BIWhite='\e[1;97m'     # White
+
+# High Intensity backgrounds
+export On_IBlack='\e[0;100m'  # Black
+export On_IRed='\e[0;101m'    # Red
+export On_IGreen='\e[0;102m'  # Green
+export On_IYellow='\e[0;103m' # Yellow
+export On_IBlue='\e[0;104m'   # Blue
+export On_IPurple='\e[0;105m' # Purple
+export On_ICyan='\e[0;106m'   # Cyan
+export On_IWhite='\e[0;107m'  # White
diff --git a/dev-support/docker/drill_env_checks.sh b/dev-support/docker/drill_env_checks.sh
new file mode 100755
index 0000000..b92182f
--- /dev/null
+++ b/dev-support/docker/drill_env_checks.sh
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# SHELLDOC-IGNORE
+
+# -------------------------------------------------------
+function showWelcome {
+# http://patorjk.com/software/taag/#p=display&f=Doom&t=Drill%20Build%20Env.
+cat << "Welcome-message"
+
+______      _ _ _  ______       _ _     _   _____
+|  _  \    (_) | | | ___ \     (_) |   | | |  ___|
+| | | |_ __ _| | | | |_/ /_   _ _| | __| | | |__ _ ____   __
+| | | | '__| | | | | ___ \ | | | | |/ _` | |  __| '_ \ \ / /
+| |/ /| |  | | | | | |_/ / |_| | | | (_| | | |__| | | \ V /
+|___/ |_|  |_|_|_| \____/ \__,_|_|_|\__,_| \____/_| |_|\_(_)
+
+This is the standard Drill Developer build environment.
+This has all the right tools installed required to build
+Apache Drill from source.
+
+Welcome-message
+}
+
+# -------------------------------------------------------
+
+function showAbort {
+  cat << "Abort-message"
+
+  ___  _                _   _
+ / _ \| |              | | (_)
+/ /_\ \ |__   ___  _ __| |_ _ _ __   __ _
+|  _  | '_ \ / _ \| '__| __| | '_ \ / _\` |
+| | | | |_) | (_) | |  | |_| | | | | (_| |
+\_| |_/_.__/ \___/|_|   \__|_|_| |_|\__, |
+                                     __/ |
+                                    |___/
+
+Abort-message
+}
+
+# -------------------------------------------------------
+
+function failIfUserIsRoot {
+    if [ "$(id -u)" -eq "0" ]; # If you are root then something went wrong.
+    then
+        cat <<End-of-message
+
+Apparently you are inside this docker container as the user root.
+Putting it simply:
+
+   This should not occur.
+
+Known possible causes of this are:
+1) Running this script as the root user ( Just don't )
+2) Running an old docker version ( upgrade to 1.4.1 or higher )
+
+End-of-message
+
+    showAbort
+
+    logout
+
+    fi
+}
+
+# -------------------------------------------------------
+
+function warnIfLowMemory {
+    MINIMAL_MEMORY=2046755
+    INSTALLED_MEMORY=$(grep -F MemTotal /proc/meminfo | awk '{print $2}')
+    if [[ $((INSTALLED_MEMORY)) -lt $((MINIMAL_MEMORY)) ]]; then
+        cat <<End-of-message
+
+ _                    ___  ___
+| |                   |  \\/  |
+| |     _____      __ | .  . | ___ _ __ ___   ___  _ __ _   _
+| |    / _ \\ \\ /\\ / / | |\\/| |/ _ \\ '_ \` _ \\ / _ \\| '__| | | |
+| |___| (_) \\ V  V /  | |  | |  __/ | | | | | (_) | |  | |_| |
+\\_____/\\___/ \\_/\\_/   \\_|  |_/\\___|_| |_| |_|\\___/|_|   \\__, |
+                                                         __/ |
+                                                        |___/
+
+Your system is running on very little memory.
+This means it may work but it will most likely be slower than needed.
+
+If you are running this via boot2docker you can simply increase
+the available memory to at least ${MINIMAL_MEMORY}KiB
+(you have ${INSTALLED_MEMORY}KiB )
+
+End-of-message
+    fi
+}
+
+# -------------------------------------------------------
+
+showWelcome
+warnIfLowMemory
+failIfUserIsRoot
+
+# -------------------------------------------------------
+
+. "/scripts/bashcolors.sh"
+. "/usr/lib/git-core/git-sh-prompt"
+export PS1='\['${IBlue}${On_Black}'\] \u@\['${IWhite}${On_Red}'\][Drill Build Env.]\['${IBlue}${On_Black}'\]:\['${Cyan}${On_Black}'\]\w$(declare -F __git_ps1 &>/dev/null && __git_ps1 " \['${BIPurple}'\]{\['${BIGreen}'\]%s\['${BIPurple}'\]}")\['${BIBlue}'\] ]\['${Color_Off}'\]\n$ '
diff --git a/dev-support/docker/time.sh b/dev-support/docker/time.sh
new file mode 100644
index 0000000..94af172
--- /dev/null
+++ b/dev-support/docker/time.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+time "$@"
diff --git a/dev-support/formatter/eclipse_formatter_apache.xml b/dev-support/formatter/eclipse_formatter_apache.xml
new file mode 100644
index 0000000..9b73034
--- /dev/null
+++ b/dev-support/formatter/eclipse_formatter_apache.xml
@@ -0,0 +1,288 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<profiles version="11">
+<profile kind="CodeFormatterProfile" name="Apache" version="11">
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_field" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_ellipsis" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_multiple_fields" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_conditional_expression" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_array_initializer" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_package" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_package" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.source" value="1.5"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_line_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.join_wrapped_lines" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_member_type" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.align_type_members_on_columns" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_parameter_description" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.lineSplit" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.indentation.size" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_assignment" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.assertIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.char" value="space"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_body" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_method" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_switch" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.enumIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_ellipsis" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_method_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.compact_else_if" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_constant" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_root_tags" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.size" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_empty_lines" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block_in_case" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.compiler.compliance" value="1.5"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_binary_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode" value="enabled"/>
+<setting id="org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_javadoc_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.line_length" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_import_groups" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_before_binary_operator" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_block" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.join_lines_in_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_compact_if" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_html" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_source_code" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.targetPlatform" value="1.5"/>
+<setting id="org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_header" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_block_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_enum_constants" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line" value="false"/>
+</profile>
+</profiles>
diff --git a/dev-support/formatter/intellij-idea-settings.jar b/dev-support/formatter/intellij-idea-settings.jar
new file mode 100644
index 0000000..8ce13ca
--- /dev/null
+++ b/dev-support/formatter/intellij-idea-settings.jar
Binary files differ
diff --git a/distribution/pom.xml b/distribution/pom.xml
index 6a1b29f..6adce9d 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -28,7 +28,7 @@
 
   <artifactId>distribution</artifactId>
   <packaging>pom</packaging>
-  <name>Packaging and Distribution Assembly</name>
+  <name>Drill : Packaging and Distribution Assembly</name>
 
   <properties>
     <aws.java.sdk.version>1.11.375</aws.java.sdk.version>
@@ -334,6 +334,11 @@
         </dependency>
         <dependency>
           <groupId>org.apache.drill.contrib</groupId>
+          <artifactId>drill-storage-elasticsearch</artifactId>
+          <version>${project.version}</version>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.drill.contrib</groupId>
           <artifactId>drill-udfs</artifactId>
           <version>${project.version}</version>
         </dependency>
@@ -344,6 +349,11 @@
         </dependency>
         <dependency>
           <groupId>org.apache.drill.contrib</groupId>
+          <artifactId>drill-format-httpd</artifactId>
+          <version>${project.version}</version>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.drill.contrib</groupId>
           <artifactId>drill-format-hdf5</artifactId>
           <version>${project.version}</version>
         </dependency>
@@ -359,6 +369,16 @@
         </dependency>
         <dependency>
           <groupId>org.apache.drill.contrib</groupId>
+          <artifactId>drill-format-xml</artifactId>
+          <version>${project.version}</version>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.drill.contrib</groupId>
+          <artifactId>drill-format-image</artifactId>
+          <version>${project.version}</version>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.drill.contrib</groupId>
           <artifactId>drill-format-esri</artifactId>
           <version>${project.version}</version>
         </dependency>
@@ -754,4 +774,4 @@
     </profile>
   </profiles>
 
-</project>
\ No newline at end of file
+</project>
diff --git a/distribution/src/assemble/component.xml b/distribution/src/assemble/component.xml
index b9a2fce..334ff4e 100644
--- a/distribution/src/assemble/component.xml
+++ b/distribution/src/assemble/component.xml
@@ -44,13 +44,17 @@
         <include>org.apache.drill.contrib:drill-format-mapr:jar</include>
         <include>org.apache.drill.contrib:drill-format-syslog:jar</include>
         <include>org.apache.drill.contrib:drill-format-esri:jar</include>
+        <include>org.apache.drill.contrib:drill-format-xml:jar</include>
+        <include>org.apache.drill.contrib:drill-format-image:jar</include>
         <include>org.apache.drill.contrib:drill-format-hdf5:jar</include>
         <include>org.apache.drill.contrib:drill-format-ltsv:jar</include>
+        <include>org.apache.drill.contrib:drill-format-httpd:jar</include>
         <include>org.apache.drill.contrib:drill-format-excel:jar</include>
         <include>org.apache.drill.contrib:drill-format-spss:jar</include>
         <include>org.apache.drill.contrib:drill-jdbc-storage:jar</include>
         <include>org.apache.drill.contrib:drill-kudu-storage:jar</include>
         <include>org.apache.drill.contrib:drill-storage-kafka:jar</include>
+        <include>org.apache.drill.contrib:drill-storage-elasticsearch:jar</include>
         <include>org.apache.drill.contrib:drill-storage-http:jar</include>
         <include>org.apache.drill.contrib:drill-opentsdb-storage:jar</include>
         <include>org.apache.drill.contrib:drill-udfs:jar</include>
diff --git a/docs/dev/DevDocs.md b/docs/dev/DevDocs.md
index 9b6ec09..e6e8420 100644
--- a/docs/dev/DevDocs.md
+++ b/docs/dev/DevDocs.md
@@ -15,3 +15,7 @@
 ## Javadocs
 
 For more info about generating and using javadocs see [Javadocs.md](Javadocs.md)
+
+## Building with Maven
+
+For more info about the use of Maven see [Maven.md](Maven.md)
diff --git a/docs/dev/Environment.md b/docs/dev/Environment.md
index 47d37c1..fb594d8 100644
--- a/docs/dev/Environment.md
+++ b/docs/dev/Environment.md
@@ -6,6 +6,14 @@
   * Java 8
   * Maven 3.6.3 or greater
 
+## Docker based build environment
+
+The `start-build-env.sh` script in the root of the project source builds and starts a preconfigured environment
+that contains all the tools needed to build Apache Drill from source.
+
+This is known to work on Ubuntu 20.04 with Docker installed.
+On other systems your success may vary; recent Redhat/CentOS based systems no longer ship with Docker by default.
+
 ## Confirm settings
     # java -version
     java version "1.8.0_161"
@@ -15,6 +23,11 @@
     # mvn --version
     Apache Maven 3.6.3
 
+## Formatter Configuration
+
+Setting up IDE formatters is recommended and can be done by importing the following settings into your IDE.
+[Formatter File](../../dev-support/formatter)
+
 ## Checkout
 
     git clone https://github.com/apache/drill.git
@@ -24,6 +37,14 @@
     cd drill
     mvn clean install -DskipTests
 
+## Build Quickly
+The following command builds Drill in about 2 minutes and is useful for quick testing.
+
+    mvn install -T 4 -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Drat.skip=true -Dlicense.skip=true -Dcheckstyle.skip=true -Dfindbugs.skip=true -Dmaven.site.skip=true -Denforcer.skip=true -DskipIfEmpty=true -Dmaven.compiler.optimize=true
+
+## Generate Dependency Report
+    mvn clean site
+
 ## Explode tarball in installation directory
    
     mkdir /opt/drill
diff --git a/docs/dev/Maven.md b/docs/dev/Maven.md
new file mode 100644
index 0000000..cf829f2
--- /dev/null
+++ b/docs/dev/Maven.md
@@ -0,0 +1,30 @@
+# Maven
+
+## Introduction
+
+The Drill project uses Apache Maven as the build tool.
+The project has been split into a number of separate Maven modules to isolate the various components and plugins.
+
+## Naming new modules
+
+### The artifact name
+The artifactId is used as the name of the output of the build.
+
+In general the name of a new artifact should follow the pattern `drill-<what>-<kind>`.
+So a `storage` plugin for `something` should become `drill-something-storage`.
+
+If a module is really just a combination of other modules then its name should end with `-parent` and its packaging must be `<packaging>pom</packaging>`.
+
+### The logging name
+To keep the build output readable for the developers building the system, each module should show a name
+that is easy to read and makes it clear which part of the project is being built.
+
+When creating a new module, please make sure its name follows the pattern used by the other modules.
+
+Some basic patterns of those names:
+- All start with `Drill : `
+- Various parts are separated by ` : `
+- A `pom` module name ends with ` : `
+- A `jar` module name ends with the name of that module.
+
+Please make sure the names are concise, unique and easy to read.
diff --git a/drill-shaded/pom.xml b/drill-shaded/pom.xml
index b3bb3f8..e71abf4 100644
--- a/drill-shaded/pom.xml
+++ b/drill-shaded/pom.xml
@@ -32,7 +32,7 @@
   <artifactId>drill-shaded</artifactId>
   <version>1.0</version>
 
-  <name>drill-shaded</name>
+  <name>Drill : Shaded</name>
   <packaging>pom</packaging>
 
   <build>
diff --git a/drill-yarn/pom.xml b/drill-yarn/pom.xml
index fd4b564..8334c41 100644
--- a/drill-yarn/pom.xml
+++ b/drill-yarn/pom.xml
@@ -28,7 +28,7 @@
 
   <artifactId>drill-yarn</artifactId>
   <packaging>jar</packaging>
-  <name>Drill-on-YARN</name>
+  <name>Drill : On-YARN</name>
 
   <build>
     <plugins>
diff --git a/exec/java-exec/pom.xml b/exec/java-exec/pom.xml
index 8417906..9535721 100644
--- a/exec/java-exec/pom.xml
+++ b/exec/java-exec/pom.xml
@@ -26,7 +26,7 @@
     <version>1.19.0-SNAPSHOT</version>
   </parent>
   <artifactId>drill-java-exec</artifactId>
-  <name>exec/Java Execution Engine</name>
+  <name>Drill : Exec : Java Execution Engine</name>
 
   <properties>
     <libpam4j.version>1.8-rev2</libpam4j.version>
@@ -199,7 +199,7 @@
     <dependency>
       <groupId>org.mongodb</groupId>
       <artifactId>mongo-java-driver</artifactId>
-      <version>3.8.0</version>
+      <version>3.12.7</version>
     </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.module</groupId>
@@ -529,7 +529,7 @@
     <dependency>
       <groupId>nl.basjes.parse.httpdlog</groupId>
       <artifactId>httpdlog-parser</artifactId>
-      <version>5.3</version>
+      <version>${httpdlog-parser.version}</version>
       <exclusions>
         <exclusion>
           <groupId>commons-codec</groupId>
@@ -606,10 +606,6 @@
       </exclusions>
     </dependency>
     <dependency>
-      <groupId>com.drewnoakes</groupId>
-      <artifactId>metadata-extractor</artifactId>
-    </dependency>
-    <dependency>
       <groupId>fr.bmartel</groupId>
       <artifactId>pcapngdecoder</artifactId>
       <version>1.2</version>
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassBuilder.java
index 2c580da..fc78f51 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassBuilder.java
@@ -23,7 +23,6 @@
 import java.util.Map;
 
 import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.util.DrillStringUtils;
 import org.apache.drill.exec.compile.ClassTransformer.ClassNames;
 import org.apache.drill.exec.exception.ClassTransformationException;
 import org.apache.drill.exec.expr.CodeGenerator;
@@ -144,21 +143,22 @@
       saveCode(code, name);
     }
 
+    Class<?> compiledClass = getCompiledClass(code, className, config, options);
+    logger.debug("Compiled {}: time = {} ms.",
+        className,
+        (System.nanoTime() - t1 + 500_000) / 1_000_000);
+    return compiledClass;
+  }
+
+  public static Class<?> getCompiledClass(String code, String className,
+      DrillConfig config, OptionSet options) throws CompileException, ClassNotFoundException, ClassTransformationException, IOException {
     // Compile the code and load it into a class loader.
     CachedClassLoader classLoader = new CachedClassLoader();
     ClassCompilerSelector compilerSelector = new ClassCompilerSelector(classLoader, config, options);
+    ClassNames name = new ClassNames(className);
     Map<String,byte[]> results = compilerSelector.compile(name, code);
     classLoader.addClasses(results);
 
-    long totalBytecodeSize = 0;
-    for (byte[] clazz : results.values()) {
-      totalBytecodeSize += clazz.length;
-    }
-    logger.debug("Compiled {}: bytecode size = {}, time = {} ms.",
-                 cg.getClassName(),
-                  DrillStringUtils.readable(totalBytecodeSize),
-                  (System.nanoTime() - t1 + 500_000) / 1_000_000);
-
     // Get the class from the class loader.
     try {
       return classLoader.findClass(className);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/StringFunctions.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/StringFunctions.java
index b9c53be..7b2aaba 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/StringFunctions.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/StringFunctions.java
@@ -387,63 +387,48 @@
   @FunctionTemplate(name = "split_part", scope = FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL,
                     outputWidthCalculatorType = OutputWidthCalculatorType.CUSTOM_FIXED_WIDTH_DEFAULT)
   public static class SplitPart implements DrillSimpleFunc {
-    @Param  VarCharHolder str;
-    @Param  VarCharHolder splitter;
-    @Param  IntHolder index;
+    @Param
+    VarCharHolder in;
+    @Param
+    VarCharHolder delimiter;
 
-    @Output VarCharHolder out;
+    @Param
+    IntHolder index;
+
+    @Workspace
+    com.google.common.base.Splitter splitter;
+
+    @Inject
+    DrillBuf buffer;
+
+    @Output
+    VarCharHolder out;
 
     @Override
-    public void setup() {}
-
-    @Override
-    public void eval() {
+    public void setup() {
       if (index.value < 1) {
         throw org.apache.drill.common.exceptions.UserException.functionError()
             .message("Index in split_part must be positive, value provided was " + index.value).build();
       }
-      int bufPos = str.start;
-      out.start = bufPos;
-      boolean beyondLastIndex = false;
-      int splitterLen = (splitter.end - splitter.start);
-      for (int i = 1; i < index.value + 1; i++) {
-        //Do string match.
-        final int pos = org.apache.drill.exec.expr.fn.impl.StringFunctionUtil.stringLeftMatchUTF8(str.buffer,
-            bufPos, str.end,
-            splitter.buffer, splitter.start, splitter.end);
-        if (pos < 0) {
-          // this is the last iteration, it is okay to hit the end of the string
-          if (i == index.value) {
-            bufPos = str.end;
-            // when the output is terminated by the end of the string we do not want
-            // to subtract the length of the splitter from the output at the end of
-            // the function below
-            splitterLen = 0;
-            break;
-          } else {
-            beyondLastIndex = true;
-            break;
-          }
-        } else {
-          // Count the # of characters. (one char could have 1-4 bytes)
-          // unlike the position function don't add 1, we are not translating the positions into SQL user level 1 based indices
-          bufPos = org.apache.drill.exec.expr.fn.impl.StringFunctionUtil.getUTF8CharLength(str.buffer, str.start, pos)
-              + splitterLen;
-          // if this is the second to last iteration, store the position again, as the start and end of the
-          // string to be returned need to be available
-          if (i == index.value - 1) {
-            out.start = bufPos;
-          }
-        }
-      }
-      if (beyondLastIndex) {
-        out.start = 0;
-        out.end = 0;
-        out.buffer = str.buffer;
-      } else {
-        out.buffer = str.buffer;
-        out.end = bufPos - splitterLen;
-      }
+      String split = org.apache.drill.exec.expr.fn.impl.StringFunctionHelpers.
+              toStringFromUTF8(delimiter.start, delimiter.end, delimiter.buffer);
+      splitter = com.google.common.base.Splitter.on(split);
+
+    }
+
+    @Override
+    public void eval() {
+      String inputString =
+              org.apache.drill.exec.expr.fn.impl.StringFunctionHelpers.toStringFromUTF8(in.start, in.end, in.buffer);
+      int arrayIndex = index.value - 1;
+      String result =
+              (String) com.google.common.collect.Iterables.get(splitter.split(inputString), arrayIndex, "");
+      byte[] strBytes = result.getBytes(com.google.common.base.Charsets.UTF_8);
+
+      out.buffer = buffer = buffer.reallocIfNeeded(strBytes.length);
+      out.start = 0;
+      out.end = strBytes.length;
+      out.buffer.setBytes(0, strBytes);
     }
 
   }
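The rewritten `split_part` above hands the actual splitting to Guava instead of scanning UTF-8 bytes by hand. A minimal, self-contained sketch of the behaviour the new `setup()`/`eval()` pair relies on, outside Drill's generated code (the class name is made up, and it assumes Guava is on the classpath):

```java
import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;

public class SplitPartSketch {
  public static void main(String[] args) {
    // The Splitter is built once from the literal delimiter, as in setup() above.
    Splitter splitter = Splitter.on(",");
    String input = "a,b,c";

    // split_part uses SQL-style 1-based indices; eval() subtracts 1 before the lookup.
    int index = 2;
    String part = Iterables.get(splitter.split(input), index - 1, "");
    System.out.println(part); // b

    // An index past the last part falls back to the empty-string default.
    System.out.println(Iterables.get(splitter.split(input), 9, "").isEmpty()); // true
  }
}
```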
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentStats.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentStats.java
index 2129411..5ac0cae 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentStats.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentStats.java
@@ -32,7 +32,7 @@
 public class FragmentStats {
 //  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FragmentStats.class);
 
-  private Map<ImmutablePair<Integer, Integer>, OperatorStats> operators = new LinkedHashMap<>();
+  private final Map<ImmutablePair<Integer, String>, OperatorStats> operators = new LinkedHashMap<>();
   private final long startTime;
   private final DrillbitEndpoint endpoint;
   private final BufferAllocator allocator;
@@ -48,7 +48,7 @@
     prfB.setMaxMemoryUsed(allocator.getPeakMemoryAllocation());
     prfB.setEndTime(System.currentTimeMillis());
     prfB.setEndpoint(endpoint);
-    for(Entry<ImmutablePair<Integer, Integer>, OperatorStats> o : operators.entrySet()){
+    for(Entry<ImmutablePair<Integer, String>, OperatorStats> o : operators.entrySet()){
       prfB.addOperatorProfile(o.getValue().getProfile());
     }
   }
@@ -62,15 +62,14 @@
    */
   public OperatorStats newOperatorStats(final OpProfileDef profileDef, final BufferAllocator allocator) {
     final OperatorStats stats = new OperatorStats(profileDef, allocator);
-    if(profileDef.operatorType != -1) {
-      @SuppressWarnings("unused")
-      OperatorStats existingStatsHolder = addOperatorStats(stats);
+    if (profileDef.operatorType != null) {
+      addOperatorStats(stats);
     }
     return stats;
   }
 
-  public OperatorStats addOperatorStats(OperatorStats stats) {
-    return operators.put(new ImmutablePair<>(stats.operatorId, stats.operatorType), stats);
+  public void addOperatorStats(OperatorStats stats) {
+    operators.put(new ImmutablePair<>(stats.operatorId, stats.operatorType), stats);
   }
 
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OpProfileDef.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OpProfileDef.java
index 8768eb3..e7b4f6c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OpProfileDef.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OpProfileDef.java
@@ -20,10 +20,10 @@
 public class OpProfileDef {
 
   public int operatorId;
-  public int operatorType;
+  public String operatorType;
   public int incomingCount;
 
-  public OpProfileDef(int operatorId, int operatorType, int incomingCount) {
+  public OpProfileDef(int operatorId, String operatorType, int incomingCount) {
     this.operatorId = operatorId;
     this.operatorType = operatorType;
     this.incomingCount = incomingCount;
@@ -32,7 +32,7 @@
     return operatorId;
   }
 
-  public int getOperatorType(){
+  public String getOperatorType(){
     return operatorType;
   }
   public int getIncomingCount(){
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorMetricRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorMetricRegistry.java
index da59068..c64a726 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorMetricRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorMetricRegistry.java
@@ -17,6 +17,21 @@
  */
 package org.apache.drill.exec.ops;
 
+import org.apache.drill.exec.physical.config.BroadcastSender;
+import org.apache.drill.exec.physical.config.ExternalSort;
+import org.apache.drill.exec.physical.config.FlattenPOP;
+import org.apache.drill.exec.physical.config.HashAggregate;
+import org.apache.drill.exec.physical.config.HashJoinPOP;
+import org.apache.drill.exec.physical.config.HashPartitionSender;
+import org.apache.drill.exec.physical.config.LateralJoinPOP;
+import org.apache.drill.exec.physical.config.MergeJoinPOP;
+import org.apache.drill.exec.physical.config.MergingReceiverPOP;
+import org.apache.drill.exec.physical.config.RuntimeFilterPOP;
+import org.apache.drill.exec.physical.config.Screen;
+import org.apache.drill.exec.physical.config.SingleSender;
+import org.apache.drill.exec.physical.config.UnionAll;
+import org.apache.drill.exec.physical.config.UnnestPOP;
+import org.apache.drill.exec.physical.config.UnorderedReceiver;
 import org.apache.drill.exec.physical.impl.ScreenCreator;
 import org.apache.drill.exec.physical.impl.SingleSenderCreator;
 import org.apache.drill.exec.physical.impl.aggregate.HashAggTemplate;
@@ -29,8 +44,8 @@
 import org.apache.drill.exec.physical.impl.unnest.UnnestRecordBatch;
 import org.apache.drill.exec.physical.impl.unorderedreceiver.UnorderedReceiverBatch;
 import org.apache.drill.exec.physical.impl.xsort.ExternalSortBatch;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.AbstractBinaryRecordBatch;
+import org.apache.drill.exec.store.parquet.ParquetRowGroupScan;
 import org.apache.drill.exec.store.parquet.columnreaders.ParquetRecordReader;
 
 import java.util.Arrays;
@@ -44,28 +59,28 @@
 //  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(OperatorMetricRegistry.class);
 
   // Mapping: key : operator type, value : metric id --> metric name
-  private static final Map<Integer, String[]> OPERATOR_METRICS = new HashMap<>();
+  private static final Map<String, String[]> OPERATOR_METRICS = new HashMap<>();
 
   static {
-    register(CoreOperatorType.SCREEN_VALUE, ScreenCreator.ScreenRoot.Metric.class);
-    register(CoreOperatorType.SINGLE_SENDER_VALUE, SingleSenderCreator.SingleSenderRootExec.Metric.class);
-    register(CoreOperatorType.BROADCAST_SENDER_VALUE, BroadcastSenderRootExec.Metric.class);
-    register(CoreOperatorType.HASH_PARTITION_SENDER_VALUE, PartitionSenderRootExec.Metric.class);
-    register(CoreOperatorType.MERGING_RECEIVER_VALUE, MergingRecordBatch.Metric.class);
-    register(CoreOperatorType.UNORDERED_RECEIVER_VALUE, UnorderedReceiverBatch.Metric.class);
-    register(CoreOperatorType.HASH_AGGREGATE_VALUE, HashAggTemplate.Metric.class);
-    register(CoreOperatorType.HASH_JOIN_VALUE, HashJoinBatch.Metric.class);
-    register(CoreOperatorType.EXTERNAL_SORT_VALUE, ExternalSortBatch.Metric.class);
-    register(CoreOperatorType.PARQUET_ROW_GROUP_SCAN_VALUE, ParquetRecordReader.Metric.class);
-    register(CoreOperatorType.FLATTEN_VALUE, FlattenRecordBatch.Metric.class);
-    register(CoreOperatorType.MERGE_JOIN_VALUE, AbstractBinaryRecordBatch.Metric.class);
-    register(CoreOperatorType.LATERAL_JOIN_VALUE, AbstractBinaryRecordBatch.Metric.class);
-    register(CoreOperatorType.UNNEST_VALUE, UnnestRecordBatch.Metric.class);
-    register(CoreOperatorType.UNION_VALUE, AbstractBinaryRecordBatch.Metric.class);
-    register(CoreOperatorType.RUNTIME_FILTER_VALUE, RuntimeFilterRecordBatch.Metric.class);
+    register(Screen.OPERATOR_TYPE, ScreenCreator.ScreenRoot.Metric.class);
+    register(SingleSender.OPERATOR_TYPE, SingleSenderCreator.SingleSenderRootExec.Metric.class);
+    register(BroadcastSender.OPERATOR_TYPE, BroadcastSenderRootExec.Metric.class);
+    register(HashPartitionSender.OPERATOR_TYPE, PartitionSenderRootExec.Metric.class);
+    register(MergingReceiverPOP.OPERATOR_TYPE, MergingRecordBatch.Metric.class);
+    register(UnorderedReceiver.OPERATOR_TYPE, UnorderedReceiverBatch.Metric.class);
+    register(HashAggregate.OPERATOR_TYPE, HashAggTemplate.Metric.class);
+    register(HashJoinPOP.OPERATOR_TYPE, HashJoinBatch.Metric.class);
+    register(ExternalSort.OPERATOR_TYPE, ExternalSortBatch.Metric.class);
+    register(ParquetRowGroupScan.OPERATOR_TYPE, ParquetRecordReader.Metric.class);
+    register(FlattenPOP.OPERATOR_TYPE, FlattenRecordBatch.Metric.class);
+    register(MergeJoinPOP.OPERATOR_TYPE, AbstractBinaryRecordBatch.Metric.class);
+    register(LateralJoinPOP.OPERATOR_TYPE, AbstractBinaryRecordBatch.Metric.class);
+    register(UnnestPOP.OPERATOR_TYPE, UnnestRecordBatch.Metric.class);
+    register(UnionAll.OPERATOR_TYPE, AbstractBinaryRecordBatch.Metric.class);
+    register(RuntimeFilterPOP.OPERATOR_TYPE, RuntimeFilterRecordBatch.Metric.class);
   }
 
-  private static void register(final int operatorType, final Class<? extends MetricDef> metricDef) {
+  private static void register(String operatorType, Class<? extends MetricDef> metricDef) {
     // Currently registers a metric def that has enum constants
     MetricDef[] enumConstants = metricDef.getEnumConstants();
     if (enumConstants != null) {
@@ -82,7 +97,7 @@
    * @param operatorType the operator type
    * @return metric names if operator was registered, null otherwise
    */
-  public static String[] getMetricNames(int operatorType) {
+  public static String[] getMetricNames(String operatorType) {
     return OPERATOR_METRICS.get(operatorType);
   }
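`OperatorMetricRegistry` now keys its metric-name lookup by the operator type name (a String) instead of the `CoreOperatorType` protobuf ordinal. A self-contained sketch of that registration pattern, with a made-up enum standing in for real metric enums such as `ExternalSortBatch.Metric`:

```java
import java.util.HashMap;
import java.util.Map;

public class MetricRegistrySketch {
  // Stand-in for a real metric enum such as ExternalSortBatch.Metric.
  enum SortMetric { SPILL_COUNT, PEAK_BATCHES_IN_MEMORY }

  // Mapping: operator type name -> metric id (enum ordinal) -> metric name.
  private static final Map<String, String[]> OPERATOR_METRICS = new HashMap<>();

  static void register(String operatorType, Enum<?>[] metrics) {
    String[] names = new String[metrics.length];
    for (Enum<?> metric : metrics) {
      names[metric.ordinal()] = metric.name();
    }
    OPERATOR_METRICS.put(operatorType, names);
  }

  static String[] getMetricNames(String operatorType) {
    return OPERATOR_METRICS.get(operatorType);
  }

  public static void main(String[] args) {
    register("EXTERNAL_SORT", SortMetric.values());
    System.out.println(String.join(", ", getMetricNames("EXTERNAL_SORT")));
  }
}
```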
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorStats.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorStats.java
index 67a8b80..2f9300d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorStats.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/OperatorStats.java
@@ -20,7 +20,6 @@
 import java.util.Iterator;
 
 import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.proto.UserBitShared.MetricValue;
 import org.apache.drill.exec.proto.UserBitShared.OperatorProfile;
 import org.apache.drill.exec.proto.UserBitShared.OperatorProfile.Builder;
@@ -32,19 +31,20 @@
 import com.carrotsearch.hppc.cursors.IntLongCursor;
 import com.carrotsearch.hppc.procedures.IntDoubleProcedure;
 import com.carrotsearch.hppc.procedures.IntLongProcedure;
+import org.apache.drill.exec.server.rest.profile.CoreOperatorType;
 import org.apache.drill.shaded.guava.com.google.common.annotations.VisibleForTesting;
 
 public class OperatorStats {
   protected final int operatorId;
-  protected final int operatorType;
+  protected final String operatorType;
   private final BufferAllocator allocator;
 
-  private IntLongHashMap longMetrics = new IntLongHashMap();
-  private IntDoubleHashMap doubleMetrics = new IntDoubleHashMap();
+  private final IntLongHashMap longMetrics = new IntLongHashMap();
+  private final IntDoubleHashMap doubleMetrics = new IntDoubleHashMap();
 
   public long[] recordsReceivedByInput;
   public long[] batchesReceivedByInput;
-  private long[] schemaCountByInput;
+  private final long[] schemaCountByInput;
 
 
   private boolean inProcessing = false;
@@ -59,7 +59,7 @@
   private long setupMark;
   private long waitMark;
 
-  private int inputCount;
+  private final int inputCount;
 
   public OperatorStats(OpProfileDef def, BufferAllocator allocator){
     this(def.getOperatorId(), def.getOperatorType(), def.getIncomingCount(), allocator);
@@ -88,7 +88,7 @@
   }
 
   @VisibleForTesting
-  public OperatorStats(int operatorId, int operatorType, int inputCount, BufferAllocator allocator) {
+  public OperatorStats(int operatorId, String operatorType, int inputCount, BufferAllocator allocator) {
     super();
     this.allocator = allocator;
     this.operatorId = operatorId;
@@ -191,24 +191,31 @@
   }
 
   public String getId() {
-    StringBuilder s = new StringBuilder();
-    return s.append(this.operatorId)
+    return new StringBuilder()
+        .append(this.operatorId)
         .append(":")
         .append("[")
-        .append(UserBitShared.CoreOperatorType.valueOf(operatorType))
+        .append(operatorType)
         .append("]")
         .toString();
   }
 
+  @SuppressWarnings("deprecation")
   public OperatorProfile getProfile() {
-    final OperatorProfile.Builder b = OperatorProfile //
-        .newBuilder() //
-        .setOperatorType(operatorType) //
-        .setOperatorId(operatorId) //
-        .setSetupNanos(setupNanos) //
+    final OperatorProfile.Builder b = OperatorProfile
+        .newBuilder()
+        .setOperatorTypeName(operatorType)
+        .setOperatorId(operatorId)
+        .setSetupNanos(setupNanos)
         .setProcessNanos(processingNanos)
         .setWaitNanos(waitNanos);
 
+    CoreOperatorType coreOperatorType = CoreOperatorType.forName(operatorType);
+
+    if (coreOperatorType != null) {
+      b.setOperatorType(coreOperatorType.getId());
+    }
+
     if (allocator != null) {
       b.setPeakLocalMemoryAllocated(allocator.getPeakMemoryAllocation());
     }
@@ -229,7 +236,7 @@
     }
   }
 
-  private class LongProc implements IntLongProcedure {
+  private static class LongProc implements IntLongProcedure {
 
     private final OperatorProfile.Builder builder;
 
@@ -250,7 +257,7 @@
     }
   }
 
-  private class DoubleProc implements IntDoubleProcedure {
+  private static class DoubleProc implements IntDoubleProcedure {
     private final OperatorProfile.Builder builder;
 
     public DoubleProc(Builder builder) {
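For backwards compatibility, `getProfile()` above still populates the deprecated numeric operator type whenever the new String name maps to a known legacy value via `CoreOperatorType.forName(...)`. A self-contained sketch of that name-to-legacy-id lookup; the enum and its ids here are illustrative stand-ins, not the real REST-profile `CoreOperatorType`:

```java
public class LegacyIdLookupSketch {
  // Stand-in for the REST-profile CoreOperatorType helper; ids are illustrative only.
  enum LegacyOperatorType {
    SCREEN(0), HASH_JOIN(11);

    private final int id;
    LegacyOperatorType(int id) { this.id = id; }
    int getId() { return id; }

    static LegacyOperatorType forName(String name) {
      for (LegacyOperatorType type : values()) {
        if (type.name().equals(name)) {
          return type;
        }
      }
      return null; // newer operators have no legacy numeric id
    }
  }

  public static void main(String[] args) {
    LegacyOperatorType legacy = LegacyOperatorType.forName("HASH_JOIN");
    if (legacy != null) {
      // Only set the deprecated numeric field when a legacy id exists.
      System.out.println(legacy.getId()); // 11
    }
  }
}
```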
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java
index 4106205..e77dbcb 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractExchange.java
@@ -96,7 +96,7 @@
     return new ArrayList<>(affinityMap.values());
   }
 
-  protected void setupSenders(List<DrillbitEndpoint> senderLocations) throws PhysicalOperatorSetupException {
+  protected void setupSenders(List<DrillbitEndpoint> senderLocations) {
     this.senderLocations = ImmutableList.copyOf(senderLocations);
   }
 
@@ -122,7 +122,7 @@
   }
 
   @Override
-  public int getOperatorType() {
+  public String getOperatorType() {
     throw new UnsupportedOperationException();
   }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractGroupScan.java
index c4f04b3..cc549b1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/AbstractGroupScan.java
@@ -138,7 +138,7 @@
   }
 
   @Override
-  public int getOperatorType() {
+  public String getOperatorType() {
     throw new UnsupportedOperationException();
   }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java
index 3bca02e..325d587 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/base/PhysicalOperator.java
@@ -116,5 +116,5 @@
   String getUserName();
 
   @JsonIgnore
-  int getOperatorType();
+  String getOperatorType();
 }
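With `getOperatorType()` returning a String, each operator config below follows the same convention: declare a public `OPERATOR_TYPE` constant and return it. A compilable sketch of the pattern using a hypothetical operator name (real configs use values such as `"SCREEN"` or `"HASH_JOIN"`):

```java
public class ExampleOperatorConfig {

  // Hypothetical type name; each real config declares its own constant.
  public static final String OPERATOR_TYPE = "EXAMPLE_OPERATOR";

  // In Drill this overrides PhysicalOperator.getOperatorType().
  public String getOperatorType() {
    return OPERATOR_TYPE;
  }
}
```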
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/BroadcastSender.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/BroadcastSender.java
index 0cdc72b..8c2b0e6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/BroadcastSender.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/BroadcastSender.java
@@ -23,7 +23,6 @@
 import org.apache.drill.exec.physical.base.AbstractSender;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -31,7 +30,8 @@
 
 @JsonTypeName("broadcast-sender")
 public class BroadcastSender extends AbstractSender {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(BroadcastSender.class);
+
+  public static final String OPERATOR_TYPE = "BROADCAST_SENDER";
 
   @JsonCreator
   public BroadcastSender(@JsonProperty("receiver-major-fragment") int oppositeMajorFragmentId,
@@ -51,8 +51,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.BROADCAST_SENDER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ComplexToJson.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ComplexToJson.java
index b5e8eca..78fcaed 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ComplexToJson.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ComplexToJson.java
@@ -20,7 +20,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -29,7 +28,8 @@
 
 @JsonTypeName("complex-to-json")
 public class ComplexToJson extends AbstractSingle {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ComplexToJson.class);
+
+  public static final String OPERATOR_TYPE = "COMPLEX_TO_JSON";
 
   @JsonCreator
   public ComplexToJson(@JsonProperty("child") PhysicalOperator child) {
@@ -52,8 +52,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.COMPLEX_TO_JSON_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ExternalSort.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ExternalSort.java
index 9ead21c..c09020d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ExternalSort.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ExternalSort.java
@@ -22,7 +22,6 @@
 import org.apache.drill.common.logical.data.Order.Ordering;
 import org.apache.drill.exec.ops.QueryContext;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -30,10 +29,11 @@
 
 @JsonTypeName("external-sort")
 public class ExternalSort extends Sort {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ExternalSort.class);
 
   public static final long DEFAULT_SORT_ALLOCATION = 20_000_000;
 
+  public static final String OPERATOR_TYPE = "EXTERNAL_SORT";
+
   @JsonCreator
   public ExternalSort(@JsonProperty("child") PhysicalOperator child, @JsonProperty("orderings") List<Ordering> orderings, @JsonProperty("reverse") boolean reverse) {
     super(child, orderings, reverse);
@@ -48,8 +48,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.EXTERNAL_SORT_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   /**
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Filter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Filter.java
index d6eaf76..a944cbe 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Filter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Filter.java
@@ -21,7 +21,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -31,7 +30,7 @@
 @JsonTypeName("filter")
 public class Filter extends AbstractSingle {
 
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Filter.class);
+  public static final String OPERATOR_TYPE = "FILTER";
 
   private final LogicalExpression expr;
   private final float selectivity;
@@ -67,8 +66,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.FILTER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/FlattenPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/FlattenPOP.java
index 74ef426..4e371b1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/FlattenPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/FlattenPOP.java
@@ -25,15 +25,15 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared;
 
 import java.util.Iterator;
 
 @JsonTypeName("flatten")
 public class FlattenPOP extends AbstractSingle {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FlattenPOP.class);
 
-  private SchemaPath column;
+  public static final String OPERATOR_TYPE = "FLATTEN";
+
+  private final SchemaPath column;
 
   @JsonCreator
   public FlattenPOP(
@@ -64,7 +64,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return UserBitShared.CoreOperatorType.FLATTEN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java
index 521aba1..d862c8a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java
@@ -24,7 +24,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.planner.physical.AggPrelBase;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -35,7 +34,7 @@
 @JsonTypeName("hash-aggregate")
 public class HashAggregate extends AbstractSingle {
 
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HashAggregate.class);
+  public static final String OPERATOR_TYPE = "HASH_AGGREGATE";
 
   private final AggPrelBase.OperatorPhase aggPhase;
   private final List<NamedExpression> groupByExprs;
@@ -83,8 +82,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.HASH_AGGREGATE_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   /**
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashJoinPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashJoinPOP.java
index 35c187c..8b1ae3c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashJoinPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashJoinPOP.java
@@ -26,7 +26,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.SubScan;
 import org.apache.drill.exec.planner.common.JoinControl;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.calcite.rel.core.JoinRelType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -41,7 +40,8 @@
 @JsonTypeName("hash-join")
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class HashJoinPOP extends AbstractJoinPop {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HashJoinPOP.class);
+
+  public static final String OPERATOR_TYPE = "HASH_JOIN";
 
   private RuntimeFilterDef runtimeFilterDef;
 
@@ -134,9 +134,9 @@
   }
 
   @Override
-  public int getOperatorType() {
-        return CoreOperatorType.HASH_JOIN_VALUE;
-    }
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
+  }
 
   /**
    *
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashPartitionSender.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashPartitionSender.java
index a586bbe..2bc7447 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashPartitionSender.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashPartitionSender.java
@@ -25,7 +25,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.impl.partitionsender.Partitioner;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -33,7 +32,8 @@
 
 @JsonTypeName("hash-partition-sender")
 public class HashPartitionSender extends AbstractSender {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HashPartitionSender.class);
+
+  public static final String OPERATOR_TYPE = "HASH_PARTITION_SENDER";
 
   private final LogicalExpression expr;
   private final int outgoingBatchSize;
@@ -76,8 +76,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.HASH_PARTITION_SENDER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/IteratorValidator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/IteratorValidator.java
index 4f73b00..ec08e88 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/IteratorValidator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/IteratorValidator.java
@@ -22,7 +22,9 @@
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 
 public class IteratorValidator extends AbstractSingle{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(IteratorValidator.class);
+
+  public static final String OPERATOR_TYPE = "ITERATOR_VALIDATOR";
+
   /* isRepeatable flag will be set to true if this validator is created by a Repeatable pipeline.
    * In a repeatable pipeline some state transitions are valid i.e downstream operator
    * can call the upstream operator even after receiving NONE.
@@ -50,8 +52,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    // TODO: DRILL-6643: this implementation should be revisited
-    return -1;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/LateralJoinPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/LateralJoinPOP.java
index 9e00f7c..2be30b3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/LateralJoinPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/LateralJoinPOP.java
@@ -27,13 +27,13 @@
 import org.apache.drill.exec.physical.base.AbstractJoinPop;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import java.util.List;
 
 @JsonTypeName("lateral-join")
 public class LateralJoinPOP extends AbstractJoinPop {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(LateralJoinPOP.class);
+
+  public static final String OPERATOR_TYPE = "LATERAL_JOIN";
 
   @JsonProperty("excludedColumns")
   private List<SchemaPath> excludedColumns;
@@ -87,8 +87,8 @@
   public String getImplicitRIDColumn() { return this.implicitRIDColumn; }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.LATERAL_JOIN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Limit.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Limit.java
index f79aa93..9850f40 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Limit.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Limit.java
@@ -20,7 +20,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -29,6 +28,9 @@
 
 @JsonTypeName("limit")
 public class Limit extends AbstractSingle {
+
+  public static final String OPERATOR_TYPE = "LIMIT";
+
   private final Integer first;
   private final Integer last;
 
@@ -63,7 +65,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.LIMIT_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergeJoinPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergeJoinPOP.java
index eb1a31a..266ee4d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergeJoinPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergeJoinPOP.java
@@ -26,13 +26,13 @@
 import org.apache.drill.common.logical.data.JoinCondition;
 import org.apache.drill.exec.physical.base.AbstractJoinPop;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import java.util.List;
 
 @JsonTypeName("merge-join")
-public class MergeJoinPOP extends AbstractJoinPop{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MergeJoinPOP.class);
+public class MergeJoinPOP extends AbstractJoinPop {
+
+  public static final String OPERATOR_TYPE = "MERGE_JOIN";
 
   @JsonCreator
   public MergeJoinPOP(
@@ -66,7 +66,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.MERGE_JOIN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergingReceiverPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergingReceiverPOP.java
index a72ff1b..7eef240 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergingReceiverPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MergingReceiverPOP.java
@@ -23,7 +23,6 @@
 import org.apache.drill.exec.physical.MinorFragmentEndpoint;
 import org.apache.drill.exec.physical.base.AbstractReceiver;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -34,8 +33,9 @@
 // is guaranteed to be in order, so the operator simply merges the incoming
 // batches.  This is accomplished by building and depleting a priority queue.
 @JsonTypeName("merging-receiver")
-public class MergingReceiverPOP extends AbstractReceiver{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MergingReceiverPOP.class);
+public class MergingReceiverPOP extends AbstractReceiver {
+
+  public static final String OPERATOR_TYPE = "MERGING_RECEIVER";
 
   private final List<Ordering> orderings;
 
@@ -63,7 +63,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.MERGING_RECEIVER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataControllerPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataControllerPOP.java
index 6b317ff..adad975 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataControllerPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataControllerPOP.java
@@ -24,7 +24,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.metastore.analyze.MetadataControllerContext;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 
 import java.util.Arrays;
@@ -33,6 +32,9 @@
 
 @JsonTypeName("metadataController")
 public class MetadataControllerPOP extends AbstractBase {
+
+  public static final String OPERATOR_TYPE = "METADATA_CONTROLLER";
+
   private final MetadataControllerContext context;
   private final PhysicalOperator left;
   private final PhysicalOperator right;
@@ -57,8 +59,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return UserBitShared.CoreOperatorType.METADATA_CONTROLLER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataHandlerPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataHandlerPOP.java
index 1599d90..9841459 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataHandlerPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/MetadataHandlerPOP.java
@@ -24,10 +24,12 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.metastore.analyze.MetadataHandlerContext;
-import org.apache.drill.exec.proto.UserBitShared;
 
 @JsonTypeName("metadataHandler")
 public class MetadataHandlerPOP extends AbstractSingle {
+
+  public static final String OPERATOR_TYPE = "METADATA_HANDLER";
+
   private final MetadataHandlerContext context;
 
   @JsonCreator
@@ -48,8 +50,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return UserBitShared.CoreOperatorType.METADATA_HANDLER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   public MetadataHandlerContext getContext() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/NestedLoopJoinPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/NestedLoopJoinPOP.java
index 5783b4e..4d6aca4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/NestedLoopJoinPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/NestedLoopJoinPOP.java
@@ -25,13 +25,13 @@
 import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.exec.physical.base.AbstractJoinPop;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import java.util.List;
 
 @JsonTypeName("nested-loop-join")
 public class NestedLoopJoinPOP extends AbstractJoinPop {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(NestedLoopJoinPOP.class);
+
+  public static final String OPERATOR_TYPE = "NESTED_LOOP_JOIN";
 
   @JsonCreator
   public NestedLoopJoinPOP(
@@ -50,7 +50,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.NESTED_LOOP_JOIN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/OrderedPartitionSender.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/OrderedPartitionSender.java
index 320bc6d..7b18036 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/OrderedPartitionSender.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/OrderedPartitionSender.java
@@ -25,7 +25,6 @@
 import org.apache.drill.exec.physical.base.AbstractSender;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -34,15 +33,16 @@
 
 @JsonTypeName("OrderedPartitionSender")
 public class OrderedPartitionSender extends AbstractSender {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(OrderedPartitionSender.class);
+
+  public static final String OPERATOR_TYPE = "ORDERED_PARTITION_SENDER";
 
   private final List<Ordering> orderings;
   private final FieldReference ref;
   private final int sendingWidth;
 
-  private int recordsToSample;
-  private int samplingFactor;
-  private float completionFactor;
+  private final int recordsToSample;
+  private final int samplingFactor;
+  private final float completionFactor;
 
   @JsonCreator
   public OrderedPartitionSender(@JsonProperty("orderings") List<Ordering> orderings,
@@ -103,8 +103,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.ORDERED_PARTITION_SENDER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/PartitionLimit.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/PartitionLimit.java
index 4ea710d..aa71e90 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/PartitionLimit.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/PartitionLimit.java
@@ -22,11 +22,13 @@
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 @JsonTypeName("partition-limit")
 public class PartitionLimit extends Limit {
+
+  public static final String OPERATOR_TYPE = "PARTITION_LIMIT";
+
   private final String partitionColumn;
 
   @JsonCreator
@@ -56,8 +58,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.PARTITION_LIMIT_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ProducerConsumer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ProducerConsumer.java
index e31f417..8d880bb 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ProducerConsumer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/ProducerConsumer.java
@@ -20,7 +20,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -28,7 +27,8 @@
 
 @JsonTypeName("producer-consumer")
 public class ProducerConsumer extends AbstractSingle{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ProducerConsumer.class);
+
+  public static final String OPERATOR_TYPE = "PRODUCER_CONSUMER";
 
   private final int size;
 
@@ -53,7 +53,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.PRODUCER_CONSUMER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Project.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Project.java
index 7c40a9c..b2fc8dd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Project.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Project.java
@@ -23,7 +23,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -32,7 +31,8 @@
 
 @JsonTypeName("project")
 public class Project extends AbstractSingle{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Project.class);
+
+  public static final String OPERATOR_TYPE = "PROJECT";
 
   private final List<NamedExpression> exprs;
 
@@ -58,7 +58,7 @@
   }
 
   /**
-   * @Return true if Project is for the query's final output. Such Project is added by TopProjectVisitor,
+   * @return true if Project is for the query's final output. Such Project is added by TopProjectVisitor,
    * to handle fast NONE when all the inputs to the query are empty and are skipped.
    */
   public boolean isOutputProj() {
@@ -81,7 +81,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.PROJECT_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RangePartitionSender.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RangePartitionSender.java
index 0c0852a..862ffda 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RangePartitionSender.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RangePartitionSender.java
@@ -24,7 +24,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.planner.physical.PartitionFunction;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -33,13 +32,13 @@
 @JsonTypeName("range-partition-sender")
 public class RangePartitionSender extends AbstractSender{
 
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RangePartitionSender.class);
-
   // The number of records in the outgoing batch. This is overriding the default value in Partitioner
   public static final int RANGE_PARTITION_OUTGOING_BATCH_SIZE = (1 << 12) - 1;
 
+  public static final String OPERATOR_TYPE = "RANGE_PARTITION_SENDER";
+
   @JsonProperty("partitionFunction")
-  private PartitionFunction partitionFunction;
+  private final PartitionFunction partitionFunction;
 
   @JsonCreator
   public RangePartitionSender(@JsonProperty("receiver-major-fragment") int oppositeMajorFragmentId,
@@ -66,8 +65,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.RANGE_PARTITION_SENDER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RowKeyJoinPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RowKeyJoinPOP.java
index 1b0980d..3690dfa 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RowKeyJoinPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RowKeyJoinPOP.java
@@ -25,7 +25,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -35,7 +34,8 @@
 
 @JsonTypeName("rowkey-join")
 public class RowKeyJoinPOP extends AbstractBase {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RowKeyJoinPOP.class);
+
+  public static final String OPERATOR_TYPE = "ROWKEY_JOIN";
 
 
   private final PhysicalOperator left;
@@ -90,7 +90,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.ROWKEY_JOIN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RuntimeFilterPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RuntimeFilterPOP.java
index b35bf29..f3df51e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RuntimeFilterPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/RuntimeFilterPOP.java
@@ -23,13 +23,12 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 @JsonTypeName("runtime-filter")
 public class RuntimeFilterPOP extends AbstractSingle {
 
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RuntimeFilterPOP.class);
+  public static final String OPERATOR_TYPE = "RUNTIME_FILTER";
 
   private long identifier;
 
@@ -55,8 +54,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.RUNTIME_FILTER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Screen.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Screen.java
index 13dbbb7..8c1e552 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Screen.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Screen.java
@@ -28,7 +28,6 @@
 import org.apache.drill.exec.physical.base.Store;
 import org.apache.drill.exec.planner.fragment.DistributionAffinity;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JacksonInject;
 import com.fasterxml.jackson.annotation.JsonIgnore;
@@ -37,7 +36,8 @@
 
 @JsonTypeName("screen")
 public class Screen extends AbstractStore {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Screen.class);
+
+  public static final String OPERATOR_TYPE = "SCREEN";
 
   private final DrillbitEndpoint endpoint;
 
@@ -99,8 +99,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.SCREEN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SelectionVectorRemover.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SelectionVectorRemover.java
index 1dcf913..5074e01 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SelectionVectorRemover.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SelectionVectorRemover.java
@@ -20,7 +20,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -30,7 +29,7 @@
 @JsonTypeName("selection-vector-remover")
 public class SelectionVectorRemover extends AbstractSingle {
 
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SelectionVectorRemover.class);
+  public static final String OPERATOR_TYPE = "SELECTION_VECTOR_REMOVER";
 
   @JsonCreator
   public SelectionVectorRemover(@JsonProperty("child") PhysicalOperator child) {
@@ -53,7 +52,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.SELECTION_VECTOR_REMOVER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SingleSender.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SingleSender.java
index 1e74136..59895f3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SingleSender.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/SingleSender.java
@@ -25,7 +25,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonIgnore;
@@ -37,7 +36,8 @@
  */
 @JsonTypeName("single-sender")
 public class SingleSender extends AbstractSender {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SingleSender.class);
+
+  public static final String OPERATOR_TYPE = "SINGLE_SENDER";
 
   /**
    * Create a SingleSender which sends data to fragment identified by given MajorFragmentId and MinorFragmentId,
@@ -96,8 +96,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.SINGLE_SENDER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Sort.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Sort.java
index 5d65c39..4337024 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Sort.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Sort.java
@@ -23,7 +23,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -32,10 +31,11 @@
 
 @JsonTypeName("sort")
 public class Sort extends AbstractSingle{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Sort.class);
+
+  public static final String OPERATOR_TYPE = "OLD_SORT";
 
   protected final List<Ordering> orderings;
-  protected boolean reverse = false;
+  protected boolean reverse;
 
   @JsonCreator
   public Sort(@JsonProperty("child") PhysicalOperator child, @JsonProperty("orderings") List<Ordering> orderings, @JsonProperty("reverse") boolean reverse) {
@@ -68,8 +68,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.OLD_SORT_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsAggregate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsAggregate.java
index 58fee51..c6709c1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsAggregate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsAggregate.java
@@ -21,7 +21,6 @@
 
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -31,6 +30,9 @@
 
 @JsonTypeName("statistics-aggregate")
 public class StatisticsAggregate extends StreamingAggregate {
+
+  public static final String OPERATOR_TYPE = "STATISTICS_AGGREGATE";
+
   private final List<String> functions;
 
   @JsonCreator
@@ -57,8 +59,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.STATISTICS_AGGREGATE_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsMerge.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsMerge.java
index 1570660..16595d1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsMerge.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StatisticsMerge.java
@@ -24,12 +24,13 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
 
 @JsonTypeName("statistics-merge")
 public class StatisticsMerge extends AbstractSingle {
 
+  public static final String OPERATOR_TYPE = "STATISTICS_MERGE";
+
   private final Map<String, String> functions;
   private final double samplePercent;
 
@@ -63,7 +64,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.STATISTICS_MERGE_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StreamingAggregate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StreamingAggregate.java
index 3d074bf..bbad8a1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StreamingAggregate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/StreamingAggregate.java
@@ -21,7 +21,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -32,6 +31,8 @@
 @JsonTypeName("streaming-aggregate")
 public class StreamingAggregate extends AbstractSingle {
 
+  public static final String OPERATOR_TYPE = "STREAMING_AGGREGATE";
+
   private final List<NamedExpression> keys;
   private final List<NamedExpression> exprs;
 
@@ -63,7 +64,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.STREAMING_AGGREGATE_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/TopN.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/TopN.java
index d2a87d5..3b45b5d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/TopN.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/TopN.java
@@ -22,7 +22,6 @@
 import org.apache.drill.common.logical.data.Order.Ordering;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -30,7 +29,8 @@
 
 @JsonTypeName("top-n")
 public class TopN extends Sort {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TopN.class);
+
+  public static final String OPERATOR_TYPE = "TOP_N_SORT";
 
   private final int limit;
 
@@ -65,8 +65,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.TOP_N_SORT_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Trace.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Trace.java
index c777e0d..6550e29 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Trace.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Trace.java
@@ -20,7 +20,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
@@ -28,7 +27,7 @@
 @JsonTypeName("trace")
 public class Trace extends AbstractSingle {
 
-    static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Trace.class);
+    public static final String OPERATOR_TYPE = "TRACE";
 
     /* Tag associated with each trace operator
      * Printed along with trace output to distinguish
@@ -52,7 +51,7 @@
     }
 
     @Override
-    public int getOperatorType() {
-      return CoreOperatorType.TRACE_VALUE;
+    public String getOperatorType() {
+      return OPERATOR_TYPE;
     }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java
index 4593737..59b4bfd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java
@@ -22,7 +22,6 @@
 import org.apache.drill.exec.physical.base.AbstractMultiple;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -32,6 +31,8 @@
 
 public class UnionAll extends AbstractMultiple {
 
+  public static final String OPERATOR_TYPE = "UNION";
+
   @JsonCreator
   public UnionAll(@JsonProperty("children") List<PhysicalOperator> children) {
     super(children);
@@ -48,7 +49,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.UNION_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnnestPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnnestPOP.java
index 42635a8..4bef1b8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnnestPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnnestPOP.java
@@ -33,16 +33,15 @@
 import java.util.Iterator;
 import java.util.List;
 
-import static org.apache.drill.exec.proto.UserBitShared.CoreOperatorType.UNNEST_VALUE;
-
 @JsonTypeName("unnest")
 public class UnnestPOP extends AbstractBase implements Leaf {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(UnnestPOP.class);
+
+  public static final String OPERATOR_TYPE = "UNNEST";
 
   @JsonProperty("implicitColumn")
-  private String implicitColumn;
+  private final String implicitColumn;
 
-  private SchemaPath column;
+  private final SchemaPath column;
 
   @JsonIgnore
   private UnnestRecordBatch unnestBatch;
@@ -91,7 +90,7 @@
   public String getImplicitColumn() { return this.implicitColumn; }
 
   @Override
-  public int getOperatorType() {
-    return UNNEST_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnorderedReceiver.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnorderedReceiver.java
index da6f07a..da0be8d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnorderedReceiver.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnorderedReceiver.java
@@ -22,7 +22,6 @@
 import org.apache.drill.exec.physical.MinorFragmentEndpoint;
 import org.apache.drill.exec.physical.base.AbstractReceiver;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -31,6 +30,8 @@
 @JsonTypeName("unordered-receiver")
 public class UnorderedReceiver extends AbstractReceiver{
 
+  public static final String OPERATOR_TYPE = "UNORDERED_RECEIVER";
+
   @JsonCreator
   public UnorderedReceiver(@JsonProperty("sender-major-fragment") int oppositeMajorFragmentId,
                            @JsonProperty("senders") List<MinorFragmentEndpoint> senders,
@@ -49,7 +50,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.UNORDERED_RECEIVER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnpivotMaps.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnpivotMaps.java
index 2210f7a..89fc5f5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnpivotMaps.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnpivotMaps.java
@@ -22,7 +22,6 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -30,6 +29,9 @@
 
 @JsonTypeName("unpivot-maps")
 public class UnpivotMaps extends AbstractSingle {
+
+  public static final String OPERATOR_TYPE = "UNPIVOT_MAPS";
+
   private final List<String> mapFieldNames;
 
   @JsonCreator
@@ -54,7 +56,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.UNPIVOT_MAPS_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Values.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Values.java
index c454d60..d4bb862 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Values.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/Values.java
@@ -33,8 +33,7 @@
 
 public class Values extends AbstractBase implements Leaf {
 
-  @SuppressWarnings("unused")
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Values.class);
+  public static final String OPERATOR_TYPE = "VALUES";
 
   private final JSONOptions content;
 
@@ -59,9 +58,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    // TODO: DRILL-6643: this implementation should be revisited
-    return -1;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/WindowPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/WindowPOP.java
index 3ddaa7f..5d272c0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/WindowPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/WindowPOP.java
@@ -26,13 +26,14 @@
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.proto.UserBitShared;
 
 import java.util.List;
 
 @JsonTypeName("window")
 public class WindowPOP extends AbstractSingle {
 
+  public static final String OPERATOR_TYPE = "WINDOW";
+
   private final List<NamedExpression> withins;
   private final List<NamedExpression> aggregations;
   private final List<Order.Ordering> orderings;
@@ -67,8 +68,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return UserBitShared.CoreOperatorType.WINDOW_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   public Bound getStart() {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertDoubleToDecimal.java
similarity index 61%
copy from exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
copy to exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertDoubleToDecimal.java
index 2023d19..6d4289f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertDoubleToDecimal.java
@@ -15,20 +15,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.pcapng.schema;
+package org.apache.drill.exec.physical.impl.scan.convert;
 
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
 
-public class DummyArrayImpl implements Column {
-  @Override
-  public TypeProtos.MajorType getMinorType() {
-    return Types.repeated(TypeProtos.MinorType.INT);
+import java.math.BigDecimal;
+
+public class ConvertDoubleToDecimal extends DirectConverter {
+
+  public ConvertDoubleToDecimal(ScalarWriter baseWriter) {
+    super(baseWriter);
   }
 
   @Override
-  public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
+  public void setDouble(double value) {
+    baseWriter.setDecimal(BigDecimal.valueOf(value));
+  }
+
+  @Override
+  public void setValue(Object value) {
+    if (value == null) {
+      setNull();
+    } else {
+      setDouble((double) value);
+    }
   }
 }
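
The new ConvertDoubleToDecimal reader above, and the Float/Int/Long copies that follow, all share the same shape: the typed setter routes the value through BigDecimal.valueOf() into ScalarWriter.setDecimal(), while setValue() handles the boxed and null cases. A usage sketch, assuming a ScalarWriter for a VARDECIMAL column has already been obtained from the row-set framework (rowWriter and the column name are assumptions, not part of the patch):

ScalarWriter decimalWriter = rowWriter.scalar("price");        // assumed TupleWriter and column
ConvertDoubleToDecimal converter = new ConvertDoubleToDecimal(decimalWriter);
converter.setDouble(12.5);    // writes BigDecimal.valueOf(12.5) to the decimal column
converter.setValue(3.25d);    // boxed Object path, same destination
converter.setValue(null);     // null routes to setNull()
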
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertFloatToDecimal.java
similarity index 61%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
rename to exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertFloatToDecimal.java
index 2023d19..411bb65 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertFloatToDecimal.java
@@ -15,20 +15,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.pcapng.schema;
+package org.apache.drill.exec.physical.impl.scan.convert;
 
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
 
-public class DummyArrayImpl implements Column {
-  @Override
-  public TypeProtos.MajorType getMinorType() {
-    return Types.repeated(TypeProtos.MinorType.INT);
+import java.math.BigDecimal;
+
+public class ConvertFloatToDecimal extends DirectConverter {
+
+  public ConvertFloatToDecimal(ScalarWriter baseWriter) {
+    super(baseWriter);
   }
 
   @Override
-  public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
+  public void setFloat(float value) {
+    baseWriter.setDecimal(BigDecimal.valueOf(value));
+  }
+
+  @Override
+  public void setValue(Object value) {
+    if (value == null) {
+      setNull();
+    } else {
+      setFloat((float) value);
+    }
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertIntToDecimal.java
similarity index 61%
copy from exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
copy to exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertIntToDecimal.java
index 2023d19..c90748c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertIntToDecimal.java
@@ -15,20 +15,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.pcapng.schema;
+package org.apache.drill.exec.physical.impl.scan.convert;
 
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
 
-public class DummyArrayImpl implements Column {
-  @Override
-  public TypeProtos.MajorType getMinorType() {
-    return Types.repeated(TypeProtos.MinorType.INT);
+import java.math.BigDecimal;
+
+public class ConvertIntToDecimal extends DirectConverter {
+
+  public ConvertIntToDecimal(ScalarWriter baseWriter) {
+    super(baseWriter);
   }
 
   @Override
-  public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
+  public void setInt(int value) {
+    baseWriter.setDecimal(BigDecimal.valueOf(value));
+  }
+
+  @Override
+  public void setValue(Object value) {
+    if (value == null) {
+      setNull();
+    } else {
+      setInt((int) value);
+    }
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertLongToDecimal.java
similarity index 61%
copy from exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
copy to exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertLongToDecimal.java
index 2023d19..92650ff 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyArrayImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/ConvertLongToDecimal.java
@@ -15,20 +15,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.pcapng.schema;
+package org.apache.drill.exec.physical.impl.scan.convert;
 
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
 
-public class DummyArrayImpl implements Column {
-  @Override
-  public TypeProtos.MajorType getMinorType() {
-    return Types.repeated(TypeProtos.MinorType.INT);
+import java.math.BigDecimal;
+
+public class ConvertLongToDecimal extends DirectConverter {
+
+  public ConvertLongToDecimal(ScalarWriter baseWriter) {
+    super(baseWriter);
   }
 
   @Override
-  public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
+  public void setLong(long value) {
+    baseWriter.setDecimal(BigDecimal.valueOf(value));
+  }
+
+  @Override
+  public void setValue(Object value) {
+    if (value == null) {
+      setNull();
+    } else {
+      setLong((long) value);
+    }
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/DirectConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/DirectConverter.java
index 5222926..344184c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/DirectConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/DirectConverter.java
@@ -64,6 +64,11 @@
   }
 
   @Override
+  public void setFloat(float value) {
+    baseWriter.setDouble(value);
+  }
+
+  @Override
   public void setDouble(double value) {
     baseWriter.setDouble(value);
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/StandardConversions.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/StandardConversions.java
index 396e741..8deb0f1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/StandardConversions.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/scan/convert/StandardConversions.java
@@ -203,9 +203,9 @@
    * <p>
    * Does not support any of the "legacy" decimal types.
    *
-   * @param inputDefn the column schema for the input column which the
+   * @param inputSchema the column schema for the input column which the
    * client code (e.g. reader) wants to produce
-   * @param outputDefn the column schema for the output vector to be produced
+   * @param outputSchema the column schema for the output vector to be produced
    * by this operator
    * @return a description of the conversion needed (if any), along with the
    * standard conversion class, if available
@@ -241,8 +241,9 @@
         case BIGINT:
         case FLOAT4:
         case FLOAT8:
-        case VARDECIMAL:
           return IMPLICIT;
+        case VARDECIMAL:
+          return new ConversionDefn(ConvertIntToDecimal.class);
         case VARCHAR:
           return new ConversionDefn(ConvertIntToString.class);
         default:
@@ -257,8 +258,9 @@
         case BIGINT:
         case FLOAT4:
         case FLOAT8:
-        case VARDECIMAL:
           return IMPLICIT;
+        case VARDECIMAL:
+          return new ConversionDefn(ConvertIntToDecimal.class);
         case VARCHAR:
           return new ConversionDefn(ConvertIntToString.class);
        default:
@@ -273,9 +275,10 @@
         case BIGINT:
         case FLOAT4:
         case FLOAT8:
-        case VARDECIMAL:
         case TIME:
           return IMPLICIT;
+        case VARDECIMAL:
+          return new ConversionDefn(ConvertIntToDecimal.class);
         case VARCHAR:
           return new ConversionDefn(ConvertIntToString.class);
         default:
@@ -290,10 +293,11 @@
           return IMPLICIT_UNSAFE;
         case FLOAT4:
         case FLOAT8:
-        case VARDECIMAL:
         case DATE:
         case TIMESTAMP:
           return IMPLICIT;
+        case VARDECIMAL:
+          return new ConversionDefn(ConvertLongToDecimal.class);
         case VARCHAR:
           return new ConversionDefn(ConvertLongToString.class);
         default:
@@ -308,8 +312,9 @@
         case BIGINT:
           return IMPLICIT_UNSAFE;
         case FLOAT8:
-        case VARDECIMAL:
           return IMPLICIT;
+        case VARDECIMAL:
+          return new ConversionDefn(ConvertFloatToDecimal.class);
         case VARCHAR:
           return new ConversionDefn(ConvertDoubleToString.class);
         default:
@@ -325,7 +330,7 @@
         case FLOAT4:
           return IMPLICIT_UNSAFE;
         case VARDECIMAL:
-          return IMPLICIT;
+          return new ConversionDefn(ConvertDoubleToDecimal.class);
         case VARCHAR:
           return new ConversionDefn(ConvertDoubleToString.class);
         default:
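
The StandardConversions hunks above change every numeric-to-VARDECIMAL target from IMPLICIT to an explicit ConversionDefn wrapping one of the new converter classes. The resulting mapping, condensed into an illustrative helper (not part of the patch; MinorType is TypeProtos.MinorType):

static Class<? extends DirectConverter> decimalConverterFor(MinorType source) {
  switch (source) {
    case TINYINT:
    case SMALLINT:
    case INT:     return ConvertIntToDecimal.class;
    case BIGINT:  return ConvertLongToDecimal.class;
    case FLOAT4:  return ConvertFloatToDecimal.class;
    case FLOAT8:  return ConvertDoubleToDecimal.class;
    default:      throw new IllegalArgumentException("No decimal conversion for " + source);
  }
}
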
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ProjectionFilter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ProjectionFilter.java
index 0d97429..9bcbe5b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ProjectionFilter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/ProjectionFilter.java
@@ -44,7 +44,7 @@
   ProjResult NOT_PROJECTED = new ProjResult(false, null, PROJECT_NONE);
   ProjResult PROJECTED = new ProjResult(true, null, PROJECT_ALL);
 
-  public static class ProjResult {
+  class ProjResult {
     public final boolean isProjected;
     public final ColumnMetadata projection;
     public final ProjectionFilter mapFilter;
@@ -70,7 +70,7 @@
 
   boolean isEmpty();
 
-  public static ProjectionFilter projectionFilter(RequestedTuple tupleProj,
+  static ProjectionFilter projectionFilter(RequestedTuple tupleProj,
       CustomErrorContext errorContext) {
     switch (tupleProj.type()) {
       case ALL:
@@ -82,7 +82,7 @@
     }
   }
 
-  public static ProjectionFilter providedSchemaFilter(RequestedTuple tupleProj,
+  static ProjectionFilter providedSchemaFilter(RequestedTuple tupleProj,
       TupleMetadata providedSchema, CustomErrorContext errorContext) {
     if (tupleProj.type() == TupleProjectionType.NONE) {
       return PROJECT_NONE;
@@ -106,7 +106,7 @@
         schemaFilter);
   }
 
-  public static ProjectionFilter definedSchemaFilter(
+  static ProjectionFilter definedSchemaFilter(
       TupleMetadata definedSchema, CustomErrorContext errorContext) {
     if (definedSchema.isEmpty()) {
       return PROJECT_NONE;
@@ -120,7 +120,7 @@
    * projects special columns (those marked as not being expanded in
    * SELECT *).
    */
-  public static class ImplicitProjectionFilter implements ProjectionFilter {
+  class ImplicitProjectionFilter implements ProjectionFilter {
     private final boolean projectAll;
 
     public ImplicitProjectionFilter(boolean projectAll) {
@@ -152,7 +152,7 @@
    * the reader column is consistent with the form of projection (map,
    * array, or plain) in the projection list.
    */
-  public static class DirectProjectionFilter implements ProjectionFilter {
+  class DirectProjectionFilter implements ProjectionFilter {
     private final RequestedTuple projectionSet;
     private final CustomErrorContext errorContext;
 
@@ -185,7 +185,7 @@
   /**
    * Schema-based projection.
    */
-  public abstract static class BaseSchemaProjectionFilter implements ProjectionFilter {
+  abstract class BaseSchemaProjectionFilter implements ProjectionFilter {
     protected final TupleMetadata schema;
     protected final CustomErrorContext errorContext;
 
@@ -233,7 +233,7 @@
    * If the column is found, enforces that the reader schema has the same type and
    * mode as the provided column.
    */
-  public static class TypeProjectionFilter extends BaseSchemaProjectionFilter {
+  class TypeProjectionFilter extends BaseSchemaProjectionFilter {
 
     public TypeProjectionFilter(TupleMetadata providedSchema, CustomErrorContext errorContext) {
       super(providedSchema, errorContext);
@@ -265,7 +265,7 @@
    * Projection filter in which a schema exactly defines the set of allowed
    * columns, and their types.
    */
-  public static class SchemaProjectionFilter extends BaseSchemaProjectionFilter {
+  class SchemaProjectionFilter extends BaseSchemaProjectionFilter {
 
     public SchemaProjectionFilter(TupleMetadata definedSchema, CustomErrorContext errorContext) {
       super(definedSchema, errorContext);
@@ -296,7 +296,7 @@
   /**
    * Compound filter for combining direct and provided schema projections.
    */
-  public static class CompoundProjectionFilter implements ProjectionFilter {
+  class CompoundProjectionFilter implements ProjectionFilter {
     private final ProjectionFilter filter1;
     private final ProjectionFilter filter2;
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
index 3cb2077..af33675 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
@@ -284,7 +284,7 @@
   public static boolean isProjectOutputRowcountUnknown(Project project) {
     for (RexNode rex : project.getProjects()) {
       if (rex instanceof RexCall) {
-        if ("flatten".equals(((RexCall) rex).getOperator().getName().toLowerCase())) {
+        if ("flatten".equalsIgnoreCase(((RexCall) rex).getOperator().getName())) {
           return true;
         }
       }
@@ -304,7 +304,7 @@
           new RexVisitorImpl<Void>(true) {
             @Override
             public Void visitCall(RexCall call) {
-              if ("convert_fromjson".equals(call.getOperator().getName().toLowerCase())) {
+              if ("convert_fromjson".equalsIgnoreCase(call.getOperator().getName())) {
                 throw new Util.FoundOne(call); /* throw exception to interrupt tree walk (this is similar to
                                              other utility methods in RexUtil.java) */
               }
@@ -321,6 +321,17 @@
     return false;
   }
 
+  public static TableScan findScan(RelNode... rels) {
+    for (RelNode rel : rels) {
+      if (rel instanceof TableScan) {
+        return (TableScan) rel;
+      } else {
+        return findScan(rel.getInputs().toArray(new RelNode[0]));
+      }
+    }
+    return null;
+  }
+
   /**
    * InputRefVisitor is a utility class used to collect all the RexInputRef nodes in a
    * RexNode.
@@ -677,8 +688,7 @@
   }
 
   public static DrillTable getDrillTable(final TableScan scan) {
-    DrillTable drillTable = null;
-    drillTable = scan.getTable().unwrap(DrillTable.class);
+    DrillTable drillTable = scan.getTable().unwrap(DrillTable.class);
     if (drillTable == null) {
       DrillTranslatableTable transTable = scan.getTable().unwrap(DrillTranslatableTable.class);
       if (transTable != null) {
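
findScan above generalizes the helper removed from AnalyzeTableHandler later in this patch, returning any Calcite TableScan rather than a DrillScanRel. A usage sketch (convertedRel is an assumed RelNode produced by planning):

TableScan scan = DrillRelOptUtil.findScan(convertedRel);
if (scan != null) {
  // Unwrap the Drill table behind the scan, e.g. to inspect its group scan.
  DrillTable drillTable = DrillRelOptUtil.getDrillTable(scan);
}
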
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillPushProjectIntoScanRule.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillPushProjectIntoScanRule.java
index 8b717c2..91875bb 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillPushProjectIntoScanRule.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillPushProjectIntoScanRule.java
@@ -28,10 +28,8 @@
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexNode;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.exec.planner.common.DrillProjectRelBase;
 import org.apache.drill.exec.planner.common.DrillRelOptUtil;
 import org.apache.drill.exec.planner.common.DrillRelOptUtil.ProjectPushInfo;
-import org.apache.drill.exec.planner.common.DrillScanRelBase;
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.ProjectPrel;
 import org.apache.drill.exec.planner.physical.ScanPrel;
@@ -89,7 +87,7 @@
         }
       };
 
-  private DrillPushProjectIntoScanRule(Class<? extends Project> projectClass, Class<? extends TableScan> scanClass, String description) {
+  protected DrillPushProjectIntoScanRule(Class<? extends Project> projectClass, Class<? extends TableScan> scanClass, String description) {
     super(RelOptHelper.some(projectClass, RelOptHelper.any(scanClass)), description);
   }
 
@@ -110,14 +108,14 @@
         return;
       }
 
-      DrillScanRelBase newScan = createScan(scan, projectPushInfo);
+      TableScan newScan = createScan(scan, projectPushInfo);
 
       List<RexNode> newProjects = new ArrayList<>();
       for (RexNode n : project.getChildExps()) {
         newProjects.add(n.accept(projectPushInfo.getInputReWriter()));
       }
 
-      DrillProjectRelBase newProject =
+      Project newProject =
           createProject(project, newScan, newProjects);
 
       if (ProjectRemoveRule.isTrivial(newProject)) {
@@ -151,7 +149,7 @@
    * @param newProjects new project expressions
    * @return new project instance
    */
-  protected DrillProjectRelBase createProject(Project project, TableScan newScan, List<RexNode> newProjects) {
+  protected Project createProject(Project project, TableScan newScan, List<RexNode> newProjects) {
     return new DrillProjectRel(project.getCluster(),
         project.getTraitSet().plus(DrillRel.DRILL_LOGICAL),
         newScan,
@@ -168,7 +166,7 @@
    * @param projectPushInfo the source of row type and fields list
    * @return new scan instance
    */
-  protected DrillScanRelBase createScan(TableScan scan, ProjectPushInfo projectPushInfo) {
+  protected TableScan createScan(TableScan scan, ProjectPushInfo projectPushInfo) {
     return new DrillScanRel(scan.getCluster(),
         scan.getTraitSet().plus(DrillRel.DRILL_LOGICAL),
         scan.getTable(),
@@ -184,7 +182,7 @@
    * @param projectPushInfo fields information
    * @return true if push project into scan can be performed, false otherwise
    */
-  private boolean canPushProjectIntoScan(RelOptTable table, ProjectPushInfo projectPushInfo) throws IOException {
+  protected boolean canPushProjectIntoScan(RelOptTable table, ProjectPushInfo projectPushInfo) throws IOException {
     DrillTable drillTable = Utilities.getDrillTable(table);
     return !Utilities.isStarQuery(projectPushInfo.getFields())
         && drillTable.getGroupScan().canPushdownProjects(projectPushInfo.getFields());
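
Widening the constructor and factory methods above from private/Drill-specific types to protected/Calcite types lets storage plugins derive their own project-push-down rules. A hypothetical subclass (names are illustrative; imports mirror DrillPushProjectIntoScanRule's, and delegating to super keeps the sketch self-contained):

public class ExamplePushProjectIntoScanRule extends DrillPushProjectIntoScanRule {

  protected ExamplePushProjectIntoScanRule(Class<? extends TableScan> scanClass) {
    super(Project.class, scanClass, "ExamplePushProjectIntoScanRule");
  }

  @Override
  protected TableScan createScan(TableScan scan, ProjectPushInfo projectPushInfo) {
    // A real plugin would build its own scan rel with the pruned field list here;
    // the default DrillScanRel-producing behaviour is reused for the sketch.
    return super.createScan(scan, projectPushInfo);
  }
}
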
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillTable.java
index 104fb6a..7a9dc0c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillTable.java
@@ -140,10 +140,6 @@
     return metadataProviderManager;
   }
 
-  public TableMetadataProvider getMetadataProvider() throws IOException {
-    return getGroupScan().getMetadataProvider();
-  }
-
   public StoragePluginConfig getStorageEngineConfig() {
     return storageEngineConfig;
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Column.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/LeafPrel.java
similarity index 68%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Column.java
rename to exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/LeafPrel.java
index 109b7dd..55a7955 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Column.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/LeafPrel.java
@@ -15,14 +15,23 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.pcapng.schema;
+package org.apache.drill.exec.planner.physical;
 
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.exec.vector.ValueVector;
+import java.util.Collections;
+import java.util.Iterator;
 
-public interface Column {
-  TypeProtos.MajorType getMinorType();
+/**
+ * Prel without children.
+ */
+public interface LeafPrel extends Prel {
 
-  void process(IEnhancedPacketBLock block, ValueVector vv, int count);
+  @Override
+  default boolean needsFinalColumnReordering() {
+    return true;
+  }
+
+  @Override
+  default Iterator<Prel> iterator() {
+    return Collections.emptyIterator();
+  }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/ScanPrel.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/ScanPrel.java
index 69cd603..82fdc4b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/ScanPrel.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/ScanPrel.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.Collections;
-import java.util.Iterator;
 import java.util.List;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptCost;
@@ -43,7 +42,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class ScanPrel extends DrillScanRelBase implements Prel, HasDistributionAffinity {
+public class ScanPrel extends DrillScanRelBase implements LeafPrel, HasDistributionAffinity {
   private static final Logger logger = LoggerFactory.getLogger(ScanPrel.class);
 
   private final RelDataType rowType;
@@ -137,11 +136,6 @@
   }
 
   @Override
-  public Iterator<Prel> iterator() {
-    return Collections.emptyIterator();
-  }
-
-  @Override
   public <T, X, E extends Throwable> T accept(PrelVisitor<T, X, E> logicalVisitor, X value) throws E {
     return logicalVisitor.visitScan(this, value);
   }
@@ -157,11 +151,6 @@
   }
 
   @Override
-  public boolean needsFinalColumnReordering() {
-    return true;
-  }
-
-  @Override
   public DistributionAffinity getDistributionAffinity() {
     return this.getGroupScan().getDistributionAffinity();
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/UnnestPrel.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/UnnestPrel.java
index a344915..456f966 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/UnnestPrel.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/UnnestPrel.java
@@ -36,11 +36,9 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
 import java.util.List;
 
-public class UnnestPrel extends DrillUnnestRelBase implements Prel {
+public class UnnestPrel extends DrillUnnestRelBase implements LeafPrel {
 
   protected final UnnestPOP unnestPOP;
 
@@ -52,11 +50,6 @@
   }
 
   @Override
-  public Iterator<Prel> iterator() {
-    return Collections.emptyIterator();
-  }
-
-  @Override
   public <T, X, E extends Throwable> T accept(PrelVisitor<T, X, E> visitor, X value) throws E {
     return visitor.visitUnnest(this, value);
   }
@@ -77,11 +70,6 @@
     return BatchSchema.SelectionVectorMode.NONE;
   }
 
-  @Override
-  public boolean needsFinalColumnReordering() {
-    return true;
-  }
-
   public Class<?> getParentClass() {
     return LateralJoinPrel.class;
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/BasePrelVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/BasePrelVisitor.java
index b260080..dd11d03 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/BasePrelVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/BasePrelVisitor.java
@@ -20,6 +20,7 @@
 import org.apache.drill.exec.planner.physical.DirectScanPrel;
 import org.apache.drill.exec.planner.physical.ExchangePrel;
 import org.apache.drill.exec.planner.physical.JoinPrel;
+import org.apache.drill.exec.planner.physical.LeafPrel;
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.ProjectPrel;
 import org.apache.drill.exec.planner.physical.ScanPrel;
@@ -81,4 +82,8 @@
     return visitPrel(prel, value);
   }
 
+  @Override
+  public RETURN visitLeaf(LeafPrel prel, EXTRA value) throws EXCEP {
+    return visitPrel(prel, value);
+  }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisitor.java
index c138c83..a97c10f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisitor.java
@@ -20,6 +20,7 @@
 import org.apache.drill.exec.planner.physical.DirectScanPrel;
 import org.apache.drill.exec.planner.physical.ExchangePrel;
 import org.apache.drill.exec.planner.physical.JoinPrel;
+import org.apache.drill.exec.planner.physical.LeafPrel;
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.ProjectPrel;
 import org.apache.drill.exec.planner.physical.ScanPrel;
@@ -42,4 +43,5 @@
   RETURN visitPrel(Prel prel, EXTRA value) throws EXCEP;
   RETURN visitUnnest(UnnestPrel prel, EXTRA value) throws EXCEP;
   RETURN visitLateral(LateralJoinPrel prel, EXTRA value) throws EXCEP;
+  RETURN visitLeaf(LeafPrel prel, EXTRA value) throws EXCEP;
 }
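
With visitLeaf added to the PrelVisitor contract above (and routed to visitPrel by default in BasePrelVisitor), visitors that care about leaf nodes override only that method. A minimal illustrative visitor (not part of the patch; assumes java.util.List plus the planner imports used above):

public class LeafCollector extends BasePrelVisitor<Void, List<LeafPrel>, RuntimeException> {

  @Override
  public Void visitLeaf(LeafPrel prel, List<LeafPrel> leaves) {
    leaves.add(prel);
    return null;
  }

  @Override
  public Void visitPrel(Prel prel, List<LeafPrel> leaves) {
    // Prel is Iterable<Prel>, so non-leaf nodes simply recurse into their children.
    for (Prel child : prel) {
      child.accept(this, leaves);
    }
    return null;
  }
}
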
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisualizerVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisualizerVisitor.java
index f02ad6f..3dfc030 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisualizerVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/PrelVisualizerVisitor.java
@@ -20,6 +20,7 @@
 import org.apache.drill.exec.planner.physical.DirectScanPrel;
 import org.apache.drill.exec.planner.physical.ExchangePrel;
 import org.apache.drill.exec.planner.physical.JoinPrel;
+import org.apache.drill.exec.planner.physical.LeafPrel;
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.ProjectPrel;
 import org.apache.drill.exec.planner.physical.ScanPrel;
@@ -246,4 +247,10 @@
     visitPrel(prel, value);
     return null;
   }
+
+  @Override
+  public Void visitLeaf(LeafPrel prel, VisualizationState value) throws Exception {
+    visitPrel(prel, value);
+    return null;
+  }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/StarColumnConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/StarColumnConverter.java
index e4b8800..8d91628 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/StarColumnConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/StarColumnConverter.java
@@ -24,6 +24,7 @@
 
 import org.apache.calcite.rel.rules.ProjectRemoveRule;
 import org.apache.drill.exec.planner.StarColumnHelper;
+import org.apache.drill.exec.planner.physical.LeafPrel;
 import org.apache.drill.exec.planner.physical.MetadataControllerPrel;
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.ProjectAllowDupPrel;
@@ -240,12 +241,17 @@
 
   @Override
   public Prel visitScan(ScanPrel scanPrel, Void value) throws RuntimeException {
-    return prefixTabNameToStar(scanPrel, value);
+    return visitLeaf(scanPrel, value);
+  }
+
+  @Override
+  public Prel visitLeaf(LeafPrel prel, Void value) throws RuntimeException {
+    return prefixTabNameToStar(prel, value);
   }
 
   @Override
   public Prel visitUnnest(UnnestPrel unnestPrel, Void value) throws RuntimeException {
-    return prefixTabNameToStar(unnestPrel, value);
+    return visitLeaf(unnestPrel, value);
   }
 
   private List<String> makeUniqueNames(List<String> names) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
index ed0dda3..5e53fd9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
@@ -92,7 +92,7 @@
   }
 
   /** Utility method to search for schema path starting from the given <i>schema</i> reference */
-  private static SchemaPlus searchSchemaTree(SchemaPlus schema, final List<String> schemaPath) {
+  public static SchemaPlus searchSchemaTree(SchemaPlus schema, final List<String> schemaPath) {
     for (String schemaName : schemaPath) {
       // schemas in Drill are case insensitive and stored in lower case
       schema = schema.getSubSchema(schemaName.toLowerCase());
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AnalyzeTableHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AnalyzeTableHandler.java
index ee08d07..c2b7e0f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AnalyzeTableHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AnalyzeTableHandler.java
@@ -37,7 +37,6 @@
 import org.apache.drill.exec.planner.common.DrillStatsTable;
 import org.apache.drill.exec.planner.logical.DrillAnalyzeRel;
 import org.apache.drill.exec.planner.logical.DrillRel;
-import org.apache.drill.exec.planner.logical.DrillScanRel;
 import org.apache.drill.exec.planner.logical.DrillScreenRel;
 import org.apache.drill.exec.planner.logical.DrillTable;
 import org.apache.drill.exec.planner.logical.DrillWriterRel;
@@ -210,16 +209,6 @@
     return new DrillScreenRel(writerRel.getCluster(), writerRel.getTraitSet(), writerRel);
   }
 
-  public static DrillScanRel findScan(RelNode... rels) {
-    for (RelNode rel : rels) {
-      if (rel instanceof DrillScanRel) {
-        return (DrillScanRel) rel;
-      } else {
-        return findScan(rel.getInputs().toArray(new RelNode[0]));
-      }
-    }
-    return null;
-  }
   // Make sure no unsupported features in ANALYZE statement are used
   private static void verifyNoUnsupportedFunctions(final SqlAnalyzeTable analyzeTable) {
     // throw unsupported error for functions that are not yet implemented
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/MetastoreAnalyzeTableHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/MetastoreAnalyzeTableHandler.java
index 23a6b1b..e8378e6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/MetastoreAnalyzeTableHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/MetastoreAnalyzeTableHandler.java
@@ -45,6 +45,7 @@
 import org.apache.drill.exec.metastore.analyze.MetastoreAnalyzeConstants;
 import org.apache.drill.exec.physical.PhysicalPlan;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.planner.common.DrillRelOptUtil;
 import org.apache.drill.exec.planner.logical.DrillAnalyzeRel;
 import org.apache.drill.exec.planner.logical.DrillRel;
 import org.apache.drill.exec.planner.logical.DrillScreenRel;
@@ -237,7 +238,7 @@
     if (metastoreTableInfo.isExists()) {
       RelNode finalRelNode = relNode;
       CheckedSupplier<TableScan, SqlUnsupportedException> tableScanSupplier =
-          () -> AnalyzeTableHandler.findScan(convertToDrel(finalRelNode.getInput(0)));
+          () -> DrillRelOptUtil.findScan(convertToDrel(finalRelNode.getInput(0)));
 
       MetadataInfoCollector metadataInfoCollector = analyzeInfoProvider.getMetadataInfoCollector(basicRequests,
           tableInfo, (FormatSelection) table.getSelection(), context.getPlannerSettings(),
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/ColumnConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/ColumnConverter.java
new file mode 100644
index 0000000..3688972
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/ColumnConverter.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.record;
+
+import java.math.BigDecimal;
+import java.sql.Date;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.MapColumnMetadata;
+import org.apache.drill.exec.record.metadata.MetadataUtils;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.record.metadata.TupleSchema;
+import org.apache.drill.exec.vector.accessor.ArrayWriter;
+import org.apache.drill.exec.vector.accessor.DictWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+
+/**
+ * Converts and sets given value into the specific column writer.
+ */
+public interface ColumnConverter {
+
+  void convert(Object value);
+
+  /**
+   * Does nothing, is used when column is not projected to avoid unnecessary
+   * column values conversions and writes.
+   */
+  class DummyColumnConverter implements ColumnConverter {
+
+    public static final DummyColumnConverter INSTANCE = new DummyColumnConverter();
+
+    @Override
+    public void convert(Object value) {
+      // do nothing
+    }
+  }
+
+  /**
+   * Converts and writes scalar values using provided {@link #valueConverter}.
+   * {@link #valueConverter} has different implementation depending
+   * on the scalar value type.
+   */
+  class ScalarColumnConverter implements ColumnConverter {
+
+    private final Consumer<Object> valueConverter;
+
+    public ScalarColumnConverter(Consumer<Object> valueConverter) {
+      this.valueConverter = valueConverter;
+    }
+
+    @Override
+    public void convert(Object value) {
+      if (value == null) {
+        return;
+      }
+
+      valueConverter.accept(value);
+    }
+  }
+
+  /**
+   * Converts and writes array values using {@link #valueConverter}
+   * into {@link #arrayWriter}.
+   */
+  class ArrayColumnConverter implements ColumnConverter {
+
+    private final ArrayWriter arrayWriter;
+    private final ColumnConverter valueConverter;
+
+    public ArrayColumnConverter(ArrayWriter arrayWriter, ColumnConverter valueConverter) {
+      this.arrayWriter = arrayWriter;
+      this.valueConverter = valueConverter;
+    }
+
+    @Override
+    public void convert(Object value) {
+      if (value == null || !arrayWriter.isProjected()) {
+        return;
+      }
+
+      Iterable<?> array = (Iterable<?>) value;
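+      // Convert each element and save it so the array writer advances to the
+      // next position.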
+      array.forEach(arrayValue -> {
+        valueConverter.convert(arrayValue);
+        arrayWriter.save();
+      });
+    }
+  }
+
+  /**
+   * Converts and writes all map children using the provided {@link #converters}.
+   * If a converter for a column is missing, it is created lazily based on the
+   * column value and the provided schema.
+   */
+  class MapColumnConverter implements ColumnConverter {
+
+    private final ColumnConverterFactory factory;
+    private final TupleMetadata providedSchema;
+    private final TupleWriter tupleWriter;
+    private final Map<String, ColumnConverter> converters;
+
+    public MapColumnConverter(ColumnConverterFactory factory,
+        TupleMetadata providedSchema,
+        TupleWriter tupleWriter, Map<String, ColumnConverter> converters) {
+      this.factory = factory;
+      this.providedSchema = providedSchema;
+      this.tupleWriter = tupleWriter;
+      this.converters = new HashMap<>(converters);
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public void convert(Object value) {
+      if (value == null) {
+        return;
+      }
+
+      Map<String, Object> record = (Map<String, Object>) value;
+
+      record.forEach(this::processValue);
+    }
+
+    private void processValue(String name, Object columnValue) {
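+      // Converters are created lazily, the first time a column name is seen
+      // with a non-null value; until then the column is skipped.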
+      ColumnConverter columnConverter = converters.computeIfAbsent(name,
+          columnName -> getColumnConverter(columnValue, columnName));
+      if (columnConverter != null) {
+        columnConverter.convert(columnValue);
+      }
+    }
+
+    private ColumnConverter getColumnConverter(Object columnValue, String columnName) {
+      if (columnValue != null) {
+        ColumnMetadata providedColumn = providedSchema != null
+            ? providedSchema.metadata(columnName)
+            : null;
+        ColumnMetadata column = buildColumnMetadata(columnName, columnValue);
+        if (column != null) {
+          tupleWriter.addColumn(providedColumn != null ? providedColumn : column);
+          return factory.getConverter(providedSchema, column, tupleWriter.column(columnName));
+        }
+      }
+      return null;
+    }
+
+    @SuppressWarnings("unchecked")
+    private ColumnMetadata buildColumnMetadata(String name, Object value) {
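+      // Derive Drill column metadata from the runtime class of the value:
+      // maps become MAP columns, non-empty lists and arrays become repeated
+      // columns (empty ones are skipped since the element type is unknown),
+      // and anything else is treated as an optional scalar.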
+      Class<?> clazz = value.getClass();
+      if (Map.class.isAssignableFrom(clazz)) {
+        return buildMapColumnMetadata(name, (Map<String, Object>) value);
+      } else if (List.class.isAssignableFrom(clazz)) {
+        List<?> list = (List<?>) value;
+        if (!list.isEmpty()) {
+          Object innerValue = list.iterator().next();
+          return buildListColumnMetadata(name, innerValue);
+        } else {
+          return null;
+        }
+      } else if (clazz.isArray()) {
+        Object[] array = (Object[]) value;
+        if (array.length > 0) {
+          return buildListColumnMetadata(name, array[0]);
+        } else {
+          return null;
+        }
+      }
+      return MetadataUtils.newScalar(name, getScalarMinorType(clazz), DataMode.OPTIONAL);
+    }
+
+    private MapColumnMetadata buildMapColumnMetadata(String name, Map<String, Object> map) {
+      TupleMetadata schema = new TupleSchema();
+      map.forEach((key, value) -> {
+        if (value != null) {
+          schema.addColumn(buildColumnMetadata(key, value));
+        }
+      });
+
+      return MetadataUtils.newMap(name, schema);
+    }
+
+    private ColumnMetadata buildListColumnMetadata(String name, Object innerValue) {
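+      // A nested list (or array of arrays) becomes a repeated list, a list of
+      // maps becomes a repeated map, and anything else becomes a repeated
+      // scalar derived from the element class.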
+      Class<?> clazz = innerValue.getClass();
+      Class<?> componentType = clazz.getComponentType();
+      if (List.class.isAssignableFrom(clazz) || componentType != null && componentType.isArray()) {
+        return MetadataUtils.newRepeatedList(name, buildColumnMetadata(name, innerValue));
+      } else if (Map.class.isAssignableFrom(clazz)) {
+        return MetadataUtils.newMapArray(name);
+      } else {
+        return MetadataUtils.newScalar(name, getScalarMinorType(clazz), DataMode.REPEATED);
+      }
+    }
+
+    private MinorType getScalarMinorType(Class<?> clazz) {
+      if (clazz == byte.class || clazz == Byte.class) {
+        return MinorType.TINYINT;
+      } else if (clazz == short.class || clazz == Short.class) {
+        return MinorType.SMALLINT;
+      } else if (clazz == int.class || clazz == Integer.class) {
+        return MinorType.INT;
+      } else if (clazz == long.class || clazz == Long.class) {
+        return MinorType.BIGINT;
+      } else if (clazz == Date.class) {
+        return MinorType.DATE;
+      } else if (clazz == Time.class) {
+        return MinorType.TIME;
+      } else if (clazz == Timestamp.class) {
+        return MinorType.TIMESTAMP;
+      } else if (clazz == float.class || clazz == Float.class) {
+        return MinorType.FLOAT4;
+      } else if (clazz == double.class || clazz == Double.class) {
+        return MinorType.FLOAT8;
+      } else if (clazz == boolean.class || clazz == Boolean.class) {
+        return MinorType.BIT;
+      } else if (clazz == char.class || clazz == Character.class) {
+        return MinorType.VARCHAR;
+      } else if (clazz == String.class) {
+        return MinorType.VARCHAR;
+      } else if (clazz == byte[].class) {
+        return MinorType.VARBINARY;
+      } else if (clazz == BigDecimal.class) {
+        return MinorType.VARDECIMAL;
+      }
+      throw new IllegalArgumentException("Cannot determine minor type for " + clazz.getName());
+    }
+  }
+
+  /**
+   * Converts and writes dict values using the provided key / value converters.
+   */
+  class DictColumnConverter implements ColumnConverter {
+
+    private final DictWriter dictWriter;
+    private final ColumnConverter keyConverter;
+    private final ColumnConverter valueConverter;
+
+    public DictColumnConverter(DictWriter dictWriter, ColumnConverter keyConverter, ColumnConverter valueConverter) {
+      this.dictWriter = dictWriter;
+      this.keyConverter = keyConverter;
+      this.valueConverter = valueConverter;
+    }
+
+    @Override
+    public void convert(Object value) {
+      if (value == null) {
+        return;
+      }
+
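+      // A dict value arrives as a Java Map; each entry is converted and
+      // saved as a key / value pair.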
+      @SuppressWarnings("unchecked") Map<Object, Object> map = (Map<Object, Object>) value;
+      map.forEach((key, val) -> {
+        keyConverter.convert(key);
+        valueConverter.convert(val);
+        dictWriter.save();
+      });
+    }
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/ColumnConverterFactory.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/ColumnConverterFactory.java
new file mode 100644
index 0000000..b408476
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/ColumnConverterFactory.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.record;
+
+import org.apache.drill.exec.physical.impl.scan.convert.StandardConversions;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.record.ColumnConverter.ArrayColumnConverter;
+import org.apache.drill.exec.record.ColumnConverter.DictColumnConverter;
+import org.apache.drill.exec.record.ColumnConverter.DummyColumnConverter;
+import org.apache.drill.exec.record.ColumnConverter.MapColumnConverter;
+import org.apache.drill.exec.record.ColumnConverter.ScalarColumnConverter;
+import org.apache.drill.exec.vector.accessor.ArrayWriter;
+import org.apache.drill.exec.vector.accessor.DictWriter;
+import org.apache.drill.exec.vector.accessor.ObjectWriter;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.drill.exec.vector.accessor.ValueWriter;
+import org.apache.drill.exec.vector.complex.DictVector;
+
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+public class ColumnConverterFactory {
+
+  private final StandardConversions standardConversions;
+
+  public ColumnConverterFactory(TupleMetadata providedSchema) {
+    if (providedSchema == null) {
+      standardConversions = null;
+    } else {
+      standardConversions = StandardConversions.builder().withSchema(providedSchema).build();
+    }
+  }
+
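+  /**
+   * Creates the converter for the top-level row, which is handled as a map
+   * of the reader schema columns.
+   */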
+  public ColumnConverter getRootConverter(TupleMetadata providedSchema,
+      TupleMetadata readerSchema, TupleWriter tupleWriter) {
+    return getMapConverter(providedSchema, readerSchema, tupleWriter);
+  }
+
+  /**
+   * Based on the column type, creates a corresponding column converter
+   * which holds the conversion logic and the writer to set converted data into.
+   * For columns which are not projected, {@link DummyColumnConverter} is used.
+   *
+   * @param providedSchema provided schema, may be null
+   * @param readerSchema column metadata
+   * @param writer column writer
+   * @return column converter
+   */
+  public ColumnConverter getConverter(TupleMetadata providedSchema,
+      ColumnMetadata readerSchema, ObjectWriter writer) {
+    if (!writer.isProjected()) {
+      return DummyColumnConverter.INSTANCE;
+    }
+
+    if (readerSchema.isArray()) {
+      return getArrayConverter(providedSchema,
+          readerSchema, writer.array());
+    }
+
+    if (readerSchema.isMap()) {
+      return getMapConverter(
+          providedChildSchema(providedSchema, readerSchema),
+          readerSchema.tupleSchema(), writer.tuple());
+    }
+
+    if (readerSchema.isDict()) {
+      return getDictConverter(
+          providedChildSchema(providedSchema, readerSchema),
+          readerSchema.tupleSchema(), writer.dict());
+    }
+
+    return getScalarConverter(readerSchema, writer.scalar());
+  }
+
+  private TupleMetadata providedChildSchema(TupleMetadata providedSchema,
+      ColumnMetadata readerSchema) {
+    return providedSchema == null ? null :
+      providedSchema.metadata(readerSchema.name()).tupleSchema();
+  }
+
+  private ColumnConverter getArrayConverter(TupleMetadata providedSchema,
+      ColumnMetadata readerSchema, ArrayWriter arrayWriter) {
+    ObjectWriter valueWriter = arrayWriter.entry();
+    ColumnConverter valueConverter;
+    if (readerSchema.isMap()) {
+      valueConverter = getMapConverter(providedSchema,
+          readerSchema.tupleSchema(), valueWriter.tuple());
+    } else if (readerSchema.isDict()) {
+      valueConverter = getDictConverter(providedSchema,
+          readerSchema.tupleSchema(), valueWriter.dict());
+    } else if (readerSchema.isMultiList()) {
+      valueConverter = getConverter(null, readerSchema.childSchema(), valueWriter);
+    } else {
+      valueConverter = getScalarConverter(readerSchema, valueWriter.scalar());
+    }
+    return new ArrayColumnConverter(arrayWriter, valueConverter);
+  }
+
+  protected ColumnConverter getMapConverter(TupleMetadata providedSchema,
+      TupleMetadata readerSchema, TupleWriter tupleWriter) {
+    Map<String, ColumnConverter> converters = StreamSupport.stream(readerSchema.spliterator(), false)
+        .collect(Collectors.toMap(
+            ColumnMetadata::name,
+            columnMetadata ->
+                getConverter(providedSchema, columnMetadata, tupleWriter.column(columnMetadata.name()))));
+
+    return new MapColumnConverter(this, providedSchema, tupleWriter, converters);
+  }
+
+  private ColumnConverter getDictConverter(TupleMetadata providedSchema,
+      TupleMetadata readerSchema, DictWriter dictWriter) {
+    ColumnConverter keyConverter = getScalarConverter(
+        readerSchema.metadata(DictVector.FIELD_KEY_NAME), dictWriter.keyWriter());
+    ColumnConverter valueConverter = getConverter(providedSchema,
+        readerSchema.metadata(DictVector.FIELD_VALUE_NAME), dictWriter.valueWriter());
+    return new DictColumnConverter(dictWriter, keyConverter, valueConverter);
+  }
+
+  private ColumnConverter getScalarConverter(ColumnMetadata readerSchema, ScalarWriter scalarWriter) {
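+    // When a provided schema exists, wrap the scalar writer with a standard
+    // conversion so values are coerced to the provided column type.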
+    ValueWriter valueWriter;
+    if (standardConversions == null) {
+      valueWriter = scalarWriter;
+    } else {
+      valueWriter = standardConversions.converterFor(scalarWriter, readerSchema);
+    }
+    return buildScalar(readerSchema, valueWriter);
+  }
+
+  public ScalarColumnConverter buildScalar(ColumnMetadata readerSchema, ValueWriter writer) {
+    return new ScalarColumnConverter(writer::setValue);
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 3eb643c..eda7d9d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -291,7 +291,7 @@
       new OptionDefinition(ExecConstants.CPU_LOAD_AVERAGE),
       new OptionDefinition(ExecConstants.ENABLE_VECTOR_VALIDATOR),
       new OptionDefinition(ExecConstants.ENABLE_ITERATOR_VALIDATOR),
-      new OptionDefinition(ExecConstants.OUTPUT_BATCH_SIZE_VALIDATOR, new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM, true, false)),
+      new OptionDefinition(ExecConstants.OUTPUT_BATCH_SIZE_VALIDATOR, new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM_AND_SESSION, true, false)),
       new OptionDefinition(ExecConstants.STATS_LOGGING_BATCH_SIZE_VALIDATOR, new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM_AND_SESSION, true, true)),
       new OptionDefinition(ExecConstants.STATS_LOGGING_BATCH_FG_SIZE_VALIDATOR,new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM_AND_SESSION, true, true)),
       new OptionDefinition(ExecConstants.STATS_LOGGING_BATCH_OPERATOR_VALIDATOR,new OptionMetaData(OptionValue.AccessibleScopes.SYSTEM_AND_SESSION, true, true)),
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/CoreOperatorType.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/CoreOperatorType.java
new file mode 100644
index 0000000..254817c
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/CoreOperatorType.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.server.rest.profile;
+
+
+import java.util.Arrays;
+
+/**
+ * This class is used for backward compatibility when reading older query profiles that
+ * stored the operator id instead of its name.
+ * <b>Please do not update this class. It will be removed in Drill 2.0.</b>
+ */
+public enum CoreOperatorType {
+
+  /**
+   * <code>SINGLE_SENDER = 0;</code>
+   */
+  SINGLE_SENDER(0),
+  /**
+   * <code>BROADCAST_SENDER = 1;</code>
+   */
+  BROADCAST_SENDER(1),
+  /**
+   * <code>FILTER = 2;</code>
+   */
+  FILTER(2),
+  /**
+   * <code>HASH_AGGREGATE = 3;</code>
+   */
+  HASH_AGGREGATE(3),
+  /**
+   * <code>HASH_JOIN = 4;</code>
+   */
+  HASH_JOIN(4),
+  /**
+   * <code>MERGE_JOIN = 5;</code>
+   */
+  MERGE_JOIN(5),
+  /**
+   * <code>HASH_PARTITION_SENDER = 6;</code>
+   */
+  HASH_PARTITION_SENDER(6),
+  /**
+   * <code>LIMIT = 7;</code>
+   */
+  LIMIT(7),
+  /**
+   * <code>MERGING_RECEIVER = 8;</code>
+   */
+  MERGING_RECEIVER(8),
+  /**
+   * <code>ORDERED_PARTITION_SENDER = 9;</code>
+   */
+  ORDERED_PARTITION_SENDER(9),
+  /**
+   * <code>PROJECT = 10;</code>
+   */
+  PROJECT(10),
+  /**
+   * <code>UNORDERED_RECEIVER = 11;</code>
+   */
+  UNORDERED_RECEIVER(11),
+  /**
+   * <code>RANGE_PARTITION_SENDER = 12;</code>
+   */
+  RANGE_PARTITION_SENDER(12),
+  /**
+   * <code>SCREEN = 13;</code>
+   */
+  SCREEN(13),
+  /**
+   * <code>SELECTION_VECTOR_REMOVER = 14;</code>
+   */
+  SELECTION_VECTOR_REMOVER(14),
+  /**
+   * <code>STREAMING_AGGREGATE = 15;</code>
+   */
+  STREAMING_AGGREGATE(15),
+  /**
+   * <code>TOP_N_SORT = 16;</code>
+   */
+  TOP_N_SORT(16),
+  /**
+   * <code>EXTERNAL_SORT = 17;</code>
+   */
+  EXTERNAL_SORT(17),
+  /**
+   * <code>TRACE = 18;</code>
+   */
+  TRACE(18),
+  /**
+   * <code>UNION = 19;</code>
+   */
+  UNION(19),
+  /**
+   * <code>OLD_SORT = 20;</code>
+   */
+  OLD_SORT(20),
+  /**
+   * <code>PARQUET_ROW_GROUP_SCAN = 21;</code>
+   */
+  PARQUET_ROW_GROUP_SCAN(21),
+  /**
+   * <code>HIVE_SUB_SCAN = 22;</code>
+   */
+  HIVE_SUB_SCAN(22),
+  /**
+   * <code>SYSTEM_TABLE_SCAN = 23;</code>
+   */
+  SYSTEM_TABLE_SCAN(23),
+  /**
+   * <code>MOCK_SUB_SCAN = 24;</code>
+   */
+  MOCK_SUB_SCAN(24),
+  /**
+   * <code>PARQUET_WRITER = 25;</code>
+   */
+  PARQUET_WRITER(25),
+  /**
+   * <code>DIRECT_SUB_SCAN = 26;</code>
+   */
+  DIRECT_SUB_SCAN(26),
+  /**
+   * <code>TEXT_WRITER = 27;</code>
+   */
+  TEXT_WRITER(27),
+  /**
+   * <code>TEXT_SUB_SCAN = 28;</code>
+   */
+  TEXT_SUB_SCAN(28),
+  /**
+   * <code>JSON_SUB_SCAN = 29;</code>
+   */
+  JSON_SUB_SCAN(29),
+  /**
+   * <code>INFO_SCHEMA_SUB_SCAN = 30;</code>
+   */
+  INFO_SCHEMA_SUB_SCAN(30),
+  /**
+   * <code>COMPLEX_TO_JSON = 31;</code>
+   */
+  COMPLEX_TO_JSON(31),
+  /**
+   * <code>PRODUCER_CONSUMER = 32;</code>
+   */
+  PRODUCER_CONSUMER(32),
+  /**
+   * <code>HBASE_SUB_SCAN = 33;</code>
+   */
+  HBASE_SUB_SCAN(33),
+  /**
+   * <code>WINDOW = 34;</code>
+   */
+  WINDOW(34),
+  /**
+   * <code>NESTED_LOOP_JOIN = 35;</code>
+   */
+  NESTED_LOOP_JOIN(35),
+  /**
+   * <code>AVRO_SUB_SCAN = 36;</code>
+   */
+  AVRO_SUB_SCAN(36),
+  /**
+   * <code>PCAP_SUB_SCAN = 37;</code>
+   */
+  PCAP_SUB_SCAN(37),
+  /**
+   * <code>KAFKA_SUB_SCAN = 38;</code>
+   */
+  KAFKA_SUB_SCAN(38),
+  /**
+   * <code>KUDU_SUB_SCAN = 39;</code>
+   */
+  KUDU_SUB_SCAN(39),
+  /**
+   * <code>FLATTEN = 40;</code>
+   */
+  FLATTEN(40),
+  /**
+   * <code>LATERAL_JOIN = 41;</code>
+   */
+  LATERAL_JOIN(41),
+  /**
+   * <code>UNNEST = 42;</code>
+   */
+  UNNEST(42),
+  /**
+   * <code>HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN = 43;</code>
+   */
+  HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN(43),
+  /**
+   * <code>JDBC_SCAN = 44;</code>
+   */
+  JDBC_SCAN(44),
+  /**
+   * <code>REGEX_SUB_SCAN = 45;</code>
+   */
+  REGEX_SUB_SCAN(45),
+  /**
+   * <code>MAPRDB_SUB_SCAN = 46;</code>
+   */
+  MAPRDB_SUB_SCAN(46),
+  /**
+   * <code>MONGO_SUB_SCAN = 47;</code>
+   */
+  MONGO_SUB_SCAN(47),
+  /**
+   * <code>KUDU_WRITER = 48;</code>
+   */
+  KUDU_WRITER(48),
+  /**
+   * <code>OPEN_TSDB_SUB_SCAN = 49;</code>
+   */
+  OPEN_TSDB_SUB_SCAN(49),
+  /**
+   * <code>JSON_WRITER = 50;</code>
+   */
+  JSON_WRITER(50),
+  /**
+   * <code>HTPPD_LOG_SUB_SCAN = 51;</code>
+   */
+  HTPPD_LOG_SUB_SCAN(51),
+  /**
+   * <code>IMAGE_SUB_SCAN = 52;</code>
+   */
+  IMAGE_SUB_SCAN(52),
+  /**
+   * <code>SEQUENCE_SUB_SCAN = 53;</code>
+   */
+  SEQUENCE_SUB_SCAN(53),
+  /**
+   * <code>PARTITION_LIMIT = 54;</code>
+   */
+  PARTITION_LIMIT(54),
+  /**
+   * <code>PCAPNG_SUB_SCAN = 55;</code>
+   */
+  PCAPNG_SUB_SCAN(55),
+  /**
+   * <code>RUNTIME_FILTER = 56;</code>
+   */
+  RUNTIME_FILTER(56),
+  /**
+   * <code>ROWKEY_JOIN = 57;</code>
+   */
+  ROWKEY_JOIN(57),
+  /**
+   * <code>SYSLOG_SUB_SCAN = 58;</code>
+   */
+  SYSLOG_SUB_SCAN(58),
+  /**
+   * <code>STATISTICS_AGGREGATE = 59;</code>
+   */
+  STATISTICS_AGGREGATE(59),
+  /**
+   * <code>UNPIVOT_MAPS = 60;</code>
+   */
+  UNPIVOT_MAPS(60),
+  /**
+   * <code>STATISTICS_MERGE = 61;</code>
+   */
+  STATISTICS_MERGE(61),
+  /**
+   * <code>LTSV_SUB_SCAN = 62;</code>
+   */
+  LTSV_SUB_SCAN(62),
+  /**
+   * <code>HDF5_SUB_SCAN = 63;</code>
+   */
+  HDF5_SUB_SCAN(63),
+  /**
+   * <code>EXCEL_SUB_SCAN = 64;</code>
+   */
+  EXCEL_SUB_SCAN(64),
+  /**
+   * <code>SHP_SUB_SCAN = 65;</code>
+   */
+  SHP_SUB_SCAN(65),
+  /**
+   * <code>METADATA_HANDLER = 66;</code>
+   */
+  METADATA_HANDLER(66),
+  /**
+   * <code>METADATA_CONTROLLER = 67;</code>
+   */
+  METADATA_CONTROLLER(67),
+  /**
+   * <code>DRUID_SUB_SCAN = 68;</code>
+   */
+  DRUID_SUB_SCAN(68),
+  /**
+   * <code>SPSS_SUB_SCAN = 69;</code>
+   */
+  SPSS_SUB_SCAN(69),
+  /**
+   * <code>HTTP_SUB_SCAN = 70;</code>
+   */
+  HTTP_SUB_SCAN(70),
+  /**
+   * <code>XML_SUB_SCAN = 71;</code>
+   */
+  XML_SUB_SCAN(71);
+
+  private final int value;
+
+  CoreOperatorType(int value) {
+    this.value = value;
+  }
+
+  public int getId() {
+    return value;
+  }
+
+  public static CoreOperatorType valueOf(int id) {
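+    // Enum constants are declared in id order, so the id doubles as the
+    // ordinal index; out-of-range ids yield null.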
+    if (id >= 0 && id <= XML_SUB_SCAN.getId()) {
+      return values()[id];
+    }
+    return null;
+  }
+
+  public static CoreOperatorType forName(String name) {
+    return Arrays.stream(values())
+        .filter(value -> value.name().equalsIgnoreCase(name))
+        .findFirst()
+        .orElse(null);
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java
index 2e593b6..25fad85 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/OperatorWrapper.java
@@ -23,6 +23,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
@@ -31,7 +32,9 @@
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.OperatorMetricRegistry;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
+import org.apache.drill.exec.physical.config.ExternalSort;
+import org.apache.drill.exec.physical.config.HashAggregate;
+import org.apache.drill.exec.physical.config.HashJoinPOP;
 import org.apache.drill.exec.proto.UserBitShared.MetricValue;
 import org.apache.drill.exec.proto.UserBitShared.OperatorProfile;
 import org.apache.drill.exec.proto.UserBitShared.StreamProfile;
@@ -51,7 +54,7 @@
   private final int major;
   private final List<ImmutablePair<ImmutablePair<OperatorProfile, Integer>, String>> opsAndHosts; // [(operatorProfile --> minorFragment number,host), ...]
   private final OperatorProfile firstProfile;
-  private final CoreOperatorType operatorType;
+  private final String operatorType;
   private final String operatorName;
   private final int size;
   private final int timeSkewMin;
@@ -70,12 +73,19 @@
     Preconditions.checkArgument(opsAndHostsList.size() > 0);
     this.major = major;
     firstProfile = opsAndHostsList.get(0).getLeft().getLeft();
-    operatorType = CoreOperatorType.valueOf(firstProfile.getOperatorType());
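+    // Newer profiles store the operator type name directly; older profiles
+    // store only the numeric id, which is mapped back to a name here.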
+    if (firstProfile.hasOperatorTypeName()) {
+      operatorType = firstProfile.getOperatorTypeName();
+    } else {
+      CoreOperatorType operatorType = CoreOperatorType.valueOf(firstProfile.getOperatorType());
+      this.operatorType = operatorType != null
+          ? Objects.requireNonNull(operatorType).name()
+          : null;
+    }
     //Update Name from Physical Map
     String path = new OperatorPathBuilder().setMajor(major).setOperator(firstProfile).build();
     //Use Plan Extracted Operator Names if available
     String extractedOpName = phyOperMap.get(path);
-    String inferredOpName = operatorType == null ? UNKNOWN_OPERATOR : operatorType.toString();
+    String inferredOpName = operatorType == null ? UNKNOWN_OPERATOR : operatorType;
     //Revert to inferred names for exceptional cases
     // 1. Extracted 'FLATTEN' operator is NULL
     // 2. Extracted 'SCAN' could be a PARQUET_ROW_GROUP_SCAN, or KAFKA_SUB_SCAN, or etc.
@@ -225,7 +235,7 @@
     tb.appendNanos(avgProcTime);
     long maxProcTime = longProcess.getLeft().getProcessNanos();
     //Calculating skew of longest processing fragment w.r.t. average
-    double maxSkew = (avgProcTime > 0) ? maxProcTime/Double.valueOf(avgProcTime) : 0.0d;
+    double maxSkew = (avgProcTime > 0) ? maxProcTime / (double) avgProcTime : 0.0d;
     //Marking skew if both thresholds are crossed
     if (avgProcTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > timeSkewRatio ) {
       timeSkewMap = new HashMap<>();
@@ -256,7 +266,7 @@
     //Skewed Wait Warning
     timeSkewMap = null; //Resetting
     //Calculating skew of longest waiting fragment w.r.t. average
-    maxSkew = (avgWaitTime > 0) ? maxWaitTime/Double.valueOf(avgWaitTime) : 0.0d;
+    maxSkew = (avgWaitTime > 0) ? maxWaitTime / (double) avgWaitTime : 0.0d;
     //Marking skew if both thresholds are crossed
     if (avgWaitTime > TimeUnit.SECONDS.toNanos(timeSkewMin) && maxSkew > waitSkewRatio) {
       timeSkewMap = new HashMap<>();
@@ -303,7 +313,7 @@
    * @param operatorType
    * @return index of spill metric
    */
-  private int getSpillCycleMetricIndex(CoreOperatorType operatorType) {
+  private int getSpillCycleMetricIndex(String operatorType) {
     // TODO: DRILL-6642, replace null values for ProtocolMessageEnum with UNRECOGNIZED NullValue to avoid null checks
     if (operatorType == null) {
       return NO_SPILL_METRIC_INDEX;
@@ -311,11 +321,11 @@
     String metricName;
 
     switch (operatorType) {
-    case EXTERNAL_SORT:
+    case ExternalSort.OPERATOR_TYPE:
       metricName = "SPILL_COUNT";
       break;
-    case HASH_AGGREGATE:
-    case HASH_JOIN:
+    case HashAggregate.OPERATOR_TYPE:
+    case HashJoinPOP.OPERATOR_TYPE:
       metricName = "SPILL_CYCLE";
       break;
     default:
@@ -323,7 +333,7 @@
     }
 
     int metricIndex = 0; //Default
-    String[] metricNames = OperatorMetricRegistry.getMetricNames(operatorType.getNumber());
+    String[] metricNames = OperatorMetricRegistry.getMetricNames(operatorType);
     for (String name : metricNames) {
       if (name.equalsIgnoreCase(metricName)) {
         return metricIndex;
@@ -338,7 +348,7 @@
     if (operatorType == null) {
       return "";
     }
-    final String[] metricNames = OperatorMetricRegistry.getMetricNames(operatorType.getNumber());
+    final String[] metricNames = OperatorMetricRegistry.getMetricNames(operatorType);
     if (metricNames == null) {
       return "";
     }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java
index e5c4133..b0838da 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileWrapper.java
@@ -31,7 +31,6 @@
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.proto.UserBitShared.MajorFragmentProfile;
 import org.apache.drill.exec.proto.UserBitShared.MinorFragmentProfile;
 import org.apache.drill.exec.proto.UserBitShared.OperatorProfile;
@@ -68,7 +67,7 @@
   private Map<String, String> physicalOperatorMap;
   private final String noProgressWarningThreshold;
   private final int defaultAutoLimit;
-  private boolean showEstimatedRows;
+  private final boolean showEstimatedRows;
   private final String csrfToken;
 
   public ProfileWrapper(final QueryProfile profile, DrillConfig drillConfig, HttpServletRequest request) {
@@ -324,16 +323,6 @@
     return tb.build();
   }
 
-  public String getOperatorsJSON() {
-    final StringBuilder sb = new StringBuilder("{");
-    String sep = "";
-    for (final CoreOperatorType op : CoreOperatorType.values()) {
-      sb.append(String.format("%s\"%d\" : \"%s\"", sep, op.ordinal(), op));
-      sep = ", ";
-    }
-    return sb.append("}").toString();
-  }
-
   public Map<String, String> getOptions() {
     return getOptions(o -> true);
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java
index 6ca2732..390a32c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistryImpl.java
@@ -420,9 +420,13 @@
       PluginHandle entry = restoreFromEphemeral(name, config);
       try {
         entry.plugin();
+      } catch (UserException e) {
+        // Provide helpful error messages.
+        throw new PluginException(e.getOriginalMessage(), e);
       } catch (Exception e) {
         throw new PluginException(String.format(
-            "Invalid plugin config for '%s'", name), e);
+            "Invalid plugin config for '%s', "
+          + "Please switch to Logs panel from the UI then check the log.", name), e);
       }
       oldEntry = pluginCache.put(entry);
     } else {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/SubsetRemover.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SubsetRemover.java
new file mode 100644
index 0000000..69206d8
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/SubsetRemover.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store;
+
+import org.apache.calcite.plan.volcano.RelSubset;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelShuttle;
+import org.apache.calcite.rel.RelShuttleImpl;
+
+/**
+ * Removes {@link RelSubset} nodes from the plan.
+ * Useful when an operation must be performed on the plan during the
+ * planning phase but is not supported by {@link RelSubset}.
+ */
+public class SubsetRemover extends RelShuttleImpl {
+  public static final RelShuttle INSTANCE = new SubsetRemover();
+
+  @Override
+  public RelNode visit(RelNode other) {
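+    // Replace each RelSubset with its best plan alternative and keep
+    // visiting the node's inputs.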
+    if (other instanceof RelSubset) {
+      return ((RelSubset) other).getBest().accept(this);
+    } else {
+      return super.visit(other);
+    }
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroBatchReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroBatchReader.java
index 51a77d6..ec47678 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroBatchReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroBatchReader.java
@@ -28,6 +28,7 @@
 import org.apache.drill.exec.physical.impl.scan.v3.FixedReceiver;
 import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
 import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.ColumnConverter;
 import org.apache.drill.exec.record.metadata.TupleMetadata;
 import org.apache.drill.exec.util.ImpersonationUtil;
 import org.apache.hadoop.fs.FileSystem;
@@ -39,8 +40,6 @@
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.List;
-import java.util.stream.IntStream;
 
 public class AvroBatchReader implements ManagedReader<FileScanFramework.FileSchemaNegotiator> {
   private static final Logger logger = LoggerFactory.getLogger(AvroBatchReader.class);
@@ -49,10 +48,10 @@
   private long endPosition;
   private DataFileReader<GenericRecord> reader;
   private ResultSetLoader loader;
-  private List<ColumnConverter> converters;
+  private ColumnConverter converter;
   // re-use container instance
   private GenericRecord record;
-  private int maxRecords;
+  private final int maxRecords;
 
   public AvroBatchReader(int maxRecords) {
     this.maxRecords = maxRecords;
@@ -81,8 +80,8 @@
     logger.debug("Avro file table schema: {}", tableSchema);
     negotiator.tableSchema(tableSchema, true);
     loader = negotiator.build();
-    ColumnConverterFactory factory = new ColumnConverterFactory(providedSchema);
-    converters = factory.initConverters(providedSchema, readerSchema, loader.writer());
+    AvroColumnConverterFactory factory = new AvroColumnConverterFactory(providedSchema);
+    converter = factory.getRootConverter(providedSchema, readerSchema, loader.writer());
 
     return true;
   }
@@ -181,8 +180,7 @@
     }
 
     rowWriter.start();
-    IntStream.range(0, rowWriter.size())
-      .forEach(i -> converters.get(i).convert(record.get(i)));
+    converter.convert(record);
     rowWriter.save();
 
     return true;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroColumnConverterFactory.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroColumnConverterFactory.java
new file mode 100644
index 0000000..cfd7d50
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroColumnConverterFactory.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.avro;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.IntBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.apache.avro.generic.GenericFixed;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.util.Utf8;
+import org.apache.drill.exec.physical.impl.scan.v3.FixedReceiver;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.ColumnConverter;
+import org.apache.drill.exec.record.ColumnConverterFactory;
+import org.apache.drill.exec.record.metadata.ColumnMetadata;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.vector.accessor.TupleWriter;
+import org.apache.drill.exec.vector.accessor.ValueWriter;
+import org.apache.drill.shaded.guava.com.google.common.base.Charsets;
+import org.joda.time.DateTimeConstants;
+import org.joda.time.Period;
+
+public class AvroColumnConverterFactory extends ColumnConverterFactory {
+  public AvroColumnConverterFactory(TupleMetadata providedSchema) {
+    super(providedSchema);
+  }
+
+  /**
+   * Generates a list of column converters for the given converted Avro schema
+   * and current row writer, one converter per column based on its type.
+   *
+   * @param providedSchema provided schema
+   * @param readerSchema converted Avro schema
+   * @param rowWriter current row writer
+   * @return list of column converters
+   */
+  public List<ColumnConverter> initConverters(TupleMetadata providedSchema,
+      TupleMetadata readerSchema, RowSetLoader rowWriter) {
+    return IntStream.range(0, readerSchema.size())
+        .mapToObj(i -> getConverter(providedSchema, readerSchema.metadata(i), rowWriter.column(i)))
+        .collect(Collectors.toList());
+  }
+
+  @Override
+  public ColumnConverter.ScalarColumnConverter buildScalar(ColumnMetadata readerSchema, ValueWriter writer) {
+    switch (readerSchema.type()) {
+      case VARCHAR:
+        return new ColumnConverter.ScalarColumnConverter(value -> {
+          byte[] binary;
+          int length;
+          if (value instanceof Utf8) {
+            Utf8 utf8 = (Utf8) value;
+            binary = utf8.getBytes();
+            length = utf8.getByteLength();
+          } else {
+            binary = value.toString().getBytes(Charsets.UTF_8);
+            length = binary.length;
+          }
+          writer.setBytes(binary, length);
+        });
+      case VARBINARY:
+        return new ColumnConverter.ScalarColumnConverter(value -> {
+          if (value instanceof ByteBuffer) {
+            ByteBuffer buf = (ByteBuffer) value;
+            writer.setBytes(buf.array(), buf.remaining());
+          } else {
+            byte[] bytes = ((GenericFixed) value).bytes();
+            writer.setBytes(bytes, bytes.length);
+          }
+        });
+      case VARDECIMAL:
+        return new ColumnConverter.ScalarColumnConverter(value -> {
+          BigInteger bigInteger;
+          if (value instanceof ByteBuffer) {
+            ByteBuffer decBuf = (ByteBuffer) value;
+            bigInteger = new BigInteger(decBuf.array());
+          } else {
+            GenericFixed genericFixed = (GenericFixed) value;
+            bigInteger = new BigInteger(genericFixed.bytes());
+          }
+          BigDecimal decimalValue = new BigDecimal(bigInteger, readerSchema.scale());
+          writer.setDecimal(decimalValue);
+        });
+      case TIMESTAMP:
+        return new ColumnConverter.ScalarColumnConverter(value -> {
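+          // timestamp-millis values are written as-is; timestamp-micros
+          // values are scaled down to Drill's millisecond precision.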
+          String avroLogicalType = readerSchema.property(AvroSchemaUtil.AVRO_LOGICAL_TYPE_PROPERTY);
+          if (AvroSchemaUtil.TIMESTAMP_MILLIS_LOGICAL_TYPE.equals(avroLogicalType)) {
+            writer.setLong((long) value);
+          } else {
+            writer.setLong((long) value / 1000);
+          }
+        });
+      case DATE:
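+        // Avro dates are day counts; convert to milliseconds since epoch.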
+        return new ColumnConverter.ScalarColumnConverter(value -> writer.setLong((int) value * (long) DateTimeConstants.MILLIS_PER_DAY));
+      case TIME:
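+        // time-micros values arrive as Long and are scaled to milliseconds;
+        // time-millis values arrive as Integer and are written as-is.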
+        return new ColumnConverter.ScalarColumnConverter(value -> {
+          if (value instanceof Long) {
+            writer.setInt((int) ((long) value / 1000));
+          } else {
+            writer.setInt((int) value);
+          }
+        });
+      case INTERVAL:
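+        // An Avro duration is a 12-byte fixed value holding months, days and
+        // milliseconds as little-endian ints.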
+        return new ColumnConverter.ScalarColumnConverter(value -> {
+          GenericFixed genericFixed = (GenericFixed) value;
+          IntBuffer intBuf = ByteBuffer.wrap(genericFixed.bytes())
+              .order(ByteOrder.LITTLE_ENDIAN)
+              .asIntBuffer();
+
+          Period period = Period.months(intBuf.get(0))
+              .withDays(intBuf.get(1)
+              ).withMillis(intBuf.get(2));
+
+          writer.setPeriod(period);
+        });
+      case FLOAT4:
+        return new ColumnConverter.ScalarColumnConverter(value -> writer.setDouble((Float) value));
+      case BIT:
+        return new ColumnConverter.ScalarColumnConverter(value -> writer.setBoolean((Boolean) value));
+      default:
+        return super.buildScalar(readerSchema, writer);
+    }
+  }
+
+  /**
+   * Generates a list of column converters from the provided schema, the
+   * converted Avro schema and the current tuple writer, and returns a
+   * {@link MapColumnConverter} holding them.
+   *
+   * @param providedSchema provided schema
+   * @param readerSchema   converted Avro schema
+   * @param tupleWriter    current row writer
+   * @return {@link MapColumnConverter} with column converters
+   */
+  @Override
+  protected ColumnConverter getMapConverter(TupleMetadata providedSchema,
+      TupleMetadata readerSchema, TupleWriter tupleWriter) {
+    List<ColumnConverter> converters = IntStream.range(0, readerSchema.size())
+        .mapToObj(i -> getConverter(providedSchema, readerSchema.metadata(i), tupleWriter.column(i)))
+        .collect(Collectors.toList());
+
+    return new MapColumnConverter(this, providedSchema, tupleWriter, converters);
+  }
+
+  public void buildMapMembers(GenericRecord genericRecord, TupleMetadata providedSchema,
+      TupleWriter tupleWriter, List<ColumnConverter> converters) {
+    // fill in tuple schema for cases when it contains recursive named record types
+    TupleMetadata readerSchema = AvroSchemaUtil.convert(genericRecord.getSchema());
+    TupleMetadata tableSchema = FixedReceiver.Builder.mergeSchemas(providedSchema, readerSchema);
+    tableSchema.toMetadataList().forEach(tupleWriter::addColumn);
+
+    IntStream.range(0, tableSchema.size())
+        .mapToObj(i -> getConverter(providedSchema,
+            readerSchema.metadata(i), tupleWriter.column(i)))
+        .forEach(converters::add);
+  }
+
+  /**
+   * Converts and writes all map children using the provided {@link #converters}.
+   * If {@link #converters} are empty, they are generated based on the
+   * {@link GenericRecord} schema.
+   */
+  public static class MapColumnConverter implements ColumnConverter {
+
+    private final AvroColumnConverterFactory factory;
+    private final TupleMetadata providedSchema;
+    private final TupleWriter tupleWriter;
+    private final List<ColumnConverter> converters;
+
+    public MapColumnConverter(AvroColumnConverterFactory factory,
+        TupleMetadata providedSchema,
+        TupleWriter tupleWriter, List<ColumnConverter> converters) {
+      this.factory = factory;
+      this.providedSchema = providedSchema;
+      this.tupleWriter = tupleWriter;
+      this.converters = new ArrayList<>(converters);
+    }
+
+    @Override
+    public void convert(Object value) {
+      if (value == null) {
+        return;
+      }
+
+      GenericRecord genericRecord = (GenericRecord) value;
+
+      if (converters.isEmpty()) {
+        factory.buildMapMembers(genericRecord, providedSchema, tupleWriter, converters);
+      }
+
+      IntStream.range(0, converters.size())
+          .forEach(i -> converters.get(i).convert(genericRecord.get(i)));
+    }
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroFormatPlugin.java
index 854c6be..5051cb4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/AvroFormatPlugin.java
@@ -22,7 +22,6 @@
 import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework;
 import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
@@ -45,19 +44,18 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, AvroFormatConfig formatConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = false;
-    config.blockSplittable = true;
-    config.compressible = false;
-    config.supportsProjectPushdown = true;
-    config.extensions = formatConfig.getExtensions();
-    config.fsConf = fsConf;
-    config.defaultName = DEFAULT_NAME;
-    config.readerOperatorType = CoreOperatorType.AVRO_SUB_SCAN_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(true)
+        .compressible(false)
+        .supportsProjectPushdown(true)
+        .extensions(formatConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(DEFAULT_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/ColumnConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/ColumnConverter.java
deleted file mode 100644
index ee856de..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/ColumnConverter.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.avro;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Consumer;
-import java.util.stream.IntStream;
-
-import org.apache.avro.generic.GenericArray;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.drill.exec.record.metadata.TupleMetadata;
-import org.apache.drill.exec.vector.accessor.ArrayWriter;
-import org.apache.drill.exec.vector.accessor.DictWriter;
-import org.apache.drill.exec.vector.accessor.TupleWriter;
-
-/**
- * Converts and sets given value into the specific column writer.
- */
-public interface ColumnConverter {
-
-  void convert(Object value);
-
-  /**
-   * Does nothing, is used when column is not projected to avoid unnecessary
-   * column values conversions and writes.
-   */
-  class DummyColumnConverter implements ColumnConverter {
-
-    public static final DummyColumnConverter INSTANCE = new DummyColumnConverter();
-
-    @Override
-    public void convert(Object value) {
-      // do nothing
-    }
-  }
-
-  /**
-   * Converts and writes scalar values using provided {@link #valueConverter}.
-   * {@link #valueConverter} has different implementation depending
-   * on the scalar value type.
-   */
-  class ScalarColumnConverter implements ColumnConverter {
-
-    private final Consumer<Object> valueConverter;
-
-    public ScalarColumnConverter(Consumer<Object> valueConverter) {
-      this.valueConverter = valueConverter;
-    }
-
-    @Override
-    public void convert(Object value) {
-      if (value == null) {
-        return;
-      }
-
-      valueConverter.accept(value);
-    }
-  }
-
-  /**
-   * Converts and writes array values using {@link #valueConverter}
-   * into {@link #arrayWriter}.
-   */
-  class ArrayColumnConverter implements ColumnConverter {
-
-    private final ArrayWriter arrayWriter;
-    private final ColumnConverter valueConverter;
-
-    public ArrayColumnConverter(ArrayWriter arrayWriter, ColumnConverter valueConverter) {
-      this.arrayWriter = arrayWriter;
-      this.valueConverter = valueConverter;
-    }
-
-    @Override
-    public void convert(Object value) {
-      if (value == null || !arrayWriter.isProjected()) {
-        return;
-      }
-
-      GenericArray<?> array = (GenericArray<?>) value;
-      array.forEach(arrayValue -> {
-        valueConverter.convert(arrayValue);
-        arrayWriter.save();
-      });
-    }
-  }
-
-  /**
-   * Converts and writes all map children using provided {@link #converters}.
-   * If {@link #converters} are empty, generates their converters based on
-   * {@link GenericRecord} schema.
-   */
-  class MapColumnConverter implements ColumnConverter {
-
-    private final ColumnConverterFactory factory;
-    private final TupleMetadata providedSchema;
-    private final TupleWriter tupleWriter;
-    private final List<ColumnConverter> converters;
-
-    public MapColumnConverter(ColumnConverterFactory factory,
-        TupleMetadata providedSchema,
-        TupleWriter tupleWriter, List<ColumnConverter> converters) {
-      this.factory = factory;
-      this.providedSchema = providedSchema;
-      this.tupleWriter = tupleWriter;
-      this.converters = new ArrayList<>(converters);
-    }
-
-    @Override
-    public void convert(Object value) {
-      if (value == null) {
-        return;
-      }
-
-      GenericRecord genericRecord = (GenericRecord) value;
-
-      if (converters.isEmpty()) {
-        factory.buildMapMembers(genericRecord, providedSchema, tupleWriter, converters);
-      }
-
-      IntStream.range(0, converters.size())
-        .forEach(i -> converters.get(i).convert(genericRecord.get(i)));
-    }
-  }
-
-  /**
-   * Converts and writes dict values using provided key / value converters.
-   */
-  class DictColumnConverter implements ColumnConverter {
-
-    private final DictWriter dictWriter;
-    private final ColumnConverter keyConverter;
-    private final ColumnConverter valueConverter;
-
-    public DictColumnConverter(DictWriter dictWriter, ColumnConverter keyConverter, ColumnConverter valueConverter) {
-      this.dictWriter = dictWriter;
-      this.keyConverter = keyConverter;
-      this.valueConverter = valueConverter;
-    }
-
-    @Override
-    public void convert(Object value) {
-      if (value == null) {
-        return;
-      }
-
-      @SuppressWarnings("unchecked") Map<Object, Object> map = (Map<Object, Object>) value;
-      map.forEach((key, val) -> {
-        keyConverter.convert(key);
-        valueConverter.convert(val);
-        dictWriter.save();
-      });
-    }
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/ColumnConverterFactory.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/ColumnConverterFactory.java
deleted file mode 100644
index 527f21d..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/avro/ColumnConverterFactory.java
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.avro;
-
-import org.apache.avro.generic.GenericFixed;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.avro.util.Utf8;
-import org.apache.drill.exec.physical.impl.scan.convert.StandardConversions;
-import org.apache.drill.exec.physical.impl.scan.v3.FixedReceiver;
-import org.apache.drill.exec.physical.resultSet.RowSetLoader;
-import org.apache.drill.exec.record.metadata.ColumnMetadata;
-import org.apache.drill.exec.record.metadata.TupleMetadata;
-import org.apache.drill.exec.store.avro.ColumnConverter.ArrayColumnConverter;
-import org.apache.drill.exec.store.avro.ColumnConverter.DictColumnConverter;
-import org.apache.drill.exec.store.avro.ColumnConverter.DummyColumnConverter;
-import org.apache.drill.exec.store.avro.ColumnConverter.MapColumnConverter;
-import org.apache.drill.exec.store.avro.ColumnConverter.ScalarColumnConverter;
-import org.apache.drill.exec.vector.accessor.ArrayWriter;
-import org.apache.drill.exec.vector.accessor.DictWriter;
-import org.apache.drill.exec.vector.accessor.ObjectWriter;
-import org.apache.drill.exec.vector.accessor.ScalarWriter;
-import org.apache.drill.exec.vector.accessor.TupleWriter;
-import org.apache.drill.exec.vector.accessor.ValueWriter;
-import org.apache.drill.exec.vector.complex.DictVector;
-import org.apache.drill.shaded.guava.com.google.common.base.Charsets;
-import org.joda.time.DateTimeConstants;
-import org.joda.time.Period;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.IntBuffer;
-import java.util.List;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-public class ColumnConverterFactory {
-
-  private final StandardConversions standardConversions;
-
-  public ColumnConverterFactory(TupleMetadata providedSchema) {
-    if (providedSchema == null) {
-      standardConversions = null;
-    } else {
-      standardConversions = StandardConversions.builder().withSchema(providedSchema).build();
-    }
-  }
-
-  /**
-   * Based on given converted Avro schema and current row writer generates list of
-   * column converters based on column type.
-   *
-   * @param readerSchema converted Avro schema
-   * @param rowWriter current row writer
-   * @return list of column converters
-   */
-  public List<ColumnConverter> initConverters(TupleMetadata providedSchema,
-      TupleMetadata readerSchema, RowSetLoader rowWriter) {
-    return IntStream.range(0, readerSchema.size())
-      .mapToObj(i -> getConverter(providedSchema, readerSchema.metadata(i), rowWriter.column(i)))
-      .collect(Collectors.toList());
-  }
-
-  /**
-   * Based on column type, creates corresponding column converter
-   * which holds conversion logic and appropriate writer to set converted data into.
-   * For columns which are not projected, {@link DummyColumnConverter} is used.
-   *
-   * @param readerSchema column metadata
-   * @param writer column writer
-   * @return column converter
-   */
-  public ColumnConverter getConverter(TupleMetadata providedSchema,
-      ColumnMetadata readerSchema, ObjectWriter writer) {
-    if (!writer.isProjected()) {
-      return DummyColumnConverter.INSTANCE;
-    }
-
-    if (readerSchema.isArray()) {
-      return getArrayConverter(providedSchema,
-          readerSchema, writer.array());
-    }
-
-    if (readerSchema.isMap()) {
-      return getMapConverter(
-          providedChildSchema(providedSchema, readerSchema),
-          readerSchema.tupleSchema(), writer.tuple());
-    }
-
-    if (readerSchema.isDict()) {
-      return getDictConverter(
-          providedChildSchema(providedSchema, readerSchema),
-          readerSchema.tupleSchema(), writer.dict());
-    }
-
-    return getScalarConverter(readerSchema, writer.scalar());
-  }
-
-  private TupleMetadata providedChildSchema(TupleMetadata providedSchema,
-      ColumnMetadata readerSchema) {
-    return providedSchema == null ? null :
-      providedSchema.metadata(readerSchema.name()).tupleSchema();
-  }
-
-  private ColumnConverter getArrayConverter(TupleMetadata providedSchema,
-      ColumnMetadata readerSchema, ArrayWriter arrayWriter) {
-    ObjectWriter valueWriter = arrayWriter.entry();
-    ColumnConverter valueConverter;
-    if (readerSchema.isMap()) {
-      valueConverter = getMapConverter(providedSchema,
-          readerSchema.tupleSchema(), valueWriter.tuple());
-    } else if (readerSchema.isDict()) {
-      valueConverter = getDictConverter(providedSchema,
-          readerSchema.tupleSchema(), valueWriter.dict());
-    } else if (readerSchema.isMultiList()) {
-      valueConverter = getConverter(null, readerSchema.childSchema(), valueWriter);
-    } else {
-      valueConverter = getScalarConverter(readerSchema, valueWriter.scalar());
-    }
-    return new ArrayColumnConverter(arrayWriter, valueConverter);
-  }
-
-  private ColumnConverter getMapConverter(TupleMetadata providedSchema,
-      TupleMetadata readerSchema, TupleWriter tupleWriter) {
-    List<ColumnConverter> converters = IntStream.range(0, readerSchema.size())
-      .mapToObj(i -> getConverter(providedSchema, readerSchema.metadata(i), tupleWriter.column(i)))
-      .collect(Collectors.toList());
-    return new MapColumnConverter(this, providedSchema, tupleWriter, converters);
-  }
-
-  private ColumnConverter getDictConverter(TupleMetadata providedSchema,
-      TupleMetadata readerSchema, DictWriter dictWriter) {
-    ColumnConverter keyConverter = getScalarConverter(
-        readerSchema.metadata(DictVector.FIELD_KEY_NAME), dictWriter.keyWriter());
-    ColumnConverter valueConverter = getConverter(providedSchema,
-        readerSchema.metadata(DictVector.FIELD_VALUE_NAME), dictWriter.valueWriter());
-    return new DictColumnConverter(dictWriter, keyConverter, valueConverter);
-  }
-
-  private ColumnConverter getScalarConverter(ColumnMetadata readerSchema, ScalarWriter scalarWriter) {
-    ValueWriter valueWriter;
-    if (standardConversions == null) {
-      valueWriter = scalarWriter;
-    } else {
-      valueWriter = standardConversions.converterFor(scalarWriter, readerSchema);
-    }
-    return buildScalar(readerSchema, valueWriter);
-  }
-
-  public  ScalarColumnConverter buildScalar(ColumnMetadata readerSchema, ValueWriter writer) {
-    switch (readerSchema.type()) {
-      case VARCHAR:
-        return new ScalarColumnConverter(value -> {
-          byte[] binary;
-          int length;
-          if (value instanceof Utf8) {
-            Utf8 utf8 = (Utf8) value;
-            binary = utf8.getBytes();
-            length = utf8.getByteLength();
-          } else {
-            binary = value.toString().getBytes(Charsets.UTF_8);
-            length = binary.length;
-          }
-          writer.setBytes(binary, length);
-        });
-      case VARBINARY:
-        return new ScalarColumnConverter(value -> {
-          if (value instanceof ByteBuffer) {
-            ByteBuffer buf = (ByteBuffer) value;
-            writer.setBytes(buf.array(), buf.remaining());
-          } else {
-            byte[] bytes = ((GenericFixed) value).bytes();
-            writer.setBytes(bytes, bytes.length);
-          }
-        });
-      case VARDECIMAL:
-        return new ScalarColumnConverter(value -> {
-          BigInteger bigInteger;
-          if (value instanceof ByteBuffer) {
-            ByteBuffer decBuf = (ByteBuffer) value;
-            bigInteger = new BigInteger(decBuf.array());
-          } else {
-            GenericFixed genericFixed = (GenericFixed) value;
-            bigInteger = new BigInteger(genericFixed.bytes());
-          }
-          BigDecimal decimalValue = new BigDecimal(bigInteger, readerSchema.scale());
-          writer.setDecimal(decimalValue);
-        });
-      case TIMESTAMP:
-        return new ScalarColumnConverter(value -> {
-          String avroLogicalType = readerSchema.property(AvroSchemaUtil.AVRO_LOGICAL_TYPE_PROPERTY);
-          if (AvroSchemaUtil.TIMESTAMP_MILLIS_LOGICAL_TYPE.equals(avroLogicalType)) {
-            writer.setLong((long) value);
-          } else {
-            writer.setLong((long) value / 1000);
-          }
-        });
-      case DATE:
-        return new ScalarColumnConverter(value -> writer.setLong((int) value * (long) DateTimeConstants.MILLIS_PER_DAY));
-      case TIME:
-        return new ScalarColumnConverter(value -> {
-          if (value instanceof Long) {
-            writer.setInt((int) ((long) value / 1000));
-          } else {
-            writer.setInt((int) value);
-          }
-        });
-      case INTERVAL:
-        return new ScalarColumnConverter(value -> {
-          GenericFixed genericFixed = (GenericFixed) value;
-          IntBuffer intBuf = ByteBuffer.wrap(genericFixed.bytes())
-            .order(ByteOrder.LITTLE_ENDIAN)
-            .asIntBuffer();
-
-          Period period = Period.months(intBuf.get(0))
-            .withDays(intBuf.get(1)
-            ).withMillis(intBuf.get(2));
-
-          writer.setPeriod(period);
-        });
-      case FLOAT4:
-        return new ScalarColumnConverter(value -> writer.setDouble((Float) value));
-      case BIT:
-        return new ScalarColumnConverter(value -> writer.setBoolean((Boolean) value));
-      default:
-        return new ScalarColumnConverter(writer::setValue);
-    }
-  }
-
-  public void buildMapMembers(GenericRecord genericRecord, TupleMetadata providedSchema,
-      TupleWriter tupleWriter, List<ColumnConverter> converters) {
-    // fill in tuple schema for cases when it contains recursive named record types
-    TupleMetadata readerSchema = AvroSchemaUtil.convert(genericRecord.getSchema());
-    TupleMetadata tableSchema = FixedReceiver.Builder.mergeSchemas(providedSchema, readerSchema);
-    tableSchema.toMetadataList().forEach(tupleWriter::addColumn);
-
-    IntStream.range(0, tableSchema.size())
-      .mapToObj(i -> getConverter(providedSchema,
-          readerSchema.metadata(i), tupleWriter.column(i)))
-      .forEach(converters::add);
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/bson/BsonRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/bson/BsonRecordReader.java
index 64c6200..44852c1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/bson/BsonRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/bson/BsonRecordReader.java
@@ -17,11 +17,7 @@
  */
 package org.apache.drill.exec.store.bson;
 
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.nio.ByteBuffer;
-import java.util.List;
-
+import io.netty.buffer.DrillBuf;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.expression.PathSegment;
 import org.apache.drill.common.expression.SchemaPath;
@@ -36,14 +32,17 @@
 import org.apache.drill.exec.vector.complex.writer.BaseWriter;
 import org.apache.drill.exec.vector.complex.writer.BaseWriter.ComplexWriter;
 import org.apache.drill.exec.vector.complex.writer.TimeStampWriter;
+import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 import org.bson.BsonBinary;
 import org.bson.BsonReader;
 import org.bson.BsonType;
 import org.joda.time.DateTime;
 
-import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
-
-import io.netty.buffer.DrillBuf;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.util.List;
 
 public class BsonRecordReader {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(BsonRecordReader.class);
@@ -178,6 +177,11 @@
         writeTimeStamp(time, writer, fieldName, isList);
         atLeastOneWrite = true;
         break;
+      case DECIMAL128:
+        BigDecimal readBigDecimalAsDecimal128 = reader.readDecimal128().bigDecimalValue();
+        writeDecimal128(readBigDecimalAsDecimal128, writer, fieldName, isList);
+        atLeastOneWrite = true;
+        break;
       default:
         // Didn't handled REGULAR_EXPRESSION and DB_POINTER types
         throw new DrillRuntimeException("UnSupported Bson type: " + currentBsonType);
@@ -351,7 +355,16 @@
     }
   }
 
-  public void ensureAtLeastOneField(ComplexWriter writer) {
+  private void writeDecimal128(BigDecimal readBigDecimal, final MapOrListWriterImpl writer,
+      String fieldName, boolean isList) {
+    if (isList) {
+      writer.list.varDecimal().writeVarDecimal(readBigDecimal);
+    } else {
+      writer.varDecimal(fieldName, readBigDecimal.precision(), readBigDecimal.scale()).writeVarDecimal(readBigDecimal);
+    }
+  }
+
+  public void ensureAtLeastOneField(ComplexWriter writer) {
     if (!atLeastOneWrite) {
       // if we had no columns, create one empty one so we can return some data
       // for count purposes.
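
For reference, the new DECIMAL128 branch relies on the MongoDB BSON API: BsonReader.readDecimal128() returns a Decimal128 whose bigDecimalValue() yields the java.math.BigDecimal that the VarDecimal writer accepts. A minimal standalone sketch of that conversion (the document and field name are illustrative only; in Drill the reader is already positioned inside the document when this case runs):

import java.math.BigDecimal;
import org.bson.BsonDecimal128;
import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.BsonReader;
import org.bson.types.Decimal128;

public class Decimal128ReadSketch {
  public static void main(String[] args) {
    // Illustrative document; not part of the patch.
    BsonDocument doc = new BsonDocument("price", new BsonDecimal128(Decimal128.parse("19.99")));
    BsonReader reader = new BsonDocumentReader(doc);
    reader.readStartDocument();
    String fieldName = reader.readName();
    BigDecimal value = reader.readDecimal128().bigDecimalValue();
    reader.readEndDocument();
    // Drill hands this value to writer.varDecimal(fieldName, precision, scale).
    System.out.println(fieldName + " = " + value);
  }
}
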
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/DrillFileSystem.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/DrillFileSystem.java
index 9380e41..4f72699 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/DrillFileSystem.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/DrillFileSystem.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.store.dfs;
 
+import java.io.ByteArrayInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -26,6 +27,8 @@
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.drill.common.AutoCloseables;
 import org.apache.drill.exec.ops.OperatorStats;
 import org.apache.drill.exec.util.AssertionUtil;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
@@ -783,6 +786,14 @@
     underlyingFs.removeXAttr(path, name);
   }
 
+  /**
+   * Returns an InputStream for a Hadoop path. If the file is compressed, the returned stream is wrapped with the
+   * decompression codec inferred from the file extension. Note that if the result is passed to a third-party parser
+   * that works with bytes or individual characters directly, use the openDecompressedInputStream method instead.
+   * @param path Input file path
+   * @return InputStream of opened file path
+   * @throws IOException If the file is unreachable, unavailable or otherwise unreadable
+   */
   public InputStream openPossiblyCompressedStream(Path path) throws IOException {
     CompressionCodec codec = codecFactory.getCodec(path); // infers from file ext.
     if (codec != null) {
@@ -791,6 +802,50 @@
       return open(path);
     }
   }
+
+  /**
+   * Returns a normal, decompressed InputStream. Some parsers, particularly those
+   * that read raw bytes, generate errors when passed Hadoop ZipCompressed InputStreams.
+   * This utility function wraps some of these functions so that a format plugin can be guaranteed
+   * readable bytes.
+   * @param path The file being read
+   * @return Decompressed InputStream of the input file
+   * @throws IOException If the file is unreadable or uses an unknown compression codec
+   */
+  public InputStream openDecompressedInputStream(Path path) throws IOException {
+    CompressionCodec codec = getCodec(path);
+    if (codec == null) {
+      return open(path);
+    } else {
+      InputStream compressedStream = codec.createInputStream(open(path));
+      byte[] bytes = IOUtils.toByteArray(compressedStream);
+      AutoCloseables.closeSilently(compressedStream);
+      return new ByteArrayInputStream(bytes);
+    }
+  }
+
+  /**
+   * Some parsers require an uncompressed input stream to read data properly.
+   * This method identifies whether the file being read is in fact compressed.
+   * @param path The file being read
+   * @return True if the file is compressed, false if not.
+   */
+  public boolean isCompressed(Path path) {
+    CompressionCodec codec = codecFactory.getCodec(path);
+    return codec != null;
+  }
+
+  /**
+   * Returns the {@link org.apache.hadoop.io.compress.CompressionCodec} for a given file.  This
+   * can be used to determine the type of compression (if any) which was used.  Returns null if the
+   * file is not compressed.
+   * @param path The file of unknown compression
+   * @return CompressionCodec used by the file. Null if the file is not compressed.
+   */
+  public CompressionCodec getCodec(Path path) {
+    return codecFactory.getCodec(path);
+  }
+
   @Override
   public void fileOpened(Path path, DrillFSDataInputStream fsDataInputStream) {
     openedFiles.put(fsDataInputStream, new DebugStackTrace(path, Thread.currentThread().getStackTrace()));
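
A brief usage sketch of the new DrillFileSystem helpers (the surrounding class and method here are hypothetical, not part of the patch): a format plugin that feeds a byte-oriented parser can check whether the file is compressed and, if so, ask for fully decompressed bytes instead of a Hadoop compression stream.

import java.io.IOException;
import java.io.InputStream;

import org.apache.drill.exec.store.dfs.DrillFileSystem;
import org.apache.hadoop.fs.Path;

public class DecompressedReadSketch {
  // Hypothetical helper inside a format plugin's batch reader.
  static InputStream openForParser(DrillFileSystem fs, Path path) throws IOException {
    if (fs.isCompressed(path)) {
      // Fully decompresses the file into memory so the parser sees plain bytes.
      return fs.openDecompressedInputStream(path);
    }
    // Uncompressed files can be streamed directly.
    return fs.open(path);
  }
}
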
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyFormatPlugin.java
index af931f9..5c290d3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyFormatPlugin.java
@@ -18,9 +18,12 @@
 package org.apache.drill.exec.store.dfs.easy;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 
 import org.apache.drill.common.exceptions.ExecutionSetupException;
@@ -90,34 +93,253 @@
    * vary across uses of the plugin.
    */
   public static class EasyFormatConfig {
-    public BasicFormatMatcher matcher;
-    public boolean readable = true;
-    public boolean writable;
-    public boolean blockSplittable;
-    public boolean compressible;
-    public Configuration fsConf;
-    public List<String> extensions;
-    public String defaultName;
+    private BasicFormatMatcher matcher;
+    private final boolean readable;
+    private final boolean writable;
+    private final boolean blockSplittable;
+    private final boolean compressible;
+    private final Configuration fsConf;
+    private final List<String> extensions;
+    private final String defaultName;
 
     // Config options that, prior to Drill 1.15, required the plugin to
     // override methods. Moving forward, plugins should be migrated to
     // use this simpler form. New plugins should use these options
     // instead of overriding methods.
 
-    public boolean supportsLimitPushdown;
-    public boolean supportsProjectPushdown;
-    public boolean supportsFileImplicitColumns = true;
-    public boolean supportsAutoPartitioning;
-    public boolean supportsStatistics;
-    public int readerOperatorType = -1;
-    public int writerOperatorType = -1;
+    private final boolean supportsLimitPushdown;
+    private final boolean supportsProjectPushdown;
+    private final boolean supportsFileImplicitColumns;
+    private final boolean supportsAutoPartitioning;
+    private final boolean supportsStatistics;
+    private final String readerOperatorType;
+    private final String writerOperatorType;
 
     /**
      *  Choose whether to use the "traditional" or "enhanced" reader
      *  structure. Can also be selected at runtime by overriding
-     *  {@link #useEnhancedScan(OptionManager)}.
+     *  {@link #useEnhancedScan()}.
      */
-    public boolean useEnhancedScan;
+    private final boolean useEnhancedScan;
+
+    public EasyFormatConfig(EasyFormatConfigBuilder builder) {
+      this.matcher = builder.matcher;
+      this.readable = builder.readable;
+      this.writable = builder.writable;
+      this.blockSplittable = builder.blockSplittable;
+      this.compressible = builder.compressible;
+      this.fsConf = builder.fsConf;
+      this.extensions = builder.extensions;
+      this.defaultName = builder.defaultName;
+      this.supportsLimitPushdown = builder.supportsLimitPushdown;
+      this.supportsProjectPushdown = builder.supportsProjectPushdown;
+      this.supportsFileImplicitColumns = builder.supportsFileImplicitColumns;
+      this.supportsAutoPartitioning = builder.supportsAutoPartitioning;
+      this.supportsStatistics = builder.supportsStatistics;
+      this.readerOperatorType = builder.readerOperatorType;
+      this.writerOperatorType = builder.writerOperatorType;
+      this.useEnhancedScan = builder.useEnhancedScan;
+    }
+
+    public BasicFormatMatcher getMatcher() {
+      return matcher;
+    }
+
+    public boolean isReadable() {
+      return readable;
+    }
+
+    public boolean isWritable() {
+      return writable;
+    }
+
+    public boolean isBlockSplittable() {
+      return blockSplittable;
+    }
+
+    public boolean isCompressible() {
+      return compressible;
+    }
+
+    public Configuration getFsConf() {
+      return fsConf;
+    }
+
+    public List<String> getExtensions() {
+      return extensions;
+    }
+
+    public String getDefaultName() {
+      return defaultName;
+    }
+
+    public boolean supportsLimitPushdown() {
+      return supportsLimitPushdown;
+    }
+
+    public boolean supportsProjectPushdown() {
+      return supportsProjectPushdown;
+    }
+
+    public boolean supportsFileImplicitColumns() {
+      return supportsFileImplicitColumns;
+    }
+
+    public boolean supportsAutoPartitioning() {
+      return supportsAutoPartitioning;
+    }
+
+    public boolean supportsStatistics() {
+      return supportsStatistics;
+    }
+
+    public String getReaderOperatorType() {
+      return readerOperatorType;
+    }
+
+    public String getWriterOperatorType() {
+      return writerOperatorType;
+    }
+
+    public boolean useEnhancedScan() {
+      return useEnhancedScan;
+    }
+
+    public static EasyFormatConfigBuilder builder() {
+      return new EasyFormatConfigBuilder();
+    }
+
+    public EasyFormatConfigBuilder toBuilder() {
+      return builder()
+          .matcher(matcher)
+          .readable(readable)
+          .writable(writable)
+          .blockSplittable(blockSplittable)
+          .compressible(compressible)
+          .fsConf(fsConf)
+          .extensions(extensions)
+          .defaultName(defaultName)
+          .supportsLimitPushdown(supportsLimitPushdown)
+          .supportsProjectPushdown(supportsProjectPushdown)
+          .supportsFileImplicitColumns(supportsFileImplicitColumns)
+          .supportsAutoPartitioning(supportsAutoPartitioning)
+          .supportsStatistics(supportsStatistics)
+          .readerOperatorType(readerOperatorType)
+          .writerOperatorType(writerOperatorType)
+          .useEnhancedScan(useEnhancedScan);
+    }
+  }
+
+  public static class EasyFormatConfigBuilder {
+    private BasicFormatMatcher matcher;
+    private boolean readable = true;
+    private boolean writable;
+    private boolean blockSplittable;
+    private boolean compressible;
+    private Configuration fsConf;
+    private List<String> extensions;
+    private String defaultName;
+    private boolean supportsLimitPushdown;
+    private boolean supportsProjectPushdown;
+    private boolean supportsFileImplicitColumns = true;
+    private boolean supportsAutoPartitioning;
+    private boolean supportsStatistics;
+    private String readerOperatorType;
+    private String writerOperatorType = "";
+    private boolean useEnhancedScan;
+
+    public EasyFormatConfigBuilder matcher(BasicFormatMatcher matcher) {
+      this.matcher = matcher;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder readable(boolean readable) {
+      this.readable = readable;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder writable(boolean writable) {
+      this.writable = writable;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder blockSplittable(boolean blockSplittable) {
+      this.blockSplittable = blockSplittable;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder compressible(boolean compressible) {
+      this.compressible = compressible;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder fsConf(Configuration fsConf) {
+      this.fsConf = fsConf;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder extensions(List<String> extensions) {
+      this.extensions = extensions;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder extensions(String... extensions) {
+      this.extensions = Arrays.asList(extensions);
+      return this;
+    }
+
+    public EasyFormatConfigBuilder defaultName(String defaultName) {
+      this.defaultName = defaultName;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder supportsLimitPushdown(boolean supportsLimitPushdown) {
+      this.supportsLimitPushdown = supportsLimitPushdown;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder supportsProjectPushdown(boolean supportsProjectPushdown) {
+      this.supportsProjectPushdown = supportsProjectPushdown;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder supportsFileImplicitColumns(boolean supportsFileImplicitColumns) {
+      this.supportsFileImplicitColumns = supportsFileImplicitColumns;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder supportsAutoPartitioning(boolean supportsAutoPartitioning) {
+      this.supportsAutoPartitioning = supportsAutoPartitioning;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder supportsStatistics(boolean supportsStatistics) {
+      this.supportsStatistics = supportsStatistics;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder readerOperatorType(String readerOperatorType) {
+      this.readerOperatorType = readerOperatorType;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder writerOperatorType(String writerOperatorType) {
+      this.writerOperatorType = writerOperatorType;
+      return this;
+    }
+
+    public EasyFormatConfigBuilder useEnhancedScan(boolean useEnhancedScan) {
+      this.useEnhancedScan = useEnhancedScan;
+      return this;
+    }
+
+    public EasyFormatConfig build() {
+      Objects.requireNonNull(defaultName, "defaultName is not set");
+      readerOperatorType = readerOperatorType == null
+          ? defaultName.toUpperCase(Locale.ROOT) + "_SUB_SCAN"
+          : readerOperatorType;
+      return new EasyFormatConfig(this);
+    }
   }
 
   /**
@@ -163,14 +385,16 @@
       boolean blockSplittable,
       boolean compressible, List<String> extensions, String defaultName) {
     this.name = name == null ? defaultName : name;
-    easyConfig = new EasyFormatConfig();
-    easyConfig.matcher = new BasicFormatMatcher(this, fsConf, extensions, compressible);
-    easyConfig.readable = readable;
-    easyConfig.writable = writable;
+    easyConfig = EasyFormatConfig.builder()
+        .matcher(new BasicFormatMatcher(this, fsConf, extensions, compressible))
+        .readable(readable)
+        .writable(writable)
+        .blockSplittable(blockSplittable)
+        .compressible(compressible)
+        .fsConf(fsConf)
+        .defaultName(defaultName)
+        .build();
     this.context = context;
-    easyConfig.blockSplittable = blockSplittable;
-    easyConfig.compressible = compressible;
-    easyConfig.fsConf = fsConf;
     this.storageConfig = storageConfig;
     this.formatConfig = formatConfig;
   }
@@ -203,7 +427,7 @@
   }
 
   @Override
-  public Configuration getFsConf() { return easyConfig.fsConf; }
+  public Configuration getFsConf() { return easyConfig.getFsConf(); }
 
   @Override
   public DrillbitContext getContext() { return context; }
@@ -220,7 +444,7 @@
    * that are identified at the first row.  CSV for example.  If the user only wants 100 rows, it
    * does not make sense to read the entire file.
    */
-  public boolean supportsLimitPushdown() { return easyConfig.supportsLimitPushdown; }
+  public boolean supportsLimitPushdown() { return easyConfig.supportsLimitPushdown(); }
 
   /**
    * Does this plugin support projection push down? That is, can the reader
@@ -230,7 +454,7 @@
    * @return {@code true} if the plugin supports projection push-down,
    * {@code false} if Drill should do the task by adding a project operator
    */
-  public boolean supportsPushDown() { return easyConfig.supportsProjectPushdown; }
+  public boolean supportsPushDown() { return easyConfig.supportsProjectPushdown(); }
 
   /**
    * Whether this format plugin supports implicit file columns.
@@ -239,7 +463,7 @@
    * {@code false} otherwise
    */
   public boolean supportsFileImplicitColumns() {
-    return easyConfig.supportsFileImplicitColumns;
+    return easyConfig.supportsFileImplicitColumns();
   }
 
   /**
@@ -249,7 +473,7 @@
    *
    * @return {@code true} if splitable.
    */
-  public boolean isBlockSplittable() { return easyConfig.blockSplittable; }
+  public boolean isBlockSplittable() { return easyConfig.isBlockSplittable(); }
 
   /**
    * Indicates whether or not this format could also be in a compression
@@ -259,7 +483,7 @@
    *
    * @return {@code true} if it is compressible
    */
-  public boolean isCompressible() { return easyConfig.compressible; }
+  public boolean isCompressible() { return easyConfig.isCompressible(); }
 
   /**
    * Return a record reader for the specific file format, when using the original
@@ -279,7 +503,7 @@
 
   protected CloseableRecordBatch getReaderBatch(FragmentContext context,
       EasySubScan scan) throws ExecutionSetupException {
-    if (useEnhancedScan(context.getOptions())) {
+    if (useEnhancedScan()) {
       return buildScan(context, scan);
     } else {
       return buildScanBatch(context, scan);
@@ -295,8 +519,8 @@
    * @return true to use the enhanced scan framework, false for the
    * traditional scan-batch framework
    */
-  protected boolean useEnhancedScan(OptionManager options) {
-    return easyConfig.useEnhancedScan;
+  protected boolean useEnhancedScan() {
+    return easyConfig.useEnhancedScan();
   }
 
   /**
@@ -409,7 +633,7 @@
     // Additional error context to identify this plugin
     builder.errorContext(
         currentBuilder -> currentBuilder
-            .addContext("Format plugin", easyConfig.defaultName)
+            .addContext("Format plugin", easyConfig.getDefaultName())
             .addContext("Format plugin", EasyFormatPlugin.this.getClass().getSimpleName())
             .addContext("Plugin config name", getName()));
   }
@@ -498,27 +722,30 @@
   public StoragePluginConfig getStorageConfig() { return storageConfig; }
 
   @Override
-  public boolean supportsRead() { return easyConfig.readable; }
+  public boolean supportsRead() { return easyConfig.isReadable(); }
 
   @Override
-  public boolean supportsWrite() { return easyConfig.writable; }
+  public boolean supportsWrite() { return easyConfig.isWritable(); }
 
   @Override
-  public boolean supportsAutoPartitioning() { return easyConfig.supportsAutoPartitioning; }
+  public boolean supportsAutoPartitioning() { return easyConfig.supportsAutoPartitioning(); }
 
   @Override
-  public FormatMatcher getMatcher() { return easyConfig.matcher; }
+  public FormatMatcher getMatcher() { return easyConfig.getMatcher(); }
 
   @Override
   public Set<StoragePluginOptimizerRule> getOptimizerRules() {
     return ImmutableSet.of();
   }
 
-  public int getReaderOperatorType() { return easyConfig.readerOperatorType; }
-  public int getWriterOperatorType() { return easyConfig.writerOperatorType; }
+  public String getReaderOperatorType() {
+    return easyConfig.getReaderOperatorType();
+  }
+
+  public String getWriterOperatorType() { return easyConfig.getWriterOperatorType(); }
 
   @Override
-  public boolean supportsStatistics() { return easyConfig.supportsStatistics; }
+  public boolean supportsStatistics() { return easyConfig.supportsStatistics(); }
 
   @Override
   public TableStatistics readStatistics(FileSystem fs, Path statsTablePath) throws IOException {
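
To illustrate the builder introduced above, a hedged sketch of how a format plugin constructor can now assemble its EasyFormatConfig (the "example" name and extension are made up; the JSON and SequenceFile plugins later in this patch follow the same shape):

import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin.EasyFormatConfig;
import org.apache.hadoop.conf.Configuration;

public class ExampleEasyConfigSketch {
  // Hypothetical easyConfig(...) factory, mirroring the plugins changed in this patch.
  static EasyFormatConfig buildConfig(Configuration fsConf) {
    return EasyFormatConfig.builder()
        .readable(true)
        .writable(false)
        .blockSplittable(false)
        .compressible(true)
        .extensions("example")      // varargs overload added by this patch
        .fsConf(fsConf)
        .useEnhancedScan(true)
        .supportsProjectPushdown(true)
        .defaultName("example")     // required: build() fails fast without it
        .build();                   // readerOperatorType defaults to "EXAMPLE_SUB_SCAN"
  }
}
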
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java
index c968f6c..928ebac 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyGroupScan.java
@@ -21,6 +21,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.commons.collections.MapUtils;
 import org.apache.drill.common.PlanStringBuilder;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.expression.SchemaPath;
@@ -87,7 +88,6 @@
   private List<EndpointAffinity> endpointAffinities;
   private final Path selectionRoot;
   private final int maxRecords;
-  private final boolean supportsLimitPushdown;
 
   @JsonCreator
   public EasyGroupScan(
@@ -106,7 +106,6 @@
     this.columns = columns == null ? ALL_COLUMNS : columns;
     this.selectionRoot = selectionRoot;
     this.maxRecords = getMaxRecords();
-    this.supportsLimitPushdown = formatPlugin.easyConfig().supportsLimitPushdown;
     this.metadataProvider = defaultTableMetadataProviderBuilder(new FileSystemMetadataProviderManager())
         .withSelection(selection)
         .withSchema(schema)
@@ -142,7 +141,6 @@
     this.usedMetastore = metadataProviderManager.usesMetastore();
     initFromSelection(selection, formatPlugin);
     checkMetadataConsistency(selection, formatPlugin.getFsConf());
-    this.supportsLimitPushdown = formatPlugin.easyConfig().supportsLimitPushdown;
     this.maxRecords = getMaxRecords();
   }
 
@@ -180,7 +178,6 @@
     partitionDepth = that.partitionDepth;
     metadataProvider = that.metadataProvider;
     maxRecords = getMaxRecords();
-    supportsLimitPushdown = that.formatPlugin.easyConfig().supportsLimitPushdown;
   }
 
   @JsonIgnore
@@ -402,7 +399,7 @@
       EasyGroupScan newScan = new EasyGroupScan((EasyGroupScan) source);
       newScan.tableMetadata = tableMetadata;
       // updates common row count and nulls counts for every column
-      if (newScan.getTableMetadata() != null && files != null && newScan.getFilesMetadata().size() != files.size()) {
+      if (newScan.getTableMetadata() != null && MapUtils.isNotEmpty(files) && newScan.getFilesMetadata().size() != files.size()) {
         newScan.tableMetadata = TableMetadataUtils.updateRowCount(newScan.getTableMetadata(), files.values());
       }
       newScan.partitions = partitions;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasySubScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasySubScan.java
index b9a6db2..dd31b90 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasySubScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasySubScan.java
@@ -110,5 +110,5 @@
   public int getMaxRecords() { return maxRecords; }
 
   @Override
-  public int getOperatorType() { return formatPlugin.getReaderOperatorType(); }
+  public String getOperatorType() { return formatPlugin.getReaderOperatorType(); }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyWriter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyWriter.java
index 2dec8c8..7197678 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyWriter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/easy/EasyWriter.java
@@ -31,12 +31,9 @@
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 @JsonTypeName("fs-writer")
 public class EasyWriter extends AbstractWriter {
-  static final Logger logger = LoggerFactory.getLogger(EasyWriter.class);
 
   private final String location;
   private final List<String> partitionColumns;
@@ -98,7 +95,7 @@
   }
 
   @Override
-  public int getOperatorType() {
+  public String getOperatorType() {
     return formatPlugin.getWriterOperatorType();
   }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/direct/DirectSubScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/direct/DirectSubScan.java
index 89b0ef6..e4fcff9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/direct/DirectSubScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/direct/DirectSubScan.java
@@ -21,7 +21,6 @@
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
 import org.apache.drill.exec.physical.base.AbstractSubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.RecordReader;
 
 import com.fasterxml.jackson.annotation.JsonTypeInfo;
@@ -32,7 +31,8 @@
 @JsonTypeName("direct-sub-scan")
 public class DirectSubScan extends AbstractSubScan {
 
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DirectSubScan.class);
+  public static final String OPERATOR_TYPE = "DIRECT_SUB_SCAN";
+
   @JsonTypeInfo(use=NAME, include=WRAPPER_OBJECT)
   private final RecordReader reader;
 
@@ -43,14 +43,13 @@
   }
 
   @JsonProperty
-  //@JsonGetter("reader")
   public RecordReader getReader() {
     return reader;
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.DIRECT_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java
index 7204686..34da747 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/json/JSONFormatPlugin.java
@@ -33,7 +33,6 @@
 import org.apache.drill.exec.planner.common.DrillStatsTable;
 import org.apache.drill.exec.planner.common.DrillStatsTable.TableStatistics;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.RecordReader;
 import org.apache.drill.exec.store.RecordWriter;
@@ -61,11 +60,14 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 
 public class JSONFormatPlugin extends EasyFormatPlugin<JSONFormatConfig> {
+
   private static final Logger logger = LoggerFactory.getLogger(JSONFormatPlugin.class);
   public static final String DEFAULT_NAME = "json";
 
   private static final boolean IS_COMPRESSIBLE = true;
 
+  public static final String OPERATOR_TYPE = "JSON_SUB_SCAN";
+
   public JSONFormatPlugin(String name, DrillbitContext context,
       Configuration fsConf, StoragePluginConfig storageConfig) {
     this(name, context, fsConf, storageConfig, new JSONFormatConfig(null));
@@ -215,13 +217,13 @@
   }
 
   @Override
-  public int getReaderOperatorType() {
-    return CoreOperatorType.JSON_SUB_SCAN_VALUE;
+  public String getReaderOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
-  public int getWriterOperatorType() {
-     return CoreOperatorType.JSON_WRITER_VALUE;
+  public String getWriterOperatorType() {
+     return "JSON_WRITER";
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileBatchReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileBatchReader.java
new file mode 100644
index 0000000..b5d80f4
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileBatchReader.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.easy.sequencefile;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.drill.exec.util.ImpersonationUtil;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SequenceFileBatchReader implements ManagedReader<FileSchemaNegotiator> {
+
+  private static final Logger logger = LoggerFactory.getLogger(SequenceFileBatchReader.class);
+
+  private final SequenceFileFormatConfig config;
+  private final EasySubScan scan;
+  private FileSplit split;
+  private String queryUserName;
+  private String opUserName;
+  public static final String KEY_SCHEMA = "binary_key";
+  public static final String VALUE_SCHEMA = "binary_value";
+  private final BytesWritable key = new BytesWritable();
+  private final BytesWritable value = new BytesWritable();
+  private final int maxRecords;
+  private RowSetLoader loader;
+  private ScalarWriter keyWriter;
+  private ScalarWriter valueWriter;
+  private RecordReader<BytesWritable, BytesWritable> reader;
+  private CustomErrorContext errorContext;
+  private Stopwatch watch;
+
+  public SequenceFileBatchReader(SequenceFileFormatConfig config, EasySubScan scan) {
+    this.config = config;
+    this.scan = scan;
+    this.maxRecords = scan.getMaxRecords();
+  }
+
+  private TupleMetadata defineMetadata() {
+    SchemaBuilder builder = new SchemaBuilder();
+    builder.addNullable(KEY_SCHEMA, MinorType.VARBINARY);
+    builder.addNullable(VALUE_SCHEMA, MinorType.VARBINARY);
+    return builder.buildSchema();
+  }
+
+  private void processReader(FileSchemaNegotiator negotiator) throws ExecutionSetupException {
+    final SequenceFileAsBinaryInputFormat inputFormat = new SequenceFileAsBinaryInputFormat();
+    split = negotiator.split();
+    // Once the split is defined, also set up the errorContext.
+    errorContext = negotiator.parentErrorContext();
+    opUserName = scan.getUserName();
+    queryUserName = negotiator.context().getFragmentContext().getQueryUserName();
+    final JobConf jobConf = new JobConf(negotiator.fileSystem().getConf());
+    jobConf.setInputFormat(inputFormat.getClass());
+    reader = getRecordReader(inputFormat, jobConf);
+  }
+
+  private RecordReader<BytesWritable, BytesWritable> getRecordReader(
+    final InputFormat<BytesWritable, BytesWritable> inputFormat, final JobConf jobConf)
+    throws ExecutionSetupException {
+    try {
+      final UserGroupInformation ugi = ImpersonationUtil.createProxyUgi(opUserName, queryUserName);
+      return ugi.doAs(new PrivilegedExceptionAction<RecordReader<BytesWritable, BytesWritable>>() {
+        @Override
+        public RecordReader<BytesWritable, BytesWritable> run() throws Exception {
+          return inputFormat.getRecordReader(split, jobConf, Reporter.NULL);
+        }
+      });
+    } catch (IOException | InterruptedException e) {
+      throw UserException
+              .dataReadError(e)
+              .message("Error in creating sequencefile reader for file: %s, start: %d, length: %d. "
+               + e.getMessage(), split.getPath(), split.getStart(), split.getLength())
+              .addContext(errorContext)
+              .build(logger);
+    }
+  }
+
+  @Override
+  public boolean open(FileSchemaNegotiator negotiator) {
+    negotiator.tableSchema(defineMetadata(), true);
+    logger.debug("Config: {}, selection root: {}, columns: {}", config, scan.getSelectionRoot(), scan.getColumns());
+    // Open the sequence file and create the underlying record reader.
+    try {
+      processReader(negotiator);
+    } catch (ExecutionSetupException e) {
+      throw UserException
+        .dataReadError(e)
+        .message("Failure initializing sequencefile reader. " + e.getMessage())
+        .addContext(errorContext)
+        .build(logger);
+    }
+    ResultSetLoader setLoader = negotiator.build();
+    loader = setLoader.writer();
+    keyWriter = loader.scalar(KEY_SCHEMA);
+    valueWriter = loader.scalar(VALUE_SCHEMA);
+    return true;
+  }
+
+  @Override
+  public boolean next() {
+    int recordCount = 0;
+    if (watch == null) {
+      watch = Stopwatch.createStarted();
+    }
+    try {
+      while (!loader.isFull()) {
+        if (reader.next(key, value)) {
+          loader.start();
+          keyWriter.setBytes(key.getBytes(), key.getLength());
+          valueWriter.setBytes(value.getBytes(), value.getLength());
+          loader.save();
+          ++recordCount;
+        } else {
+          logger.debug("Read {} records in {} ms", recordCount, watch.elapsed(TimeUnit.MILLISECONDS));
+          return false;
+        }
+        if (loader.limitReached(maxRecords)) {
+          return false;
+        }
+      }
+    } catch (IOException e) {
+      throw UserException
+              .dataReadError(e)
+              .message("An error occurred while reading the next key/value pair from the sequencefile reader. "
+               + e.getMessage())
+              .addContext(errorContext)
+              .build(logger);
+    }
+    return true;
+  }
+
+  @Override
+  public void close() {
+    try {
+      // The reader does not implement AutoCloseable, so it must be closed explicitly.
+      if (reader != null) {
+        reader.close();
+        reader = null;
+      }
+    } catch (IOException e) {
+      throw UserException
+              .dataReadError(e)
+              .message("Failed closing sequencefile reader. " + e.getMessage())
+              .addContext(errorContext)
+              .build(logger);
+    }
+  }
+}
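
For orientation, the enhanced-scan framework drives a ManagedReader through an open/next/close lifecycle rather than the old setup()/next() vector API. A simplified sketch of that control flow, assuming the framework supplies the negotiator (the real scan operator also handles projection and harvests each batch from the ResultSetLoader; nothing below is a real Drill entry point):

import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
import org.apache.drill.exec.store.dfs.easy.EasySubScan;
import org.apache.drill.exec.store.easy.sequencefile.SequenceFileBatchReader;
import org.apache.drill.exec.store.easy.sequencefile.SequenceFileFormatConfig;

public class ReaderLifecycleSketch {
  // Hypothetical driver loop; in practice the enhanced-scan operator owns it.
  static void drive(SequenceFileFormatConfig config, EasySubScan scan, FileSchemaNegotiator negotiator) {
    SequenceFileBatchReader reader = new SequenceFileBatchReader(config, scan);
    try {
      if (!reader.open(negotiator)) {   // binds the schema and opens the Hadoop SequenceFile reader
        return;                         // nothing to read
      }
      while (reader.next()) {           // each call fills one batch via the RowSetLoader
        // the framework harvests the completed batch here
      }
    } finally {
      reader.close();                   // closes the underlying RecordReader
    }
  }
}
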
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatConfig.java
index 0572ca7..3e9b570 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatConfig.java
@@ -17,29 +17,31 @@
  */
 package org.apache.drill.exec.store.easy.sequencefile;
 
+import java.util.List;
+import java.util.Objects;
+
+import org.apache.drill.common.PlanStringBuilder;
+import org.apache.drill.common.logical.FormatPluginConfig;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
+
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
-import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
-import org.apache.drill.common.PlanStringBuilder;
-import org.apache.drill.common.logical.FormatPluginConfig;
 
-import java.util.List;
-import java.util.Objects;
-
-@JsonTypeName("sequencefile") @JsonInclude(JsonInclude.Include.NON_DEFAULT)
+@JsonTypeName(SequenceFileFormatConfig.NAME)
+@JsonInclude(JsonInclude.Include.NON_DEFAULT)
 public class SequenceFileFormatConfig implements FormatPluginConfig {
 
+  public static final String NAME = "sequencefile";
   private final List<String> extensions;
 
   @JsonCreator
-  public SequenceFileFormatConfig(
-      @JsonProperty("extensions") List<String> extensions) {
-    this.extensions = extensions == null ?
-        ImmutableList.of() : ImmutableList.copyOf(extensions);
+  public SequenceFileFormatConfig(@JsonProperty("extensions") List<String> extensions) {
+    this.extensions = extensions == null ? ImmutableList.of() : ImmutableList.copyOf(extensions);
   }
 
+  @JsonProperty("extensions")
   public List<String> getExtensions() {
     return extensions;
   }
@@ -63,8 +65,7 @@
 
   @Override
   public String toString() {
-    return new PlanStringBuilder(this)
-        .field("extensions", extensions)
-        .toString();
+    return new PlanStringBuilder(this).field("extensions", extensions).toString();
   }
+
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatPlugin.java
index 9e55448..6da129e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileFormatPlugin.java
@@ -17,92 +17,78 @@
  */
 package org.apache.drill.exec.store.easy.sequencefile;
 
-import java.io.IOException;
-import java.util.List;
-
 import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.base.AbstractGroupScan;
-import org.apache.drill.exec.planner.common.DrillStatsTable.TableStatistics;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileReaderFactory;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileScanBuilder;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
 import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.store.RecordWriter;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.store.dfs.FileSelection;
+import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
-import org.apache.drill.exec.store.dfs.easy.EasyGroupScan;
-import org.apache.drill.exec.store.dfs.easy.EasyWriter;
-import org.apache.drill.exec.store.dfs.easy.FileWork;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.FileSplit;
 
 public class SequenceFileFormatPlugin extends EasyFormatPlugin<SequenceFileFormatConfig> {
-  public SequenceFileFormatPlugin(String name, DrillbitContext context, Configuration fsConf,
-                                  StoragePluginConfig storageConfig) {
-    this(name, context, fsConf, storageConfig, new SequenceFileFormatConfig(null));
+
+  public static final String OPERATOR_TYPE = "SEQUENCE_SUB_SCAN";
+
+  public SequenceFileFormatPlugin(String name,
+                                  DrillbitContext context,
+                                  Configuration fsConf,
+                                  StoragePluginConfig storageConfig,
+                                  SequenceFileFormatConfig formatConfig) {
+    super(name, easyConfig(fsConf, formatConfig), context, storageConfig, formatConfig);
   }
 
-  public SequenceFileFormatPlugin(String name, DrillbitContext context, Configuration fsConf,
-                                  StoragePluginConfig storageConfig, SequenceFileFormatConfig formatConfig) {
-    super(name, context, fsConf, storageConfig, formatConfig,
-      true, false, /* splittable = */ true, /* compressible = */ true,
-      formatConfig.getExtensions(), "sequencefile");
+  private static EasyFormatConfig easyConfig(Configuration fsConf, SequenceFileFormatConfig pluginConfig) {
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(true)
+        .compressible(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .readerOperatorType(OPERATOR_TYPE)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .supportsProjectPushdown(true)
+        .defaultName(SequenceFileFormatConfig.NAME)
+        .build();
+  }
+
+  private static class SequenceFileReaderFactory extends FileReaderFactory {
+
+    private final SequenceFileFormatConfig config;
+    private final EasySubScan scan;
+
+    public SequenceFileReaderFactory(SequenceFileFormatConfig config, EasySubScan scan) {
+      this.config = config;
+      this.scan = scan;
+    }
+
+    @Override
+    public ManagedReader<? extends FileSchemaNegotiator> newReader() {
+      return new SequenceFileBatchReader(config, scan);
+    }
+
   }
 
   @Override
-  public boolean supportsPushDown() {
-    return true;
+  public ManagedReader<? extends FileSchemaNegotiator> newBatchReader(EasySubScan scan, OptionManager options)
+      throws ExecutionSetupException {
+    return new SequenceFileBatchReader(formatConfig, scan);
   }
 
   @Override
-  public AbstractGroupScan getGroupScan(String userName, FileSelection selection, List<SchemaPath> columns)
-    throws IOException {
-    return new EasyGroupScan(userName, selection, this, columns, selection.selectionRoot, null);
-  }
+  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) throws ExecutionSetupException {
+    FileScanBuilder builder = new FileScanBuilder();
+    builder.setReaderFactory(new SequenceFileReaderFactory(formatConfig, scan));
 
-  @Override
-  public boolean supportsStatistics() {
-    return false;
-  }
-
-  @Override
-  public TableStatistics readStatistics(FileSystem fs, Path statsTablePath) throws IOException {
-    throw new UnsupportedOperationException("unimplemented");
-  }
-
-  @Override
-  public void writeStatistics(TableStatistics statistics, FileSystem fs, Path statsTablePath) throws IOException {
-    throw new UnsupportedOperationException("unimplemented");
-  }
-
-  @Override
-  public RecordReader getRecordReader(FragmentContext context,
-                                      DrillFileSystem dfs,
-                                      FileWork fileWork,
-                                      List<SchemaPath> columns,
-                                      String userName) throws ExecutionSetupException {
-    final Path path = dfs.makeQualified(fileWork.getPath());
-    final FileSplit split = new FileSplit(path, fileWork.getStart(), fileWork.getLength(), new String[]{""});
-    return new SequenceFileRecordReader(split, dfs, context.getQueryUserName(), userName);
-  }
-
-  @Override
-  public int getReaderOperatorType() {
-    return CoreOperatorType.SEQUENCE_SUB_SCAN_VALUE;
-  }
-
-  @Override
-  public RecordWriter getRecordWriter(FragmentContext context, EasyWriter writer) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public int getWriterOperatorType() {
-    throw new UnsupportedOperationException();
+    initScanBuilder(builder, scan);
+    builder.nullType(Types.optional(MinorType.VARCHAR));
+    return builder;
   }
 }
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileRecordReader.java
deleted file mode 100644
index 7f9b993..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/sequencefile/SequenceFileRecordReader.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.easy.sequencefile;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.concurrent.TimeUnit;
-import java.security.PrivilegedExceptionAction;
-
-import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.ops.OperatorContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.store.AbstractRecordReader;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.util.ImpersonationUtil;
-import org.apache.drill.exec.vector.NullableVarBinaryVector;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat;
-import org.apache.hadoop.security.UserGroupInformation;
-
-
-public class SequenceFileRecordReader extends AbstractRecordReader {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SequenceFileRecordReader.class);
-
-  private static final int PER_BATCH_RECORD_COUNT = 4096;
-  private static final int PER_BATCH_BYTES = 256*1024;
-
-  private static final MajorType KEY_TYPE = Types.optional(TypeProtos.MinorType.VARBINARY);
-  private static final MajorType VALUE_TYPE = Types.optional(TypeProtos.MinorType.VARBINARY);
-
-  private final String keySchema = "binary_key";
-  private final String valueSchema = "binary_value";
-
-  private NullableVarBinaryVector keyVector;
-  private NullableVarBinaryVector valueVector;
-  private final FileSplit split;
-  private org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable> reader;
-  private final BytesWritable key = new BytesWritable();
-  private final BytesWritable value = new BytesWritable();
-  private final DrillFileSystem dfs;
-  private final String queryUserName;
-  private final String opUserName;
-
-  public SequenceFileRecordReader(final FileSplit split,
-                                  final DrillFileSystem dfs,
-                                  final String queryUserName,
-                                  final String opUserName) {
-    final List<SchemaPath> columns = new ArrayList<>();
-    columns.add(SchemaPath.getSimplePath(keySchema));
-    columns.add(SchemaPath.getSimplePath(valueSchema));
-    setColumns(columns);
-    this.dfs = dfs;
-    this.split = split;
-    this.queryUserName = queryUserName;
-    this.opUserName = opUserName;
-  }
-
-  @Override
-  protected boolean isSkipQuery() {
-    return false;
-  }
-
-  private org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable> getRecordReader(
-    final InputFormat<BytesWritable, BytesWritable> inputFormat,
-    final JobConf jobConf) throws ExecutionSetupException {
-    try {
-      final UserGroupInformation ugi = ImpersonationUtil.createProxyUgi(this.opUserName, this.queryUserName);
-      return ugi.doAs(new PrivilegedExceptionAction<org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable>>() {
-        @Override
-        public org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable> run() throws Exception {
-          return inputFormat.getRecordReader(split, jobConf, Reporter.NULL);
-        }
-      });
-    } catch (IOException | InterruptedException e) {
-      throw new ExecutionSetupException(
-        String.format("Error in creating sequencefile reader for file: %s, start: %d, length: %d",
-          split.getPath(), split.getStart(), split.getLength()), e);
-    }
-  }
-
-  @Override
-  public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
-    final SequenceFileAsBinaryInputFormat inputFormat = new SequenceFileAsBinaryInputFormat();
-    final JobConf jobConf = new JobConf(dfs.getConf());
-    jobConf.setInputFormat(inputFormat.getClass());
-    reader = getRecordReader(inputFormat, jobConf);
-    final MaterializedField keyField = MaterializedField.create(keySchema, KEY_TYPE);
-    final MaterializedField valueField = MaterializedField.create(valueSchema, VALUE_TYPE);
-    try {
-      keyVector = output.addField(keyField, NullableVarBinaryVector.class);
-      valueVector = output.addField(valueField, NullableVarBinaryVector.class);
-    } catch (SchemaChangeException sce) {
-      throw new ExecutionSetupException("Error in setting up sequencefile reader.", sce);
-    }
-  }
-
-  @Override
-  public int next() {
-    final Stopwatch watch = Stopwatch.createStarted();
-    if (keyVector != null) {
-      keyVector.clear();
-      keyVector.allocateNew();
-    }
-    if (valueVector != null) {
-      valueVector.clear();
-      valueVector.allocateNew();
-    }
-    int recordCount = 0;
-    int batchSize = 0;
-    try {
-      while (recordCount < PER_BATCH_RECORD_COUNT && batchSize < PER_BATCH_BYTES && reader.next(key, value)) {
-        keyVector.getMutator().setSafe(recordCount, key.getBytes(), 0, key.getLength());
-        valueVector.getMutator().setSafe(recordCount, value.getBytes(), 0, value.getLength());
-        batchSize += (key.getLength() + value.getLength());
-        ++recordCount;
-      }
-      keyVector.getMutator().setValueCount(recordCount);
-      valueVector.getMutator().setValueCount(recordCount);
-      logger.debug("Read {} records in {} ms", recordCount, watch.elapsed(TimeUnit.MILLISECONDS));
-      return recordCount;
-    } catch (IOException ioe) {
-      close();
-      throw UserException.dataReadError(ioe).addContext("File Path", split.getPath().toString()).build(logger);
-    }
-  }
-
-  @Override
-  public void close() {
-    try {
-      if (reader != null) {
-        reader.close();
-        reader = null;
-      }
-    } catch (IOException e) {
-      logger.warn("Exception closing reader: {}", e);
-    }
-  }
-
-  @Override
-  public String toString() {
-    long position = -1L;
-    try {
-      if (reader != null) {
-        position = reader.getPos();
-      }
-    } catch (IOException e) {
-      logger.trace("Unable to obtain reader position.", e);
-    }
-    return "SequenceFileRecordReader[File=" + split.getPath()
-        + ", Position=" + position
-        + "]";
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java
index 3dd1e89..3cfba3a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/TextFormatPlugin.java
@@ -46,7 +46,6 @@
 import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.metadata.Propertied;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
@@ -83,6 +82,7 @@
  * as to support provided schema.)
  */
 public class TextFormatPlugin extends EasyFormatPlugin<TextFormatPlugin.TextFormatConfig> {
+
   private final static String PLUGIN_NAME = "text";
 
   public static final int MAXIMUM_NUMBER_COLUMNS = 64 * 1024;
@@ -109,6 +109,8 @@
   public static final String TRIM_WHITESPACE_PROP = TEXT_PREFIX + "trim";
   public static final String PARSE_UNESCAPED_QUOTES_PROP = TEXT_PREFIX + "parseQuotes";
 
+  public static final String WRITER_OPERATOR_TYPE = "TEXT_WRITER";
+
   @JsonTypeName(PLUGIN_NAME)
   @JsonInclude(Include.NON_DEFAULT)
   public static class TextFormatConfig implements FormatPluginConfig {
@@ -143,8 +145,8 @@
       this.quote = Strings.isNullOrEmpty(quote) ? '"' : quote.charAt(0);
       this.escape = Strings.isNullOrEmpty(escape) ? '"' : escape.charAt(0);
       this.comment = Strings.isNullOrEmpty(comment) ? '#' : comment.charAt(0);
-      this.skipFirstLine = skipFirstLine == null ? false : skipFirstLine;
-      this.extractHeader = extractHeader == null ? false : extractHeader;
+      this.skipFirstLine = skipFirstLine != null && skipFirstLine;
+      this.extractHeader = extractHeader != null && extractHeader;
     }
 
     public TextFormatConfig() {
@@ -231,20 +233,19 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, TextFormatConfig pluginConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = true;
-    config.blockSplittable = true;
-    config.compressible = true;
-    config.supportsProjectPushdown = true;
-    config.extensions = pluginConfig.getExtensions();
-    config.fsConf = fsConf;
-    config.defaultName = PLUGIN_NAME;
-    config.readerOperatorType = CoreOperatorType.TEXT_SUB_SCAN_VALUE;
-    config.writerOperatorType = CoreOperatorType.TEXT_WRITER_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(true)
+        .blockSplittable(true)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(PLUGIN_NAME)
+        .writerOperatorType(WRITER_OPERATOR_TYPE)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/DrillDataContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/DrillDataContext.java
new file mode 100644
index 0000000..72237d2
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/DrillDataContext.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.enumerable;
+
+import org.apache.calcite.DataContext;
+import org.apache.calcite.adapter.java.JavaTypeFactory;
+import org.apache.calcite.linq4j.QueryProvider;
+import org.apache.calcite.schema.SchemaPlus;
+
+import java.util.Map;
+
+public class DrillDataContext implements DataContext {
+  private final SchemaPlus rootSchema;
+  private final JavaTypeFactory typeFactory;
+  private final Map<String, Object> properties;
+
+  public DrillDataContext(
+      SchemaPlus rootSchema, JavaTypeFactory typeFactory, Map<String, Object> properties) {
+    this.rootSchema = rootSchema;
+    this.typeFactory = typeFactory;
+    this.properties = properties;
+  }
+
+  @Override
+  public SchemaPlus getRootSchema() {
+    return rootSchema;
+  }
+
+  @Override
+  public JavaTypeFactory getTypeFactory() {
+    return typeFactory;
+  }
+
+  @Override
+  public QueryProvider getQueryProvider() {
+    return null;
+  }
+
+  @Override
+  public Object get(String name) {
+    return properties.get(name);
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableBatchCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableBatchCreator.java
new file mode 100644
index 0000000..9c4ca54
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableBatchCreator.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.enumerable;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.ops.ExecutorFragmentContext;
+import org.apache.drill.exec.physical.impl.BatchCreator;
+import org.apache.drill.exec.physical.impl.scan.framework.BasicScanFactory;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedScanFramework;
+import org.apache.drill.exec.physical.impl.scan.framework.SchemaNegotiator;
+import org.apache.drill.exec.record.CloseableRecordBatch;
+import org.apache.drill.exec.record.RecordBatch;
+import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
+
+import java.util.Collections;
+import java.util.List;
+
+@SuppressWarnings("unused")
+public class EnumerableBatchCreator implements BatchCreator<EnumerableSubScan> {
+
+  @Override
+  public CloseableRecordBatch getBatch(ExecutorFragmentContext context,
+      EnumerableSubScan subScan, List<RecordBatch> children) throws ExecutionSetupException {
+    Preconditions.checkArgument(children.isEmpty());
+
+    try {
+      ManagedScanFramework.ScanFrameworkBuilder builder = createBuilder(subScan);
+      return builder.buildScanOperator(context, subScan);
+    } catch (UserException e) {
+      // Rethrow user exceptions directly
+      throw e;
+    } catch (Throwable e) {
+      // Wrap all others
+      throw new ExecutionSetupException(e);
+    }
+  }
+
+  private ManagedScanFramework.ScanFrameworkBuilder createBuilder(EnumerableSubScan subScan) {
+    ManagedScanFramework.ScanFrameworkBuilder builder = new ManagedScanFramework.ScanFrameworkBuilder();
+    builder.projection(subScan.getColumns());
+    builder.setUserName(subScan.getUserName());
+    builder.providedSchema(subScan.getSchema());
+
+    ManagedReader<SchemaNegotiator> reader = new EnumerableRecordReader(subScan.getColumns(),
+        subScan.getFieldsMap(), subScan.getCode(), subScan.getSchemaPath());
+    ManagedScanFramework.ReaderFactory readerFactory = new BasicScanFactory(Collections.singletonList(reader).iterator());
+    builder.setReaderFactory(readerFactory);
+    builder.nullType(Types.optional(TypeProtos.MinorType.VARCHAR));
+    return builder;
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableGroupScan.java
new file mode 100644
index 0000000..815a5c2
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableGroupScan.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.enumerable;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.physical.base.AbstractGroupScan;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.base.ScanStats;
+import org.apache.drill.exec.physical.base.SubScan;
+import org.apache.drill.exec.proto.CoordinationProtos;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
+
+import java.util.List;
+import java.util.Map;
+import java.util.StringJoiner;
+
+public class EnumerableGroupScan extends AbstractGroupScan {
+  private final String code;
+  private final String schemaPath;
+  private final Map<String, Integer> fieldsMap;
+  private final List<SchemaPath> columns;
+  private final double rows;
+  private final TupleMetadata schema;
+
+  @JsonCreator
+  public EnumerableGroupScan(
+      @JsonProperty("sql") String code,
+      @JsonProperty("columns") List<SchemaPath> columns,
+      @JsonProperty("fieldsMap") Map<String, Integer> fieldsMap,
+      @JsonProperty("rows") double rows,
+      @JsonProperty("schema") TupleMetadata schema,
+      @JsonProperty("schemaPath") String schemaPath) {
+    super("");
+    this.code = code;
+    this.columns = columns;
+    this.fieldsMap = fieldsMap;
+    this.rows = rows;
+    this.schema = schema;
+    this.schemaPath = schemaPath;
+  }
+
+  @Override
+  public void applyAssignments(List<CoordinationProtos.DrillbitEndpoint> endpoints) {
+  }
+
+  @Override
+  public SubScan getSpecificScan(int minorFragmentId) {
+    return new EnumerableSubScan(code, columns, fieldsMap, schema, schemaPath);
+  }
+
+  @Override
+  public int getMaxParallelizationWidth() {
+    return 1;
+  }
+
+  @Override
+  public ScanStats getScanStats() {
+    return new ScanStats(
+        ScanStats.GroupScanProperty.NO_EXACT_ROW_COUNT,
+        (long) Math.max(rows, 1),
+        1,
+        1);
+  }
+
+  public String getCode() {
+    return code;
+  }
+
+  @Override
+  public List<SchemaPath> getColumns() {
+    return columns;
+  }
+
+  public Map<String, Integer> getFieldsMap() {
+    return fieldsMap;
+  }
+
+  public double getRows() {
+    return rows;
+  }
+
+  public TupleMetadata getSchema() {
+    return schema;
+  }
+
+  public String getSchemaPath() {
+    return schemaPath;
+  }
+
+  @Override
+  public String getDigest() {
+    return toString();
+  }
+
+  @Override
+  public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
+    Preconditions.checkArgument(children.isEmpty());
+    return new EnumerableGroupScan(code, columns, fieldsMap, rows, schema, schemaPath);
+  }
+
+  @Override
+  public String toString() {
+    return new StringJoiner(", ", EnumerableGroupScan.class.getSimpleName() + "[", "]")
+        .add("code='" + code + "'")
+        .add("columns=" + columns)
+        .add("fieldsMap=" + fieldsMap)
+        .add("rows=" + rows)
+        .toString();
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableRecordReader.java
new file mode 100644
index 0000000..66e8a24
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableRecordReader.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.enumerable;
+
+import org.apache.calcite.DataContext;
+import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.util.BuiltInMethod;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.compile.ClassBuilder;
+import org.apache.drill.exec.exception.ClassTransformationException;
+import org.apache.drill.exec.ops.OperatorContext;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.impl.scan.framework.SchemaNegotiator;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.planner.sql.SchemaUtilites;
+import org.apache.drill.exec.record.ColumnConverter;
+import org.apache.drill.exec.record.ColumnConverterFactory;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.record.metadata.TupleSchema;
+import org.codehaus.commons.compiler.CompileException;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.StreamSupport;
+
+/**
+ * {@link ManagedReader} implementation that compiles and executes the specified code,
+ * invokes its method to obtain the values, and reads the results using column converters.
+ */
+public class EnumerableRecordReader implements ManagedReader<SchemaNegotiator> {
+
+  private static final String CLASS_NAME = "Baz";
+
+  private final List<SchemaPath> columns;
+
+  private final Map<String, Integer> fieldsMap;
+
+  private final String code;
+
+  private final String schemaPath;
+
+  private ColumnConverter converter;
+
+  private Iterator<Map<String, Object>> records;
+
+  private ResultSetLoader loader;
+
+  public EnumerableRecordReader(List<SchemaPath> columns, Map<String, Integer> fieldsMap, String code, String schemaPath) {
+    this.columns = columns;
+    this.fieldsMap = fieldsMap;
+    this.code = code;
+    this.schemaPath = schemaPath;
+  }
+
+  @SuppressWarnings("unchecked")
+  private void setup(OperatorContext context) {
+    SchemaPlus rootSchema = context.getFragmentContext().getFullRootSchema();
+    DataContext root = new DrillDataContext(
+        schemaPath != null ? SchemaUtilites.searchSchemaTree(rootSchema, SchemaUtilites.getSchemaPathAsList(schemaPath)) : rootSchema,
+        new JavaTypeFactoryImpl(),
+        Collections.emptyMap());
+
+    try {
+      Class<?> implementationClass = ClassBuilder.getCompiledClass(code, CLASS_NAME,
+          context.getFragmentContext().getConfig(), context.getFragmentContext().getOptions());
+      Iterable<?> iterable =
+          (Iterable<Map<String, Object>>) implementationClass.getMethod(BuiltInMethod.BINDABLE_BIND.method.getName(), DataContext.class)
+              .invoke(implementationClass.newInstance(), root);
+      if (fieldsMap.keySet().size() == 1) {
+        // for the case of projecting a single column, its value is returned
+        records = StreamSupport.stream(iterable.spliterator(), false)
+            .map(this::wrap)
+            .iterator();
+      } else {
+        // for the case when all columns are projected, an array is returned
+        records = StreamSupport.stream(iterable.spliterator(), false)
+            .map(row -> wrap((Object[]) row))
+            .iterator();
+      }
+    } catch (CompileException | IOException | ClassTransformationException | ReflectiveOperationException e) {
+      throw new RuntimeException("Exception happened when executing generated code", e.getCause());
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private Map<String, Object> wrap(Object[] values) {
+    Map<String, Object> row = new HashMap<>();
+    columns.stream()
+        .map(SchemaPath::getRootSegmentPath)
+        .forEach(fieldName -> {
+          if (fieldName.equals(SchemaPath.DYNAMIC_STAR)) {
+            row.putAll((Map<? extends String, ?>) values[fieldsMap.get(fieldName)]);
+          } else {
+            row.put(fieldName, values[fieldsMap.get(fieldName)]);
+          }
+        });
+    return row;
+  }
+
+  @SuppressWarnings("unchecked")
+  private Map<String, Object> wrap(Object value) {
+    SchemaPath schemaPath = columns.iterator().next();
+    if (schemaPath.equals(SchemaPath.STAR_COLUMN)) {
+      return (Map<String, Object>) value;
+    }
+    return Collections.singletonMap(schemaPath.getRootSegmentPath(), value);
+  }
+
+  @Override
+  public boolean open(SchemaNegotiator negotiator) {
+    TupleMetadata providedSchema = negotiator.providedSchema();
+    loader = negotiator.build();
+    setup(negotiator.context());
+    ColumnConverterFactory factory = new ColumnConverterFactory(providedSchema);
+    converter = factory.getRootConverter(providedSchema, new TupleSchema(), loader.writer());
+    return true;
+  }
+
+  @Override
+  public boolean next() {
+    RowSetLoader rowWriter = loader.writer();
+    while (!rowWriter.isFull()) {
+      if (records.hasNext()) {
+        processRecord(rowWriter, records.next());
+      } else {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  private void processRecord(RowSetLoader writer, Map<String, Object> record) {
+    writer.start();
+    converter.convert(record);
+    writer.save();
+  }
+
+  @Override
+  public void close() {
+  }
+}
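The new EnumerableRecordReader runs Calcite-generated code reflectively: ClassBuilder compiles the source, the reader instantiates the resulting class (named "Baz") and invokes its bind(DataContext) method to obtain an Iterable of rows. Below is a minimal, self-contained sketch of that reflective pattern; the DataContext interface and the Baz class here are simplified stand-ins for illustration only, not the real Drill/Calcite types.

import java.util.Arrays;

// Simplified stand-in for org.apache.calcite.DataContext.
interface DataContext {
  Object get(String name);
}

// Shaped like the class Drill compiles from the generated source:
// a no-arg constructor plus a bind(DataContext) method yielding the result rows.
class Baz {
  public Iterable<Object[]> bind(DataContext root) {
    return Arrays.asList(new Object[] {1, "alice"}, new Object[] {2, "bob"});
  }
}

public class ReflectiveBindSketch {
  public static void main(String[] args) throws Exception {
    // In Drill this class comes from ClassBuilder.getCompiledClass(code, "Baz", ...).
    Class<?> implementationClass = Baz.class;
    DataContext root = name -> null; // the reader passes a DrillDataContext here

    @SuppressWarnings("unchecked")
    Iterable<Object[]> rows = (Iterable<Object[]>) implementationClass
        .getMethod("bind", DataContext.class)
        .invoke(implementationClass.getDeclaredConstructor().newInstance(), root);

    for (Object[] row : rows) {
      System.out.println(Arrays.toString(row));
    }
  }
}

In the actual reader, each returned row (or single value) is then wrapped into a name-to-value map using fieldsMap before being written through the column converters.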
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableSubScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableSubScan.java
new file mode 100644
index 0000000..85d282b
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/EnumerableSubScan.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.enumerable;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.physical.base.AbstractSubScan;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+
+import java.util.List;
+import java.util.Map;
+
+public class EnumerableSubScan extends AbstractSubScan {
+
+  public static final String OPERATOR_TYPE = "ENUMERABLE_SUB_SCAN";
+
+  private final String code;
+  private final String schemaPath;
+  private final List<SchemaPath> columns;
+  private final Map<String, Integer> fieldsMap;
+  private final TupleMetadata schema;
+
+  @JsonCreator
+  public EnumerableSubScan(
+      @JsonProperty("code") String code,
+      @JsonProperty("columns") List<SchemaPath> columns,
+      @JsonProperty("fieldsMap") Map<String, Integer> fieldsMap,
+      @JsonProperty("schema") TupleMetadata schema,
+      @JsonProperty("schemaPath") String schemaPath) {
+    super("");
+    this.code = code;
+    this.columns = columns;
+    this.fieldsMap = fieldsMap;
+    this.schema = schema;
+    this.schemaPath = schemaPath;
+  }
+
+  @Override
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
+  }
+
+  public TupleMetadata getSchema() {
+    return schema;
+  }
+
+  public String getCode() {
+    return code;
+  }
+
+  public List<SchemaPath> getColumns() {
+    return columns;
+  }
+
+  public Map<String, Integer> getFieldsMap() {
+    return fieldsMap;
+  }
+
+  public String getSchemaPath() {
+    return schemaPath;
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerableIntermediatePrel.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerableIntermediatePrel.java
new file mode 100644
index 0000000..a7efc04
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerableIntermediatePrel.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.enumerable.plan;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.planner.physical.PhysicalPlanCreator;
+import org.apache.drill.exec.planner.physical.Prel;
+import org.apache.drill.exec.planner.physical.SinglePrel;
+import org.apache.drill.exec.planner.physical.visitor.PrelVisitor;
+import org.apache.drill.exec.planner.sql.handlers.PrelFinalizable;
+import org.apache.drill.exec.record.BatchSchema;
+
+import java.util.List;
+
+public class EnumerableIntermediatePrel extends SinglePrel implements PrelFinalizable {
+
+  private final EnumerablePrelContext context;
+
+  public EnumerableIntermediatePrel(RelOptCluster cluster, RelTraitSet traits, RelNode child, EnumerablePrelContext context) {
+    super(cluster, traits, child);
+    this.context = context;
+  }
+
+  @Override
+  public PhysicalOperator getPhysicalOperator(PhysicalPlanCreator creator) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
+    return new EnumerableIntermediatePrel(getCluster(), traitSet, inputs.iterator().next(), context);
+  }
+
+  @Override
+  protected Object clone() {
+    return copy(getTraitSet(), getInputs());
+  }
+
+  @Override
+  public BatchSchema.SelectionVectorMode getEncoding() {
+    return BatchSchema.SelectionVectorMode.NONE;
+  }
+
+  @Override
+  public Prel finalizeRel() {
+    return new EnumerablePrel(getCluster(), getTraitSet(), getInput(), context);
+  }
+
+  @Override
+  public <T, X, E extends Throwable> T accept(PrelVisitor<T, X, E> logicalVisitor, X value) {
+    throw new UnsupportedOperationException("This needs to be finalized before using a PrelVisitor.");
+  }
+
+  @Override
+  public boolean needsFinalColumnReordering() {
+    return false;
+  }
+}
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerableIntermediatePrelConverterRule.java
similarity index 60%
copy from contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java
copy to exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerableIntermediatePrelConverterRule.java
index 7107e9f..615ec61 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerableIntermediatePrelConverterRule.java
@@ -15,25 +15,33 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.jdbc;
-
-import java.util.function.Predicate;
+package org.apache.drill.exec.store.enumerable.plan;
 
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.convert.ConverterRule;
 import org.apache.drill.exec.planner.logical.DrillRel;
 import org.apache.drill.exec.planner.logical.DrillRelFactories;
+import org.apache.drill.exec.planner.physical.Prel;
 
-class JdbcDrelConverterRule extends ConverterRule {
+import java.util.function.Predicate;
 
-  JdbcDrelConverterRule(DrillJdbcConvention in) {
-    super(RelNode.class, (Predicate<RelNode>) input -> true, in, DrillRel.DRILL_LOGICAL,
-        DrillRelFactories.LOGICAL_BUILDER, "JDBC_DREL_Converter" + in.getName());
+public class EnumerableIntermediatePrelConverterRule extends ConverterRule {
+
+  private final EnumerablePrelContext context;
+
+  public EnumerableIntermediatePrelConverterRule(EnumerablePrelContext context) {
+    super(VertexDrel.class, (Predicate<RelNode>) input -> true, DrillRel.DRILL_LOGICAL,
+        Prel.DRILL_PHYSICAL, DrillRelFactories.LOGICAL_BUILDER,
+        "EnumerableIntermediatePrelConverterRule:" + context.getPlanPrefix());
+    this.context = context;
   }
 
   @Override
   public RelNode convert(RelNode in) {
-    return new JdbcDrel(in.getCluster(), in.getTraitSet().replace(DrillRel.DRILL_LOGICAL),
-        convert(in, in.getTraitSet().replace(this.getInTrait()).simplify()));
+    return new EnumerableIntermediatePrel(
+        in.getCluster(),
+        in.getTraitSet().replace(getOutTrait()),
+        in.getInput(0),
+        context);
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerablePrel.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerablePrel.java
new file mode 100644
index 0000000..0df66b5
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerablePrel.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.enumerable.plan;
+
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.AbstractRelNode;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelWriter;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.planner.common.DrillRelOptUtil;
+import org.apache.drill.exec.planner.physical.LeafPrel;
+import org.apache.drill.exec.planner.physical.PhysicalPlanCreator;
+import org.apache.drill.exec.planner.physical.visitor.PrelVisitor;
+import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.record.metadata.schema.SchemaProvider;
+import org.apache.drill.exec.store.enumerable.EnumerableGroupScan;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * {@link LeafPrel} implementation that generates Java code that may be executed to obtain results
+ * for the provided plan part.
+ */
+public class EnumerablePrel extends AbstractRelNode implements LeafPrel {
+  private final String code;
+  private final String schemaPath;
+  private final String plan;
+  private final double rows;
+  private final Map<String, Integer> fieldsMap;
+  private final TupleMetadata schema;
+  private final String planPrefix;
+
+  public EnumerablePrel(RelOptCluster cluster, RelTraitSet traitSet, RelNode input, EnumerablePrelContext context) {
+    super(cluster, traitSet);
+    this.rowType = input.getRowType();
+    this.rows = input.estimateRowCount(cluster.getMetadataQuery());
+
+    this.planPrefix = context.getPlanPrefix();
+    RelNode transformedNode = context.transformNode(input);
+
+    this.fieldsMap = context.getFieldsMap(transformedNode);
+
+    this.plan = RelOptUtil.toString(transformedNode);
+    this.code = context.generateCode(cluster, transformedNode);
+
+    this.schemaPath = context.getTablePath(input);
+    try {
+      TableScan scan = Objects.requireNonNull(DrillRelOptUtil.findScan(input));
+      SchemaProvider schemaProvider = DrillRelOptUtil.getDrillTable(scan)
+          .getMetadataProviderManager().getSchemaProvider();
+      this.schema = schemaProvider != null ? schemaProvider.read().getSchema() : null;
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public PhysicalOperator getPhysicalOperator(PhysicalPlanCreator creator) {
+    List<SchemaPath> columns = rowType.getFieldNames().stream()
+        .map(SchemaPath::getSimplePath)
+        .collect(Collectors.toList());
+    EnumerableGroupScan groupScan = new EnumerableGroupScan(code, columns, fieldsMap, rows, schema, schemaPath);
+    return creator.addMetadata(this, groupScan);
+  }
+
+  @Override
+  public RelWriter explainTerms(RelWriter pw) {
+    pw.item(planPrefix, plan);
+    return super.explainTerms(pw);
+  }
+
+  @Override
+  public double estimateRowCount(RelMetadataQuery mq) {
+    return rows;
+  }
+
+  @Override
+  public <T, X, E extends Throwable> T accept(PrelVisitor<T, X, E> logicalVisitor, X value) throws E {
+    return logicalVisitor.visitLeaf(this, value);
+  }
+
+  @Override
+  public BatchSchema.SelectionVectorMode[] getSupportedEncodings() {
+    return BatchSchema.SelectionVectorMode.DEFAULT;
+  }
+
+  @Override
+  public BatchSchema.SelectionVectorMode getEncoding() {
+    return BatchSchema.SelectionVectorMode.NONE;
+  }
+}
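EnumerablePrel captures at plan time everything the run-time reader needs: the generated source, the row-count estimate, the field-position map, and the optional provided schema, and hands them to an EnumerableGroupScan in getPhysicalOperator(). A rough sketch of that plan-time/run-time handoff follows, using a hypothetical GeneratedScanSpec holder rather than the real Drill classes.

import java.util.Map;

// Hypothetical stand-in for the data EnumerableGroupScan/EnumerableSubScan carry.
class GeneratedScanSpec {
  final String code;                    // Java source produced at plan time
  final Map<String, Integer> fieldsMap; // column name -> position in a generated row
  final double rows;                    // row-count estimate used for ScanStats

  GeneratedScanSpec(String code, Map<String, Integer> fieldsMap, double rows) {
    this.code = code;
    this.fieldsMap = fieldsMap;
    this.rows = rows;
  }
}

public class PlanTimeHandoffSketch {
  // Plan time: the Prel turns the transformed RelNode into source code plus row metadata.
  static GeneratedScanSpec plan() {
    String generated = "public class Baz { /* produced by the enumerable implementor */ }";
    return new GeneratedScanSpec(generated, Map.of("id", 0, "name", 1), 42.0);
  }

  // Run time: a reader would compile spec.code and map row positions via spec.fieldsMap.
  public static void main(String[] args) {
    GeneratedScanSpec spec = plan();
    System.out.println("rows ~" + spec.rows + ", fields " + spec.fieldsMap);
    System.out.println("source to compile at run time:\n" + spec.code);
  }
}

In this change, the group scan and sub scan play the role of GeneratedScanSpec, carrying the code and metadata from the planner to EnumerableRecordReader.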
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Column.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerablePrelContext.java
similarity index 64%
copy from exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Column.java
copy to exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerablePrelContext.java
index 109b7dd..43b9216 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Column.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/EnumerablePrelContext.java
@@ -15,14 +15,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.pcapng.schema;
+package org.apache.drill.exec.store.enumerable.plan;
 
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.exec.vector.ValueVector;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.rel.RelNode;
 
-public interface Column {
-  TypeProtos.MajorType getMinorType();
+import java.util.Map;
 
-  void process(IEnhancedPacketBLock block, ValueVector vv, int count);
+public interface EnumerablePrelContext {
+
+  String generateCode(RelOptCluster cluster, RelNode elasticNode);
+
+  RelNode transformNode(RelNode input);
+
+  Map<String, Integer> getFieldsMap(RelNode transformedNode);
+
+  String getPlanPrefix();
+
+  String getTablePath(RelNode input);
 }
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrel.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/VertexDrel.java
similarity index 73%
rename from contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrel.java
rename to exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/VertexDrel.java
index 14e29d6..edbc591 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrel.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/VertexDrel.java
@@ -15,9 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.jdbc;
-
-import java.util.List;
+package org.apache.drill.exec.store.enumerable.plan;
 
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
@@ -27,19 +25,25 @@
 import org.apache.drill.exec.planner.logical.DrillImplementor;
 import org.apache.drill.exec.planner.logical.DrillRel;
 
-public class JdbcDrel extends SingleRel implements DrillRel {
+import java.util.List;
 
-  public JdbcDrel(RelOptCluster cluster, RelTraitSet traits, RelNode child) {
-    super(cluster, traits, child);
+/**
+ * The vertex simply holds its child node but carries its own traits.
+ * It is used to complete Drill logical planning when child nodes have specific traits.
+ */
+public class VertexDrel extends SingleRel implements DrillRel {
+
+  public VertexDrel(RelOptCluster cluster, RelTraitSet traits, RelNode input) {
+    super(cluster, traits, input);
   }
 
   @Override
   public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
-    return new JdbcDrel(getCluster(), traitSet, inputs.iterator().next());
+    return new VertexDrel(getCluster(), traitSet, inputs.iterator().next());
   }
 
   @Override
-  protected Object clone() throws CloneNotSupportedException {
+  protected Object clone() {
     return copy(getTraitSet(), getInputs());
   }
 
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/VertexDrelConverterRule.java
similarity index 76%
rename from contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java
rename to exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/VertexDrelConverterRule.java
index 7107e9f..f69ba7c 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/enumerable/plan/VertexDrelConverterRule.java
@@ -15,25 +15,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.drill.exec.store.jdbc;
+package org.apache.drill.exec.store.enumerable.plan;
 
-import java.util.function.Predicate;
-
+import org.apache.calcite.plan.Convention;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.convert.ConverterRule;
 import org.apache.drill.exec.planner.logical.DrillRel;
 import org.apache.drill.exec.planner.logical.DrillRelFactories;
 
-class JdbcDrelConverterRule extends ConverterRule {
+import java.util.function.Predicate;
 
-  JdbcDrelConverterRule(DrillJdbcConvention in) {
+public class VertexDrelConverterRule extends ConverterRule {
+
+  public VertexDrelConverterRule(Convention in) {
     super(RelNode.class, (Predicate<RelNode>) input -> true, in, DrillRel.DRILL_LOGICAL,
-        DrillRelFactories.LOGICAL_BUILDER, "JDBC_DREL_Converter" + in.getName());
+        DrillRelFactories.LOGICAL_BUILDER, "VertexDrelConverterRule" + in.getName());
   }
 
   @Override
   public RelNode convert(RelNode in) {
-    return new JdbcDrel(in.getCluster(), in.getTraitSet().replace(DrillRel.DRILL_LOGICAL),
+    return new VertexDrel(in.getCluster(), in.getTraitSet().replace(DrillRel.DRILL_LOGICAL),
         convert(in, in.getTraitSet().replace(this.getInTrait()).simplify()));
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatConfig.java
deleted file mode 100644
index 0aa7ece..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatConfig.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.httpd;
-
-import java.util.Objects;
-
-import org.apache.drill.common.PlanStringBuilder;
-import org.apache.drill.common.logical.FormatPluginConfig;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeName;
-
-@JsonTypeName("httpd")
-@JsonInclude(JsonInclude.Include.NON_DEFAULT)
-public class HttpdLogFormatConfig implements FormatPluginConfig {
-
-  public static final String DEFAULT_TS_FORMAT = "dd/MMM/yyyy:HH:mm:ss ZZ";
-
-  // No extensions?
-  private final String logFormat;
-  private final String timestampFormat;
-
-  @JsonCreator
-  public HttpdLogFormatConfig(
-      @JsonProperty("logFormat") String logFormat,
-      @JsonProperty("timestampFormat") String timestampFormat) {
-    this.logFormat = logFormat;
-    this.timestampFormat = timestampFormat == null
-        ? DEFAULT_TS_FORMAT : timestampFormat;
-  }
-
-  /**
-   * @return the log formatting string. This string is the config string from
-   *         httpd.conf or similar config file.
-   */
-  public String getLogFormat() {
-    return logFormat;
-  }
-
-  /**
-   * @return the timestampFormat
-   */
-  public String getTimestampFormat() {
-    return timestampFormat;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(logFormat, timestampFormat);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    HttpdLogFormatConfig that = (HttpdLogFormatConfig) o;
-    return Objects.equals(logFormat, that.logFormat) &&
-           Objects.equals(timestampFormat, that.timestampFormat);
-  }
-
-  @Override
-  public String toString() {
-    return new PlanStringBuilder(this)
-        .field("log format", logFormat)
-        .field("timestamp format", timestampFormat)
-        .toString();
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java
deleted file mode 100644
index 7bcb0a4..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.httpd;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-
-import nl.basjes.parse.core.exceptions.DissectionFailure;
-import nl.basjes.parse.core.exceptions.InvalidDissectorException;
-import nl.basjes.parse.core.exceptions.MissingDissectorsException;
-
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.ops.OperatorContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
-import org.apache.drill.exec.planner.common.DrillStatsTable.TableStatistics;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.AbstractRecordReader;
-import org.apache.drill.exec.store.RecordWriter;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
-import org.apache.drill.exec.store.dfs.easy.EasyWriter;
-import org.apache.drill.exec.store.dfs.easy.FileWork;
-import org.apache.drill.exec.vector.complex.impl.VectorContainerWriter;
-import org.apache.drill.exec.vector.complex.writer.BaseWriter.ComplexWriter;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.LineRecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.TextInputFormat;
-import java.util.Collections;
-import java.util.Map;
-import org.apache.drill.exec.store.RecordReader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class HttpdLogFormatPlugin extends EasyFormatPlugin<HttpdLogFormatConfig> {
-  private static final Logger logger = LoggerFactory.getLogger(HttpdLogFormatPlugin.class);
-
-  private static final String PLUGIN_EXTENSION = "httpd";
-  private static final int VECTOR_MEMORY_ALLOCATION = 4095;
-
-  public HttpdLogFormatPlugin(final String name, final DrillbitContext context, final Configuration fsConf,
-                              final StoragePluginConfig storageConfig, final HttpdLogFormatConfig formatConfig) {
-
-    super(name, context, fsConf, storageConfig, formatConfig, true, false, true, true,
-            Collections.singletonList(PLUGIN_EXTENSION), PLUGIN_EXTENSION);
-  }
-
-  @Override
-  public boolean supportsStatistics() {
-    return false;
-  }
-
-  @Override
-  public TableStatistics readStatistics(FileSystem fs, Path statsTablePath) {
-    throw new UnsupportedOperationException("unimplemented");
-  }
-
-  @Override
-  public void writeStatistics(TableStatistics statistics, FileSystem fs, Path statsTablePath) {
-    throw new UnsupportedOperationException("unimplemented");
-  }
-
-  /**
-   * Reads httpd logs lines terminated with a newline character.
-   */
-  private class HttpdLogRecordReader extends AbstractRecordReader {
-
-    private final DrillFileSystem fs;
-    private final FileWork work;
-    private final FragmentContext fragmentContext;
-    private ComplexWriter writer;
-    private HttpdParser parser;
-    private LineRecordReader lineReader;
-    private LongWritable lineNumber;
-
-    public HttpdLogRecordReader(final FragmentContext context, final DrillFileSystem fs, final FileWork work, final List<SchemaPath> columns) {
-      this.fs = fs;
-      this.work = work;
-      this.fragmentContext = context;
-      setColumns(columns);
-    }
-
-    /**
-     * The query fields passed in are formatted in a way that Drill requires.
-     * Those must be cleaned up to work with the parser.
-     *
-     * @return Map with Drill field names as a key and Parser Field names as a
-     *         value
-     */
-    private Map<String, String> makeParserFields() {
-      Map<String, String> fieldMapping = new HashMap<>();
-      for (final SchemaPath sp : getColumns()) {
-        String drillField = sp.getRootSegment().getPath();
-        try {
-          String parserField = HttpdParser.parserFormattedFieldName(drillField);
-          fieldMapping.put(drillField, parserField);
-        } catch (Exception e) {
-          logger.info("Putting field: {} into map", drillField, e);
-        }
-      }
-      return fieldMapping;
-    }
-
-    @Override
-    public void setup(final OperatorContext context, final OutputMutator output) throws ExecutionSetupException {
-      try {
-        /*
-         * Extract the list of field names for the parser to use if it is NOT a star query. If it is a star query just
-         * pass through an empty map, because the parser is going to have to build all possibilities.
-         */
-        final Map<String, String> fieldMapping = !isStarQuery() ? makeParserFields() : null;
-        writer = new VectorContainerWriter(output);
-
-        parser = new HttpdParser(writer.rootAsMap(), context.getManagedBuffer(),
-                HttpdLogFormatPlugin.this.getConfig().getLogFormat(),
-                HttpdLogFormatPlugin.this.getConfig().getTimestampFormat(),
-                fieldMapping);
-
-        final Path path = fs.makeQualified(work.getPath());
-        FileSplit split = new FileSplit(path, work.getStart(), work.getLength(), new String[]{""});
-        TextInputFormat inputFormat = new TextInputFormat();
-        JobConf job = new JobConf(fs.getConf());
-        job.setInt("io.file.buffer.size", fragmentContext.getConfig().getInt(ExecConstants.TEXT_LINE_READER_BUFFER_SIZE));
-        job.setInputFormat(inputFormat.getClass());
-        lineReader = (LineRecordReader) inputFormat.getRecordReader(split, job, Reporter.NULL);
-        lineNumber = lineReader.createKey();
-      } catch (NoSuchMethodException | MissingDissectorsException | InvalidDissectorException e) {
-        throw handleAndGenerate("Failure creating HttpdParser", e);
-      } catch (IOException e) {
-        throw handleAndGenerate("Failure creating HttpdRecordReader", e);
-      }
-    }
-
-    private RuntimeException handleAndGenerate(final String s, final Exception e) {
-      throw UserException.dataReadError(e)
-              .message(s + "\n%s", e.getMessage())
-              .addContext("Path", work.getPath())
-              .addContext("Split Start", work.getStart())
-              .addContext("Split Length", work.getLength())
-              .addContext("Local Line Number", lineNumber.get())
-              .build(logger);
-    }
-
-    /**
-     * This record reader is given a batch of records (lines) to read. Next acts upon a batch of records.
-     *
-     * @return Number of records in this batch.
-     */
-    @Override
-    public int next() {
-      try {
-        final Text line = lineReader.createValue();
-
-        writer.allocate();
-        writer.reset();
-
-        int recordCount = 0;
-        while (recordCount < VECTOR_MEMORY_ALLOCATION && lineReader.next(lineNumber, line)) {
-          writer.setPosition(recordCount);
-          parser.parse(line.toString());
-          recordCount++;
-        }
-        writer.setValueCount(recordCount);
-
-        return recordCount;
-      } catch (DissectionFailure | InvalidDissectorException | MissingDissectorsException | IOException e) {
-        throw handleAndGenerate("Failure while parsing log record.", e);
-      }
-    }
-
-    @Override
-    public void close() throws Exception {
-      try {
-        if (lineReader != null) {
-          lineReader.close();
-        }
-      } catch (IOException e) {
-        logger.warn("Failure while closing Httpd reader.", e);
-      }
-    }
-
-    @Override
-    public String toString() {
-      return "HttpdLogRecordReader[Path=" + work.getPath()
-              + ", Start=" + work.getStart()
-              + ", Length=" + work.getLength()
-              + ", Line=" + lineNumber.get()
-              + "]";
-    }
-  }
-
-  /**
-   * This plugin supports pushing project down into the parser. Only fields
-   * specifically asked for within the configuration will be parsed. If no
-   * fields are asked for then all possible fields will be returned.
-   *
-   * @return true
-   */
-  @Override
-  public boolean supportsPushDown() {
-    return true;
-  }
-
-  @Override
-  public RecordReader getRecordReader(final FragmentContext context, final DrillFileSystem dfs,
-      final FileWork fileWork, final List<SchemaPath> columns, final String userName) {
-    return new HttpdLogRecordReader(context, dfs, fileWork, columns);
-  }
-
-  @Override
-  public RecordWriter getRecordWriter(final FragmentContext context, final EasyWriter writer) {
-    throw new UnsupportedOperationException("Drill doesn't currently support writing HTTPd logs");
-  }
-
-  @Override
-  public int getReaderOperatorType() {
-    return CoreOperatorType.HTPPD_LOG_SUB_SCAN_VALUE;
-  }
-
-  @Override
-  public int getWriterOperatorType() {
-    throw new UnsupportedOperationException();
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java
deleted file mode 100644
index 45c251d..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.httpd;
-
-import org.apache.drill.shaded.guava.com.google.common.base.Charsets;
-import org.apache.drill.shaded.guava.com.google.common.collect.Maps;
-import io.netty.buffer.DrillBuf;
-
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-
-import nl.basjes.parse.core.Casts;
-import nl.basjes.parse.core.Parser;
-import org.apache.drill.exec.vector.complex.writer.BaseWriter.MapWriter;
-import org.apache.drill.exec.vector.complex.writer.BigIntWriter;
-import org.apache.drill.exec.vector.complex.writer.Float8Writer;
-import org.apache.drill.exec.vector.complex.writer.VarCharWriter;
-import org.apache.drill.exec.vector.complex.writer.TimeStampWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.text.SimpleDateFormat;
-import java.util.Date;
-
-public class HttpdLogRecord {
-
-  private static final Logger logger = LoggerFactory.getLogger(HttpdLogRecord.class);
-
-  private final Map<String, VarCharWriter> strings = Maps.newHashMap();
-  private final Map<String, BigIntWriter> longs = Maps.newHashMap();
-  private final Map<String, Float8Writer> doubles = Maps.newHashMap();
-  private final Map<String, TimeStampWriter> times = new HashMap<>();
-  private final Map<String, MapWriter> wildcards = Maps.newHashMap();
-  private final Map<String, String> cleanExtensions = Maps.newHashMap();
-  private final Map<String, MapWriter> startedWildcards = Maps.newHashMap();
-  private final Map<String, MapWriter> wildcardWriters = Maps.newHashMap();
-  private final SimpleDateFormat dateFormatter;
-  private DrillBuf managedBuffer;
-  private String timeFormat;
-
-  public HttpdLogRecord(final DrillBuf managedBuffer, final String timeFormat) {
-    this.managedBuffer = managedBuffer;
-    this.timeFormat = timeFormat;
-    this.dateFormatter = new SimpleDateFormat(this.timeFormat);
-  }
-
-  /**
-   * Call this method after a record has been parsed. It finishes the lifecycle of any maps that were written and
-   * clears all the entries so that the next record can be written.
-   */
-  public void finishRecord() {
-    for (MapWriter writer : wildcardWriters.values()) {
-      writer.end();
-    }
-    wildcardWriters.clear();
-    startedWildcards.clear();
-  }
-
-  private DrillBuf buf(final int size) {
-    if (managedBuffer.capacity() < size) {
-      managedBuffer = managedBuffer.reallocIfNeeded(size);
-    }
-    return managedBuffer;
-  }
-
-  private void writeString(VarCharWriter writer, String value) {
-    final byte[] stringBytes = value.getBytes(Charsets.UTF_8);
-    final DrillBuf stringBuffer = buf(stringBytes.length);
-    stringBuffer.clear();
-    stringBuffer.writeBytes(stringBytes);
-    writer.writeVarChar(0, stringBytes.length, stringBuffer);
-  }
-
-  /**
-   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
-   * called when the value of a log field is a String data type.
-   *
-   * @param field name of field
-   * @param value value of field
-   */
-  @SuppressWarnings("unused")
-  public void set(String field, String value) {
-    if (value != null) {
-      final VarCharWriter w = strings.get(field);
-      if (w != null) {
-        logger.trace("Parsed field: {}, as string: {}", field, value);
-        writeString(w, value);
-      } else {
-        logger.warn("No 'string' writer found for field: {}", field);
-      }
-    }
-  }
-
-  /**
-   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
-   * called when the value of a log field is a Long data type.
-   *
-   * @param field name of field
-   * @param value value of field
-   */
-  @SuppressWarnings("unused")
-  public void set(String field, Long value) {
-    if (value != null) {
-      final BigIntWriter w = longs.get(field);
-      if (w != null) {
-        logger.trace("Parsed field: {}, as long: {}", field, value);
-        w.writeBigInt(value);
-      } else {
-        logger.warn("No 'long' writer found for field: {}", field);
-      }
-    }
-  }
-
-  /**
-   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
-   * called when the value of a log field is a timestamp data type.
-   *
-   * @param field name of field
-   * @param value value of field
-   */
-  @SuppressWarnings("unused")
-  public void setTimestamp(String field, String value) {
-    if (value != null) {
-      //Convert the date string into a long
-      long ts = 0;
-      try {
-        Date d = this.dateFormatter.parse(value);
-        ts = d.getTime();
-      } catch (Exception e) {
-        //If the date formatter does not successfully create a date, the timestamp will fall back to zero
-        //Do not throw exception
-      }
-      final TimeStampWriter tw = times.get(field);
-      if (tw != null) {
-        logger.trace("Parsed field: {}, as time: {}", field, value);
-        tw.writeTimeStamp(ts);
-      } else {
-        logger.warn("No 'timestamp' writer found for field: {}", field);
-      }
-    }
-  }
-
-  /**
-   * This method is referenced and called via reflection. This is added as a parsing target for the parser. It will get
-   * called when the value of a log field is a Double data type.
-   *
-   * @param field name of field
-   * @param value value of field
-   */
-  @SuppressWarnings("unused")
-  public void set(String field, Double value) {
-    if (value != null) {
-      final Float8Writer w = doubles.get(field);
-      if (w != null) {
-        logger.trace("Parsed field: {}, as double: {}", field, value);
-        w.writeFloat8(value);
-      } else {
-        logger.warn("No 'double' writer found for field: {}", field);
-      }
-    }
-  }
-
-  /**
-   * This method is referenced and called via reflection. When the parser processes a field like:
-   * HTTP.URI:request.firstline.uri.query.*, where the star is an arbitrary field name found by the parser, this
-   * method will be invoked. <br>
-   *
-   * @param field name of field
-   * @param value value of field
-   */
-  @SuppressWarnings("unused")
-  public void setWildcard(String field, String value) {
-    if (value != null) {
-      final MapWriter mapWriter = getWildcardWriter(field);
-      logger.trace("Parsed wildcard field: {}, as string: {}", field, value);
-      final VarCharWriter w = mapWriter.varChar(cleanExtensions.get(field));
-      writeString(w, value);
-    }
-  }
-
-  /**
-   * This method is referenced and called via reflection. When the parser processes a field like:
-   * HTTP.URI:request.firstline.uri.query.*, where the star is an arbitrary field name found by the parser, this
-   * method will be invoked. <br>
-   *
-   * @param field name of field
-   * @param value value of field
-   */
-  @SuppressWarnings("unused")
-  public void setWildcard(String field, Long value) {
-    if (value != null) {
-      final MapWriter mapWriter = getWildcardWriter(field);
-      logger.trace("Parsed wildcard field: {}, as long: {}", field, value);
-      final BigIntWriter w = mapWriter.bigInt(cleanExtensions.get(field));
-      w.writeBigInt(value);
-    }
-  }
-
-  /**
-   * This method is referenced and called via reflection. When the parser processes a field like:
-   * HTTP.URI:request.firstline.uri.query.*, where the star is an arbitrary field name found by the parser, this
-   * method will be invoked. <br>
-   *
-   * @param field name of field
-   * @param value value of field
-   */
-  @SuppressWarnings("unused")
-  public void setWildcard(String field, Double value) {
-    if (value != null) {
-      final MapWriter mapWriter = getWildcardWriter(field);
-      logger.trace("Parsed wildcard field: {}, as double: {}", field, value);
-      final Float8Writer w = mapWriter.float8(cleanExtensions.get(field));
-      w.writeFloat8(value);
-    }
-  }
-
-  /**
-   * For a configuration like HTTP.URI:request.firstline.uri.query.*, a writer was created with the name
-   * HTTP.URI:request.firstline.uri.query. We traverse the list of wildcard writers to see which one is the root of
-   * the name of the field passed in, like HTTP.URI:request.firstline.uri.query.old. That is the writer entry needed.
-   *
-   * @param field like HTTP.URI:request.firstline.uri.query.old where 'old' is one of many different parameter names.
-   * @return the writer to be used for this field.
-   */
-  private MapWriter getWildcardWriter(String field) {
-    MapWriter writer = startedWildcards.get(field);
-    if (writer == null) {
-      for (Map.Entry<String, MapWriter> entry : wildcards.entrySet()) {
-        final String root = entry.getKey();
-        if (field.startsWith(root)) {
-          writer = entry.getValue();
-
-          /**
-           * In order to save some time, store the cleaned version of the field extension. It is possible it will have
-           * unsafe characters in it.
-           */
-          if (!cleanExtensions.containsKey(field)) {
-            final String extension = field.substring(root.length() + 1);
-            final String cleanExtension = HttpdParser.drillFormattedFieldName(extension);
-            cleanExtensions.put(field, cleanExtension);
-            logger.debug("Added extension: field='{}' with cleanExtension='{}'", field, cleanExtension);
-          }
-
-          /**
-           * We already know we have the writer, but if we have put this writer in the started list, do NOT call start
-           * again.
-           */
-          if (!wildcardWriters.containsKey(root)) {
-            /**
-             * Start and store this root map writer for later retrieval.
-             */
-            logger.debug("Starting new wildcard field writer: {}", field);
-            writer.start();
-            startedWildcards.put(field, writer);
-            wildcardWriters.put(root, writer);
-          }
-
-          /**
-           * Break out of the for loop when we find a root writer that matches the field.
-           */
-          break;
-        }
-      }
-    }
-
-    return writer;
-  }
-
-  public Map<String, VarCharWriter> getStrings() {
-    return strings;
-  }
-
-  public Map<String, BigIntWriter> getLongs() {
-    return longs;
-  }
-
-  public Map<String, Float8Writer> getDoubles() {
-    return doubles;
-  }
-
-  public Map<String, TimeStampWriter> getTimes() {
-    return times;
-  }
-
-  /**
-   * This record will be used with a single parser. For each field that is to be parsed a setter will be called. It
-   * registers a setter method for each field being parsed. It also builds the data writers to hold the data being
-   * parsed.
-   *
-   * @param parser
-   * @param mapWriter
-   * @param type
-   * @param parserFieldName
-   * @param drillFieldName
-   * @throws NoSuchMethodException
-   */
-  public void addField(final Parser<HttpdLogRecord> parser, final MapWriter mapWriter, final EnumSet<Casts> type, final String parserFieldName, final String drillFieldName) throws NoSuchMethodException {
-    final boolean hasWildcard = parserFieldName.endsWith(HttpdParser.PARSER_WILDCARD);
-
-    /**
-     * This is a dynamic way to map the setter for each specified field type. <br/>
-     * e.g. a TIME.STAMP may map to a LONG while a referrer may map to a STRING
-     */
-    if (hasWildcard) {
-      final String cleanName = parserFieldName.substring(0, parserFieldName.length() - HttpdParser.PARSER_WILDCARD.length());
-      logger.debug("Adding WILDCARD parse target: {} as {}, with field name: {}", parserFieldName, cleanName, drillFieldName);
-      parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, String.class), parserFieldName);
-      parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, Double.class), parserFieldName);
-      parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, Long.class), parserFieldName);
-      wildcards.put(cleanName, mapWriter.map(drillFieldName));
-    } else if (type.contains(Casts.DOUBLE)) {
-      logger.debug("Adding DOUBLE parse target: {}, with field name: {}", parserFieldName, drillFieldName);
-      parser.addParseTarget(this.getClass().getMethod("set", String.class, Double.class), parserFieldName);
-      doubles.put(parserFieldName, mapWriter.float8(drillFieldName));
-    } else if (type.contains(Casts.LONG)) {
-      logger.debug("Adding LONG parse target: {}, with field name: {}", parserFieldName, drillFieldName);
-      parser.addParseTarget(this.getClass().getMethod("set", String.class, Long.class), parserFieldName);
-      longs.put(parserFieldName, mapWriter.bigInt(drillFieldName));
-    } else {
-      logger.debug("Adding STRING parse target: {}, with field name: {}", parserFieldName, drillFieldName);
-      if (parserFieldName.startsWith("TIME.STAMP:")) {
-        parser.addParseTarget(this.getClass().getMethod("setTimestamp", String.class, String.class), parserFieldName);
-        times.put(parserFieldName, mapWriter.timeStamp(drillFieldName));
-      } else {
-        parser.addParseTarget(this.getClass().getMethod("set", String.class, String.class), parserFieldName);
-        strings.put(parserFieldName, mapWriter.varChar(drillFieldName));
-      }
-    }
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java
deleted file mode 100644
index 7da7a95..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.httpd;
-
-import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
-import org.apache.drill.shaded.guava.com.google.common.collect.Maps;
-import io.netty.buffer.DrillBuf;
-import nl.basjes.parse.core.Casts;
-import nl.basjes.parse.core.Parser;
-import nl.basjes.parse.core.exceptions.DissectionFailure;
-import nl.basjes.parse.core.exceptions.InvalidDissectorException;
-import nl.basjes.parse.core.exceptions.MissingDissectorsException;
-import nl.basjes.parse.httpdlog.HttpdLoglineParser;
-import org.apache.drill.exec.vector.complex.writer.BaseWriter.MapWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-public class HttpdParser {
-
-  private static final Logger logger = LoggerFactory.getLogger(HttpdParser.class);
-
-  public static final String PARSER_WILDCARD = ".*";
-  public static final String SAFE_WILDCARD = "_$";
-  public static final String SAFE_SEPARATOR = "_";
-  public static final String REMAPPING_FLAG = "#";
-  private final Parser<HttpdLogRecord> parser;
-  private final HttpdLogRecord record;
-
-  public static final HashMap<String, String> LOGFIELDS = new HashMap<String, String>();
-
-  static {
-    LOGFIELDS.put("connection.client.ip", "IP:connection.client.ip");
-    LOGFIELDS.put("connection.client.ip.last", "IP:connection.client.ip.last");
-    LOGFIELDS.put("connection.client.ip.original", "IP:connection.client.ip.original");
-    LOGFIELDS.put("connection.client.ip.last", "IP:connection.client.ip.last");
-    LOGFIELDS.put("connection.client.peerip", "IP:connection.client.peerip");
-    LOGFIELDS.put("connection.client.peerip.last", "IP:connection.client.peerip.last");
-    LOGFIELDS.put("connection.client.peerip.original", "IP:connection.client.peerip.original");
-    LOGFIELDS.put("connection.client.peerip.last", "IP:connection.client.peerip.last");
-    LOGFIELDS.put("connection.server.ip", "IP:connection.server.ip");
-    LOGFIELDS.put("connection.server.ip.last", "IP:connection.server.ip.last");
-    LOGFIELDS.put("connection.server.ip.original", "IP:connection.server.ip.original");
-    LOGFIELDS.put("connection.server.ip.last", "IP:connection.server.ip.last");
-    LOGFIELDS.put("response.body.bytes", "BYTES:response.body.bytes");
-    LOGFIELDS.put("response.body.bytes.last", "BYTES:response.body.bytes.last");
-    LOGFIELDS.put("response.body.bytes.original", "BYTES:response.body.bytes.original");
-    LOGFIELDS.put("response.body.bytes.last", "BYTES:response.body.bytes.last");
-    LOGFIELDS.put("response.body.bytesclf", "BYTES:response.body.bytesclf");
-    LOGFIELDS.put("response.body.bytes", "BYTESCLF:response.body.bytes");
-    LOGFIELDS.put("response.body.bytes.last", "BYTESCLF:response.body.bytes.last");
-    LOGFIELDS.put("response.body.bytes.original", "BYTESCLF:response.body.bytes.original");
-    LOGFIELDS.put("response.body.bytes.last", "BYTESCLF:response.body.bytes.last");
-    LOGFIELDS.put("request.cookies.foobar", "HTTP.COOKIE:request.cookies.foobar");
-    LOGFIELDS.put("server.environment.foobar", "VARIABLE:server.environment.foobar");
-    LOGFIELDS.put("server.filename", "FILENAME:server.filename");
-    LOGFIELDS.put("server.filename.last", "FILENAME:server.filename.last");
-    LOGFIELDS.put("server.filename.original", "FILENAME:server.filename.original");
-    LOGFIELDS.put("server.filename.last", "FILENAME:server.filename.last");
-    LOGFIELDS.put("connection.client.host", "IP:connection.client.host");
-    LOGFIELDS.put("connection.client.host.last", "IP:connection.client.host.last");
-    LOGFIELDS.put("connection.client.host.original", "IP:connection.client.host.original");
-    LOGFIELDS.put("connection.client.host.last", "IP:connection.client.host.last");
-    LOGFIELDS.put("request.protocol", "PROTOCOL:request.protocol");
-    LOGFIELDS.put("request.protocol.last", "PROTOCOL:request.protocol.last");
-    LOGFIELDS.put("request.protocol.original", "PROTOCOL:request.protocol.original");
-    LOGFIELDS.put("request.protocol.last", "PROTOCOL:request.protocol.last");
-    LOGFIELDS.put("request.header.foobar", "HTTP.HEADER:request.header.foobar");
-    LOGFIELDS.put("request.trailer.foobar", "HTTP.TRAILER:request.trailer.foobar");
-    LOGFIELDS.put("connection.keepalivecount", "NUMBER:connection.keepalivecount");
-    LOGFIELDS.put("connection.keepalivecount.last", "NUMBER:connection.keepalivecount.last");
-    LOGFIELDS.put("connection.keepalivecount.original", "NUMBER:connection.keepalivecount.original");
-    LOGFIELDS.put("connection.keepalivecount.last", "NUMBER:connection.keepalivecount.last");
-    LOGFIELDS.put("connection.client.logname", "NUMBER:connection.client.logname");
-    LOGFIELDS.put("connection.client.logname.last", "NUMBER:connection.client.logname.last");
-    LOGFIELDS.put("connection.client.logname.original", "NUMBER:connection.client.logname.original");
-    LOGFIELDS.put("connection.client.logname.last", "NUMBER:connection.client.logname.last");
-    LOGFIELDS.put("request.errorlogid", "STRING:request.errorlogid");
-    LOGFIELDS.put("request.errorlogid.last", "STRING:request.errorlogid.last");
-    LOGFIELDS.put("request.errorlogid.original", "STRING:request.errorlogid.original");
-    LOGFIELDS.put("request.errorlogid.last", "STRING:request.errorlogid.last");
-    LOGFIELDS.put("request.method", "HTTP.METHOD:request.method");
-    LOGFIELDS.put("request.method.last", "HTTP.METHOD:request.method.last");
-    LOGFIELDS.put("request.method.original", "HTTP.METHOD:request.method.original");
-    LOGFIELDS.put("request.method.last", "HTTP.METHOD:request.method.last");
-    LOGFIELDS.put("server.module_note.foobar", "STRING:server.module_note.foobar");
-    LOGFIELDS.put("response.header.foobar", "HTTP.HEADER:response.header.foobar");
-    LOGFIELDS.put("response.trailer.foobar", "HTTP.TRAILER:response.trailer.foobar");
-    LOGFIELDS.put("request.server.port.canonical", "PORT:request.server.port.canonical");
-    LOGFIELDS.put("request.server.port.canonical.last", "PORT:request.server.port.canonical.last");
-    LOGFIELDS.put("request.server.port.canonical.original", "PORT:request.server.port.canonical.original");
-    LOGFIELDS.put("request.server.port.canonical.last", "PORT:request.server.port.canonical.last");
-    LOGFIELDS.put("connection.server.port.canonical", "PORT:connection.server.port.canonical");
-    LOGFIELDS.put("connection.server.port.canonical.last", "PORT:connection.server.port.canonical.last");
-    LOGFIELDS.put("connection.server.port.canonical.original", "PORT:connection.server.port.canonical.original");
-    LOGFIELDS.put("connection.server.port.canonical.last", "PORT:connection.server.port.canonical.last");
-    LOGFIELDS.put("connection.server.port", "PORT:connection.server.port");
-    LOGFIELDS.put("connection.server.port.last", "PORT:connection.server.port.last");
-    LOGFIELDS.put("connection.server.port.original", "PORT:connection.server.port.original");
-    LOGFIELDS.put("connection.server.port.last", "PORT:connection.server.port.last");
-    LOGFIELDS.put("connection.client.port", "PORT:connection.client.port");
-    LOGFIELDS.put("connection.client.port.last", "PORT:connection.client.port.last");
-    LOGFIELDS.put("connection.client.port.original", "PORT:connection.client.port.original");
-    LOGFIELDS.put("connection.client.port.last", "PORT:connection.client.port.last");
-    LOGFIELDS.put("connection.server.child.processid", "NUMBER:connection.server.child.processid");
-    LOGFIELDS.put("connection.server.child.processid.last", "NUMBER:connection.server.child.processid.last");
-    LOGFIELDS.put("connection.server.child.processid.original", "NUMBER:connection.server.child.processid.original");
-    LOGFIELDS.put("connection.server.child.processid.last", "NUMBER:connection.server.child.processid.last");
-    LOGFIELDS.put("connection.server.child.processid", "NUMBER:connection.server.child.processid");
-    LOGFIELDS.put("connection.server.child.processid.last", "NUMBER:connection.server.child.processid.last");
-    LOGFIELDS.put("connection.server.child.processid.original", "NUMBER:connection.server.child.processid.original");
-    LOGFIELDS.put("connection.server.child.processid.last", "NUMBER:connection.server.child.processid.last");
-    LOGFIELDS.put("connection.server.child.threadid", "NUMBER:connection.server.child.threadid");
-    LOGFIELDS.put("connection.server.child.threadid.last", "NUMBER:connection.server.child.threadid.last");
-    LOGFIELDS.put("connection.server.child.threadid.original", "NUMBER:connection.server.child.threadid.original");
-    LOGFIELDS.put("connection.server.child.threadid.last", "NUMBER:connection.server.child.threadid.last");
-    LOGFIELDS.put("connection.server.child.hexthreadid", "NUMBER:connection.server.child.hexthreadid");
-    LOGFIELDS.put("connection.server.child.hexthreadid.last", "NUMBER:connection.server.child.hexthreadid.last");
-    LOGFIELDS.put("connection.server.child.hexthreadid.original", "NUMBER:connection.server.child.hexthreadid.original");
-    LOGFIELDS.put("connection.server.child.hexthreadid.last", "NUMBER:connection.server.child.hexthreadid.last");
-    LOGFIELDS.put("request.querystring", "HTTP.QUERYSTRING:request.querystring");
-    LOGFIELDS.put("request.querystring.last", "HTTP.QUERYSTRING:request.querystring.last");
-    LOGFIELDS.put("request.querystring.original", "HTTP.QUERYSTRING:request.querystring.original");
-    LOGFIELDS.put("request.querystring.last", "HTTP.QUERYSTRING:request.querystring.last");
-    LOGFIELDS.put("request.firstline", "HTTP.FIRSTLINE:request.firstline");
-    LOGFIELDS.put("request.firstline.original", "HTTP.FIRSTLINE:request.firstline.original");
-    LOGFIELDS.put("request.firstline.original", "HTTP.FIRSTLINE:request.firstline.original");
-    LOGFIELDS.put("request.firstline.last", "HTTP.FIRSTLINE:request.firstline.last");
-    LOGFIELDS.put("request.handler", "STRING:request.handler");
-    LOGFIELDS.put("request.handler.last", "STRING:request.handler.last");
-    LOGFIELDS.put("request.handler.original", "STRING:request.handler.original");
-    LOGFIELDS.put("request.handler.last", "STRING:request.handler.last");
-    LOGFIELDS.put("request.status", "STRING:request.status");
-    LOGFIELDS.put("request.status.original", "STRING:request.status.original");
-    LOGFIELDS.put("request.status.original", "STRING:request.status.original");
-    LOGFIELDS.put("request.status.last", "STRING:request.status.last");
-    LOGFIELDS.put("request.receive.time", "TIME.STAMP:request.receive.time");
-    LOGFIELDS.put("request.receive.time.last", "TIME.STAMP:request.receive.time.last");
-    LOGFIELDS.put("request.receive.time.original", "TIME.STAMP:request.receive.time.original");
-    LOGFIELDS.put("request.receive.time.last", "TIME.STAMP:request.receive.time.last");
-    LOGFIELDS.put("request.receive.time.year", "TIME.YEAR:request.receive.time.year");
-    LOGFIELDS.put("request.receive.time.begin.year", "TIME.YEAR:request.receive.time.begin.year");
-    LOGFIELDS.put("request.receive.time.end.year", "TIME.YEAR:request.receive.time.end.year");
-    LOGFIELDS.put("request.receive.time.sec", "TIME.SECONDS:request.receive.time.sec");
-    LOGFIELDS.put("request.receive.time.sec", "TIME.SECONDS:request.receive.time.sec");
-    LOGFIELDS.put("request.receive.time.sec.original", "TIME.SECONDS:request.receive.time.sec.original");
-    LOGFIELDS.put("request.receive.time.sec.last", "TIME.SECONDS:request.receive.time.sec.last");
-    LOGFIELDS.put("request.receive.time.begin.sec", "TIME.SECONDS:request.receive.time.begin.sec");
-    LOGFIELDS.put("request.receive.time.begin.sec.last", "TIME.SECONDS:request.receive.time.begin.sec.last");
-    LOGFIELDS.put("request.receive.time.begin.sec.original", "TIME.SECONDS:request.receive.time.begin.sec.original");
-    LOGFIELDS.put("request.receive.time.begin.sec.last", "TIME.SECONDS:request.receive.time.begin.sec.last");
-    LOGFIELDS.put("request.receive.time.end.sec", "TIME.SECONDS:request.receive.time.end.sec");
-    LOGFIELDS.put("request.receive.time.end.sec.last", "TIME.SECONDS:request.receive.time.end.sec.last");
-    LOGFIELDS.put("request.receive.time.end.sec.original", "TIME.SECONDS:request.receive.time.end.sec.original");
-    LOGFIELDS.put("request.receive.time.end.sec.last", "TIME.SECONDS:request.receive.time.end.sec.last");
-    LOGFIELDS.put("request.receive.time.begin.msec", "TIME.EPOCH:request.receive.time.begin.msec");
-    LOGFIELDS.put("request.receive.time.msec", "TIME.EPOCH:request.receive.time.msec");
-    LOGFIELDS.put("request.receive.time.msec.last", "TIME.EPOCH:request.receive.time.msec.last");
-    LOGFIELDS.put("request.receive.time.msec.original", "TIME.EPOCH:request.receive.time.msec.original");
-    LOGFIELDS.put("request.receive.time.msec.last", "TIME.EPOCH:request.receive.time.msec.last");
-    LOGFIELDS.put("request.receive.time.begin.msec", "TIME.EPOCH:request.receive.time.begin.msec");
-    LOGFIELDS.put("request.receive.time.begin.msec.last", "TIME.EPOCH:request.receive.time.begin.msec.last");
-    LOGFIELDS.put("request.receive.time.begin.msec.original", "TIME.EPOCH:request.receive.time.begin.msec.original");
-    LOGFIELDS.put("request.receive.time.begin.msec.last", "TIME.EPOCH:request.receive.time.begin.msec.last");
-    LOGFIELDS.put("request.receive.time.end.msec", "TIME.EPOCH:request.receive.time.end.msec");
-    LOGFIELDS.put("request.receive.time.end.msec.last", "TIME.EPOCH:request.receive.time.end.msec.last");
-    LOGFIELDS.put("request.receive.time.end.msec.original", "TIME.EPOCH:request.receive.time.end.msec.original");
-    LOGFIELDS.put("request.receive.time.end.msec.last", "TIME.EPOCH:request.receive.time.end.msec.last");
-    LOGFIELDS.put("request.receive.time.begin.usec", "TIME.EPOCH.USEC:request.receive.time.begin.usec");
-    LOGFIELDS.put("request.receive.time.usec", "TIME.EPOCH.USEC:request.receive.time.usec");
-    LOGFIELDS.put("request.receive.time.usec.last", "TIME.EPOCH.USEC:request.receive.time.usec.last");
-    LOGFIELDS.put("request.receive.time.usec.original", "TIME.EPOCH.USEC:request.receive.time.usec.original");
-    LOGFIELDS.put("request.receive.time.usec.last", "TIME.EPOCH.USEC:request.receive.time.usec.last");
-    LOGFIELDS.put("request.receive.time.begin.usec", "TIME.EPOCH.USEC:request.receive.time.begin.usec");
-    LOGFIELDS.put("request.receive.time.begin.usec.last", "TIME.EPOCH.USEC:request.receive.time.begin.usec.last");
-    LOGFIELDS.put("request.receive.time.begin.usec.original", "TIME.EPOCH.USEC:request.receive.time.begin.usec.original");
-    LOGFIELDS.put("request.receive.time.begin.usec.last", "TIME.EPOCH.USEC:request.receive.time.begin.usec.last");
-    LOGFIELDS.put("request.receive.time.end.usec", "TIME.EPOCH.USEC:request.receive.time.end.usec");
-    LOGFIELDS.put("request.receive.time.end.usec.last", "TIME.EPOCH.USEC:request.receive.time.end.usec.last");
-    LOGFIELDS.put("request.receive.time.end.usec.original", "TIME.EPOCH.USEC:request.receive.time.end.usec.original");
-    LOGFIELDS.put("request.receive.time.end.usec.last", "TIME.EPOCH.USEC:request.receive.time.end.usec.last");
-    LOGFIELDS.put("request.receive.time.begin.msec_frac", "TIME.EPOCH:request.receive.time.begin.msec_frac");
-    LOGFIELDS.put("request.receive.time.msec_frac", "TIME.EPOCH:request.receive.time.msec_frac");
-    LOGFIELDS.put("request.receive.time.msec_frac.last", "TIME.EPOCH:request.receive.time.msec_frac.last");
-    LOGFIELDS.put("request.receive.time.msec_frac.original", "TIME.EPOCH:request.receive.time.msec_frac.original");
-    LOGFIELDS.put("request.receive.time.msec_frac.last", "TIME.EPOCH:request.receive.time.msec_frac.last");
-    LOGFIELDS.put("request.receive.time.begin.msec_frac", "TIME.EPOCH:request.receive.time.begin.msec_frac");
-    LOGFIELDS.put("request.receive.time.begin.msec_frac.last", "TIME.EPOCH:request.receive.time.begin.msec_frac.last");
-    LOGFIELDS.put("request.receive.time.begin.msec_frac.original", "TIME.EPOCH:request.receive.time.begin.msec_frac.original");
-    LOGFIELDS.put("request.receive.time.begin.msec_frac.last", "TIME.EPOCH:request.receive.time.begin.msec_frac.last");
-    LOGFIELDS.put("request.receive.time.end.msec_frac", "TIME.EPOCH:request.receive.time.end.msec_frac");
-    LOGFIELDS.put("request.receive.time.end.msec_frac.last", "TIME.EPOCH:request.receive.time.end.msec_frac.last");
-    LOGFIELDS.put("request.receive.time.end.msec_frac.original", "TIME.EPOCH:request.receive.time.end.msec_frac.original");
-    LOGFIELDS.put("request.receive.time.end.msec_frac.last", "TIME.EPOCH:request.receive.time.end.msec_frac.last");
-    LOGFIELDS.put("request.receive.time.begin.usec_frac", "FRAC:request.receive.time.begin.usec_frac");
-    LOGFIELDS.put("request.receive.time.usec_frac", "FRAC:request.receive.time.usec_frac");
-    LOGFIELDS.put("request.receive.time.usec_frac.last", "FRAC:request.receive.time.usec_frac.last");
-    LOGFIELDS.put("request.receive.time.usec_frac.original", "FRAC:request.receive.time.usec_frac.original");
-    LOGFIELDS.put("request.receive.time.usec_frac.last", "FRAC:request.receive.time.usec_frac.last");
-    LOGFIELDS.put("request.receive.time.begin.usec_frac", "FRAC:request.receive.time.begin.usec_frac");
-    LOGFIELDS.put("request.receive.time.begin.usec_frac.last", "FRAC:request.receive.time.begin.usec_frac.last");
-    LOGFIELDS.put("request.receive.time.begin.usec_frac.original", "FRAC:request.receive.time.begin.usec_frac.original");
-    LOGFIELDS.put("request.receive.time.begin.usec_frac.last", "FRAC:request.receive.time.begin.usec_frac.last");
-    LOGFIELDS.put("request.receive.time.end.usec_frac", "FRAC:request.receive.time.end.usec_frac");
-    LOGFIELDS.put("request.receive.time.end.usec_frac.last", "FRAC:request.receive.time.end.usec_frac.last");
-    LOGFIELDS.put("request.receive.time.end.usec_frac.original", "FRAC:request.receive.time.end.usec_frac.original");
-    LOGFIELDS.put("request.receive.time.end.usec_frac.last", "FRAC:request.receive.time.end.usec_frac.last");
-    LOGFIELDS.put("response.server.processing.time", "SECONDS:response.server.processing.time");
-    LOGFIELDS.put("response.server.processing.time.original", "SECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.original", "SECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.last", "SECONDS:response.server.processing.time.last");
-    LOGFIELDS.put("server.process.time", "MICROSECONDS:server.process.time");
-    LOGFIELDS.put("response.server.processing.time", "MICROSECONDS:response.server.processing.time");
-    LOGFIELDS.put("response.server.processing.time.original", "MICROSECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.original", "MICROSECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.last", "MICROSECONDS:response.server.processing.time.last");
-    LOGFIELDS.put("response.server.processing.time", "MICROSECONDS:response.server.processing.time");
-    LOGFIELDS.put("response.server.processing.time.original", "MICROSECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.original", "MICROSECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.last", "MICROSECONDS:response.server.processing.time.last");
-    LOGFIELDS.put("response.server.processing.time", "MILLISECONDS:response.server.processing.time");
-    LOGFIELDS.put("response.server.processing.time.original", "MILLISECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.original", "MILLISECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.last", "MILLISECONDS:response.server.processing.time.last");
-    LOGFIELDS.put("response.server.processing.time", "SECONDS:response.server.processing.time");
-    LOGFIELDS.put("response.server.processing.time.original", "SECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.original", "SECONDS:response.server.processing.time.original");
-    LOGFIELDS.put("response.server.processing.time.last", "SECONDS:response.server.processing.time.last");
-    LOGFIELDS.put("connection.client.user", "STRING:connection.client.user");
-    LOGFIELDS.put("connection.client.user.last", "STRING:connection.client.user.last");
-    LOGFIELDS.put("connection.client.user.original", "STRING:connection.client.user.original");
-    LOGFIELDS.put("connection.client.user.last", "STRING:connection.client.user.last");
-    LOGFIELDS.put("request.urlpath", "URI:request.urlpath");
-    LOGFIELDS.put("request.urlpath.original", "URI:request.urlpath.original");
-    LOGFIELDS.put("request.urlpath.original", "URI:request.urlpath.original");
-    LOGFIELDS.put("request.urlpath.last", "URI:request.urlpath.last");
-    LOGFIELDS.put("connection.server.name.canonical", "STRING:connection.server.name.canonical");
-    LOGFIELDS.put("connection.server.name.canonical.last", "STRING:connection.server.name.canonical.last");
-    LOGFIELDS.put("connection.server.name.canonical.original", "STRING:connection.server.name.canonical.original");
-    LOGFIELDS.put("connection.server.name.canonical.last", "STRING:connection.server.name.canonical.last");
-    LOGFIELDS.put("connection.server.name", "STRING:connection.server.name");
-    LOGFIELDS.put("connection.server.name.last", "STRING:connection.server.name.last");
-    LOGFIELDS.put("connection.server.name.original", "STRING:connection.server.name.original");
-    LOGFIELDS.put("connection.server.name.last", "STRING:connection.server.name.last");
-    LOGFIELDS.put("response.connection.status", "HTTP.CONNECTSTATUS:response.connection.status");
-    LOGFIELDS.put("response.connection.status.last", "HTTP.CONNECTSTATUS:response.connection.status.last");
-    LOGFIELDS.put("response.connection.status.original", "HTTP.CONNECTSTATUS:response.connection.status.original");
-    LOGFIELDS.put("response.connection.status.last", "HTTP.CONNECTSTATUS:response.connection.status.last");
-    LOGFIELDS.put("request.bytes", "BYTES:request.bytes");
-    LOGFIELDS.put("request.bytes.last", "BYTES:request.bytes.last");
-    LOGFIELDS.put("request.bytes.original", "BYTES:request.bytes.original");
-    LOGFIELDS.put("request.bytes.last", "BYTES:request.bytes.last");
-    LOGFIELDS.put("response.bytes", "BYTES:response.bytes");
-    LOGFIELDS.put("response.bytes.last", "BYTES:response.bytes.last");
-    LOGFIELDS.put("response.bytes.original", "BYTES:response.bytes.original");
-    LOGFIELDS.put("response.bytes.last", "BYTES:response.bytes.last");
-    LOGFIELDS.put("total.bytes", "BYTES:total.bytes");
-    LOGFIELDS.put("total.bytes.last", "BYTES:total.bytes.last");
-    LOGFIELDS.put("total.bytes.original", "BYTES:total.bytes.original");
-    LOGFIELDS.put("total.bytes.last", "BYTES:total.bytes.last");
-    LOGFIELDS.put("request.cookies", "HTTP.COOKIES:request.cookies");
-    LOGFIELDS.put("request.cookies.last", "HTTP.COOKIES:request.cookies.last");
-    LOGFIELDS.put("request.cookies.original", "HTTP.COOKIES:request.cookies.original");
-    LOGFIELDS.put("request.cookies.last", "HTTP.COOKIES:request.cookies.last");
-    LOGFIELDS.put("response.cookies", "HTTP.SETCOOKIES:response.cookies");
-    LOGFIELDS.put("response.cookies.last", "HTTP.SETCOOKIES:response.cookies.last");
-    LOGFIELDS.put("response.cookies.original", "HTTP.SETCOOKIES:response.cookies.original");
-    LOGFIELDS.put("response.cookies.last", "HTTP.SETCOOKIES:response.cookies.last");
-    LOGFIELDS.put("request.user-agent", "HTTP.USERAGENT:request.user-agent");
-    LOGFIELDS.put("request.user-agent.last", "HTTP.USERAGENT:request.user-agent.last");
-    LOGFIELDS.put("request.user-agent.original", "HTTP.USERAGENT:request.user-agent.original");
-    LOGFIELDS.put("request.user-agent.last", "HTTP.USERAGENT:request.user-agent.last");
-    LOGFIELDS.put("request.referer", "HTTP.URI:request.referer");
-    LOGFIELDS.put("request.referer.last", "HTTP.URI:request.referer.last");
-    LOGFIELDS.put("request.referer.original", "HTTP.URI:request.referer.original");
-    LOGFIELDS.put("request.referer.last", "HTTP.URI:request.referer.last");
-  }
-
-  public HttpdParser(final MapWriter mapWriter, final DrillBuf managedBuffer, final String logFormat,
-                     final String timestampFormat, final Map<String, String> fieldMapping)
-          throws NoSuchMethodException, MissingDissectorsException, InvalidDissectorException {
-
-    Preconditions.checkArgument(logFormat != null && !logFormat.trim().isEmpty(), "logFormat cannot be null or empty");
-
-    this.record = new HttpdLogRecord(managedBuffer, timestampFormat);
-    this.parser = new HttpdLoglineParser<>(HttpdLogRecord.class, logFormat, timestampFormat);
-
-    setupParser(mapWriter, logFormat, fieldMapping);
-
-    if (timestampFormat != null && !timestampFormat.trim().isEmpty()) {
-      logger.info("Custom timestamp format has been specified. This is an informational note only as custom timestamps is rather unusual.");
-    }
-    if (logFormat.contains("\n")) {
-      logger.info("Specified logformat is a multiline log format: {}", logFormat);
-    }
-  }
-
-  /**
-   * We do not expose the underlying parser or the record which is used to manage the writers.
-   *
-   * @param line log line to tear apart.
-   * @throws DissectionFailure
-   * @throws InvalidDissectorException
-   * @throws MissingDissectorsException
-   */
-  public void parse(final String line) throws DissectionFailure, InvalidDissectorException, MissingDissectorsException {
-    parser.parse(record, line);
-    record.finishRecord();
-  }
-
-  /**
-   * In order to define a type remapping, the format of the field configuration will look like: <br/>
-   * HTTP.URI:request.firstline.uri.query.[parameter name] <br/>
-   *
-   * @param parser    Add type remapping to this parser instance.
-   * @param fieldName request.firstline.uri.query.[parameter_name]
-   * @param fieldType HTTP.URI, etc..
-   */
-  private void addTypeRemapping(final Parser<HttpdLogRecord> parser, final String fieldName, final String fieldType) {
-    logger.debug("Adding type remapping - fieldName: {}, fieldType: {}", fieldName, fieldType);
-    parser.addTypeRemapping(fieldName, fieldType);
-  }
-
-  /**
-   * The parser wants field names with dots (request.referer) while Drill uses underscores (request_referer). For the
-   * sake of simplicity we map the underscores back to dots. The resultant output field will look like: request.referer.<br>
-   * Additionally, wild cards will get replaced with .*
-   *
-   * @param drillFieldName name to be cleansed.
-   * @return
-   */
-  public static String parserFormattedFieldName(String drillFieldName) {
-
-    //The Useragent fields contain a dash which causes potential problems if the field name is not escaped properly
-    //This removes the dash
-    if (drillFieldName.contains("useragent")) {
-      drillFieldName = drillFieldName.replace("useragent", "user-agent");
-    }
-
-    String tempFieldName;
-    tempFieldName = LOGFIELDS.get(drillFieldName);
-    return tempFieldName.replace(SAFE_WILDCARD, PARSER_WILDCARD).replaceAll(SAFE_SEPARATOR, ".").replaceAll("\\.\\.", "_");
-  }
-
-  /**
-   * Drill cannot deal with fields that have dots in them, like request.referer. For the sake of simplicity we are
-   * going to ensure the field name is cleansed. The resultant output field will look like: request_referer.<br>
-   * Additionally, wild cards will get replaced with _$
-   *
-   * @param parserFieldName name to be cleansed.
-   * @return
-   */
-  public static String drillFormattedFieldName(String parserFieldName) {
-
-    //The Useragent fields contain a dash which causes potential problems if the field name is not escaped properly
-    //This removes the dash
-    if (parserFieldName.contains("user-agent")) {
-      parserFieldName = parserFieldName.replace("user-agent", "useragent");
-    }
-
-    if (parserFieldName.contains(":")) {
-      String[] fieldPart = parserFieldName.split(":");
-      return fieldPart[1].replaceAll("_", "__").replace(PARSER_WILDCARD, SAFE_WILDCARD).replaceAll("\\.", SAFE_SEPARATOR);
-    } else {
-      return parserFieldName.replaceAll("_", "__").replace(PARSER_WILDCARD, SAFE_WILDCARD).replaceAll("\\.", SAFE_SEPARATOR);
-    }
-  }
-
-  private void setupParser(final MapWriter mapWriter, final String logFormat, final Map<String, String> fieldMapping)
-          throws NoSuchMethodException, MissingDissectorsException, InvalidDissectorException {
-
-    /**
-     * If the user has selected fields, then we will use them to configure the parser because this would be the most
-     * efficient way to parse the log.
-     */
-    final Map<String, String> requestedPaths;
-    final List<String> allParserPaths = parser.getPossiblePaths();
-    if (fieldMapping != null && !fieldMapping.isEmpty()) {
-      logger.debug("Using fields defined by user.");
-      requestedPaths = fieldMapping;
-    } else {
-      /**
-       * Use all possible paths that the parser has determined from the specified log format.
-       */
-      logger.debug("No fields defined by user, defaulting to all possible fields.");
-      requestedPaths = Maps.newHashMap();
-      for (final String parserPath : allParserPaths) {
-        requestedPaths.put(drillFormattedFieldName(parserPath), parserPath);
-      }
-    }
-
-    /**
-     * By adding the parse target to the dummy instance we activate it, which we can then use to find out which
-     * paths cast to which native data types. After we are done figuring this information out, we throw this away
-     * because this will be the slowest parsing path possible for the specified format.
-     */
-    Parser<Object> dummy = new HttpdLoglineParser<>(Object.class, logFormat);
-    dummy.addParseTarget(String.class.getMethod("indexOf", String.class), allParserPaths);
-    for (final Map.Entry<String, String> entry : requestedPaths.entrySet()) {
-      final EnumSet<Casts> casts;
-
-      /**
-       * Check the field specified by the user to see if it is supposed to be remapped.
-       */
-      if (entry.getValue().startsWith(REMAPPING_FLAG)) {
-        /**
-         * Because this field is being remapped we need to replace the field name that the parser uses.
-         */
-        entry.setValue(entry.getValue().substring(REMAPPING_FLAG.length()));
-
-        final String[] pieces = entry.getValue().split(":");
-        addTypeRemapping(parser, pieces[1], pieces[0]);
-        casts = Casts.STRING_ONLY;
-      } else {
-        casts = dummy.getCasts(entry.getValue());
-      }
-
-      logger.debug("Setting up drill field: {}, parser field: {}, which casts as: {}", entry.getKey(), entry.getValue(), casts);
-      record.addField(parser, mapWriter, casts, entry.getValue(), entry.getKey());
-    }
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageFormatPlugin.java
deleted file mode 100644
index 048aa82..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageFormatPlugin.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.store.image;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.planner.common.DrillStatsTable;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.store.RecordWriter;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
-import org.apache.drill.exec.store.dfs.easy.EasyWriter;
-import org.apache.drill.exec.store.dfs.easy.FileWork;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-public class ImageFormatPlugin extends EasyFormatPlugin<ImageFormatConfig> {
-
-  private final static String DEFAULT_NAME = "image";
-
-  public ImageFormatPlugin(String name, DrillbitContext context, Configuration fsConf,
-                           StoragePluginConfig storageConfig) {
-    super(name, context, fsConf, storageConfig, new ImageFormatConfig(), true, false, false, false,
-        Collections.<String>emptyList(), DEFAULT_NAME);
-  }
-
-  public ImageFormatPlugin(String name, DrillbitContext context, Configuration fsConf,
-                           StoragePluginConfig storageConfig, ImageFormatConfig formatConfig) {
-    super(name, context, fsConf, storageConfig, formatConfig, true, false, false, false,
-        formatConfig.getExtensions(), DEFAULT_NAME);
-  }
-
-  @Override
-  public RecordReader getRecordReader(FragmentContext context, DrillFileSystem dfs, FileWork fileWork,
-      List<SchemaPath> columns, String userName) {
-    return new ImageRecordReader(context, dfs, fileWork.getPath(), formatConfig.hasFileSystemMetadata(),
-        formatConfig.isDescriptive(), formatConfig.getTimeZone());
-  }
-
-  @Override
-  public RecordWriter getRecordWriter(FragmentContext context, EasyWriter writer) throws IOException {
-    throw new UnsupportedOperationException("Drill doesn't currently support writing to image files.");
-  }
-
-  @Override
-  public int getReaderOperatorType() {
-    return CoreOperatorType.IMAGE_SUB_SCAN_VALUE;
-  }
-
-  @Override
-  public int getWriterOperatorType() {
-    throw new UnsupportedOperationException("Drill doesn't currently support writing to image files.");
-  }
-
-  @Override
-  public boolean supportsPushDown() {
-    return true;
-  }
-
-  @Override
-  public boolean supportsStatistics() {
-    return false;
-  }
-
-  @Override
-  public DrillStatsTable.TableStatistics readStatistics(FileSystem fs, Path statsTablePath) throws IOException {
-    return null;
-  }
-
-  @Override
-  public void writeStatistics(DrillStatsTable.TableStatistics statistics, FileSystem fs, Path statsTablePath) throws IOException {
-
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageRecordReader.java
deleted file mode 100644
index fa01743..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/image/ImageRecordReader.java
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.image;
-
-import io.netty.buffer.DrillBuf;
-
-import java.io.BufferedInputStream;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.List;
-import java.util.TimeZone;
-
-import com.adobe.internal.xmp.XMPException;
-import com.adobe.internal.xmp.XMPIterator;
-import com.adobe.internal.xmp.XMPMeta;
-import com.adobe.internal.xmp.options.IteratorOptions;
-import com.adobe.internal.xmp.properties.XMPPropertyInfo;
-
-import com.drew.imaging.FileType;
-import com.drew.imaging.FileTypeDetector;
-import com.drew.imaging.ImageMetadataReader;
-import com.drew.imaging.ImageProcessingException;
-import com.drew.lang.Charsets;
-import com.drew.lang.KeyValuePair;
-import com.drew.lang.Rational;
-import com.drew.metadata.Directory;
-import com.drew.metadata.Metadata;
-import com.drew.metadata.StringValue;
-import com.drew.metadata.Tag;
-import com.drew.metadata.eps.EpsDirectory;
-import com.drew.metadata.exif.ExifIFD0Directory;
-import com.drew.metadata.exif.ExifInteropDirectory;
-import com.drew.metadata.exif.ExifSubIFDDirectory;
-import com.drew.metadata.exif.GpsDirectory;
-import com.drew.metadata.exif.PanasonicRawIFD0Directory;
-import com.drew.metadata.exif.makernotes.FujifilmMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.NikonType2MakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusCameraSettingsMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusEquipmentMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusFocusInfoMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusImageProcessingMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusRawDevelopment2MakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusRawDevelopmentMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.OlympusRawInfoMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.PanasonicMakernoteDirectory;
-import com.drew.metadata.exif.makernotes.SamsungType2MakernoteDirectory;
-import com.drew.metadata.exif.makernotes.SonyType6MakernoteDirectory;
-import com.drew.metadata.icc.IccDirectory;
-import com.drew.metadata.jpeg.JpegComponent;
-import com.drew.metadata.photoshop.PhotoshopDirectory;
-import com.drew.metadata.png.PngDirectory;
-import com.drew.metadata.xmp.XmpDirectory;
-
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.ops.OperatorContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.store.AbstractRecordReader;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.vector.complex.impl.VectorContainerWriter;
-import org.apache.drill.exec.vector.complex.writer.BaseWriter.ListWriter;
-import org.apache.drill.exec.vector.complex.writer.BaseWriter.MapWriter;
-import org.apache.drill.exec.vector.complex.writer.FieldWriter;
-import org.apache.drill.exec.vector.complex.writer.VarCharWriter;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-
-public class ImageRecordReader extends AbstractRecordReader {
-
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ImageRecordReader.class);
-
-  private final DrillFileSystem fs;
-  private final Path hadoopPath;
-  private final boolean fileSystemMetadata;
-  private final boolean descriptive;
-  private final TimeZone timeZone;
-
-  private VectorContainerWriter writer;
-  private FileStatus fileStatus;
-  private BufferedInputStream metadataStream;
-  private DrillBuf managedBuffer;
-  private boolean finish;
-
-  public ImageRecordReader(FragmentContext context, DrillFileSystem fs, Path inputPath,
-                           boolean fileSystemMetadata, boolean descriptive, String timeZone) {
-    this.fs = fs;
-    hadoopPath = fs.makeQualified(inputPath);
-    this.fileSystemMetadata = fileSystemMetadata;
-    this.descriptive = descriptive;
-    this.timeZone = (timeZone != null) ? TimeZone.getTimeZone(timeZone) : TimeZone.getDefault();
-    managedBuffer = context.getManagedBuffer();
-  }
-
-  @Override
-  public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
-
-    try {
-      fileStatus = fs.getFileStatus(hadoopPath);
-      metadataStream = new BufferedInputStream(fs.open(hadoopPath));
-      writer = new VectorContainerWriter(output);
-      finish = false;
-    } catch (Exception e) {
-      throw handleAndRaise("Failure in creating record reader", e);
-    }
-  }
-
-  private DrillBuf drillBuffer(byte[] b) {
-    if (managedBuffer.capacity() < b.length) {
-      managedBuffer = managedBuffer.reallocIfNeeded(b.length);
-    }
-    managedBuffer.clear();
-    managedBuffer.writeBytes(b);
-    return managedBuffer;
-  }
-
-  protected RuntimeException handleAndRaise(String s, Exception e) {
-    throw UserException.dataReadError(e)
-        .message(s + "\n%s", e.getMessage())
-        .addContext("Path", hadoopPath.toUri().getPath())
-        .build(logger);
-  }
-
-  @Override
-  public int next() {
-
-    if (finish) {
-      return 0;
-    }
-
-    try {
-      writer.allocate();
-      writer.reset();
-
-      final MapWriter rootWriter = writer.rootAsMap();
-      final FileType fileType = FileTypeDetector.detectFileType(metadataStream);
-      final Metadata metadata = ImageMetadataReader.readMetadata(metadataStream);
-
-      try {
-        new GenericMetadataReader().read(fileType, fileStatus, metadata);
-        processGenericMetadataDirectory(rootWriter,
-            metadata.getFirstDirectoryOfType(GenericMetadataDirectory.class));
-      } catch (Exception e) {
-        // simply skip this directory
-      }
-
-      boolean skipEPSPreview = false;
-
-      for (Directory directory : metadata.getDirectories()) {
-        try {
-          if (directory instanceof GenericMetadataDirectory) {
-            continue;
-          }
-          if (directory instanceof ExifIFD0Directory && skipEPSPreview) {
-            skipEPSPreview = false;
-            continue;
-          }
-          if (directory instanceof EpsDirectory) {
-            // If an EPS file contains a TIFF preview, skip the next IFD0
-            skipEPSPreview = directory.containsTag(EpsDirectory.TAG_TIFF_PREVIEW_SIZE);
-          }
-          final MapWriter directoryWriter = rootWriter.map(formatName(directory.getName()));
-          processDirectory(directoryWriter, directory, metadata);
-          if (directory instanceof XmpDirectory) {
-            processXmpDirectory(directoryWriter, (XmpDirectory) directory);
-          }
-        } catch (Exception e) {
-          // simply skip this directory
-        }
-      }
-
-      writer.setValueCount(1);
-      finish = true;
-      return 1;
-    } catch (ImageProcessingException e) {
-      finish = true;
-      return 0;
-    } catch (Exception e) {
-      throw handleAndRaise("Failure while reading image metadata record.", e);
-    }
-  }
-
-  private void processGenericMetadataDirectory(final MapWriter writer,
-                                               final GenericMetadataDirectory directory) {
-    for (Tag tag : directory.getTags()) {
-      try {
-        final int tagType = tag.getTagType();
-        if (tagType != GenericMetadataDirectory.TAG_FILE_SIZE &&
-            tagType != GenericMetadataDirectory.TAG_FILE_DATE_TIME || fileSystemMetadata) {
-          writeValue(writer, formatName(tag.getTagName()),
-              descriptive ? directory.getDescription(tagType) : directory.getObject(tagType));
-        }
-      } catch (Exception e) {
-        // simply skip this tag
-      }
-    }
-  }
-
-  private void processDirectory(final MapWriter writer, final Directory directory, final Metadata metadata) {
-    for (Tag tag : directory.getTags()) {
-      try {
-        final int tagType = tag.getTagType();
-        Object value;
-        if (descriptive || isDescriptionTag(directory, tagType)) {
-          value = directory.getDescription(tagType);
-          if (directory instanceof PngDirectory) {
-            if (((PngDirectory) directory).getPngChunkType().areMultipleAllowed()) {
-              value = new String[] { (String) value };
-            }
-          }
-        } else {
-          value = directory.getObject(tagType);
-          if (directory instanceof ExifIFD0Directory && tagType == ExifIFD0Directory.TAG_DATETIME) {
-            ExifSubIFDDirectory exifSubIFDDir = metadata.getFirstDirectoryOfType(ExifSubIFDDirectory.class);
-            String subsecond = null;
-            if (exifSubIFDDir != null) {
-              subsecond = exifSubIFDDir.getString(ExifSubIFDDirectory.TAG_SUBSECOND_TIME);
-            }
-            value = directory.getDate(tagType, subsecond, timeZone);
-          } else if (directory instanceof ExifSubIFDDirectory) {
-            if (tagType == ExifSubIFDDirectory.TAG_DATETIME_ORIGINAL) {
-              value = ((ExifSubIFDDirectory) directory).getDateOriginal(timeZone);
-            } else if (tagType == ExifSubIFDDirectory.TAG_DATETIME_DIGITIZED) {
-              value = ((ExifSubIFDDirectory) directory).getDateDigitized(timeZone);
-            }
-          } else if (directory instanceof GpsDirectory) {
-            if (tagType == GpsDirectory.TAG_LATITUDE) {
-              value = ((GpsDirectory) directory).getGeoLocation().getLatitude();
-            } else if (tagType == GpsDirectory.TAG_LONGITUDE) {
-              value = ((GpsDirectory) directory).getGeoLocation().getLongitude();
-            }
-          }
-          if (isVersionTag(directory, tagType)) {
-            value = directory.getString(tagType, "US-ASCII");
-          } else if (isDateTag(directory, tagType)) {
-            value = directory.getDate(tagType, timeZone);
-          }
-        }
-        writeValue(writer, formatName(tag.getTagName()), value);
-      } catch (Exception e) {
-        // simply skip this tag
-      }
-    }
-  }
-
-  private void processXmpDirectory(final MapWriter writer, final XmpDirectory directory) {
-    HashSet<String> listItems = new HashSet<>();
-    XMPMeta xmpMeta = directory.getXMPMeta();
-    if (xmpMeta != null) {
-      try {
-        IteratorOptions iteratorOptions = new IteratorOptions().setJustLeafnodes(true);
-        for (final XMPIterator i = xmpMeta.iterator(iteratorOptions); i.hasNext(); ) {
-          try {
-            XMPPropertyInfo prop = (XMPPropertyInfo) i.next();
-            String path = prop.getPath();
-            String value = prop.getValue();
-            if (path != null && value != null) {
-              // handling lang-alt array items
-              if (prop.getOptions().getHasLanguage()) {
-                XMPPropertyInfo langProp = (XMPPropertyInfo) i.next();
-                if (langProp.getPath().endsWith("/xml:lang")) {
-                  String lang = langProp.getValue();
-                  path = path.replaceFirst("\\[\\d+\\]$", "") +
-                      (lang.equals("x-default") ? "" : "_" + lang);
-                }
-              }
-
-              FieldWriter writerSub = (FieldWriter) writer;
-              String[] elements = path.replaceAll("/\\w+:", "/").split(":|/|(?=\\[)");
-              for (int j = 1; j < elements.length; j++) {
-                String parent = elements[j - 1];
-                boolean isList = elements[j].startsWith("[");
-                if (parent.startsWith("[")) {
-                  writerSub = (FieldWriter) (isList ? writerSub.list() : writerSub.map());
-                  if (listItems.add(path.replaceFirst("[^\\]]+$", ""))) {
-                    writerSub.start();
-                  }
-                } else {
-                  writerSub = (FieldWriter)
-                      (isList ? writerSub.list(formatName(parent)) : writerSub.map(formatName(parent)));
-                }
-              }
-              String parent = elements[elements.length - 1];
-              VarCharWriter varCharWriter = parent.startsWith("[") ?
-                  writerSub.varChar() : writerSub.varChar(formatName(parent));
-              writeString(varCharWriter, value);
-            }
-          } catch (Exception e) {
-            // simply skip this property
-          }
-        }
-      } catch (XMPException ignored) {
-      }
-    }
-  }
-
-  private void writeValue(final MapWriter writer, final String tagName, final Object value) {
-    if (value == null) {
-      return;
-    }
-
-    if (value instanceof Boolean) {
-      writer.bit(tagName).writeBit((Boolean) value ? 1 : 0);
-    } else if (value instanceof Byte) {
-      // TINYINT is not supported
-      writer.integer(tagName).writeInt(((Byte) value).intValue());
-    } else if (value instanceof Short) {
-      // SMALLINT is not supported
-      writer.integer(tagName).writeInt(((Short) value).intValue());
-    } else if (value instanceof Integer) {
-      writer.integer(tagName).writeInt((Integer) value);
-    } else if (value instanceof Long) {
-      writer.bigInt(tagName).writeBigInt((Long) value);
-    } else if (value instanceof Float) {
-      writer.float4(tagName).writeFloat4((Float) value);
-    } else if (value instanceof Double) {
-      writer.float8(tagName).writeFloat8((Double) value);
-    } else if (value instanceof Rational) {
-      writer.float8(tagName).writeFloat8(((Rational) value).doubleValue());
-    } else if (value instanceof String) {
-      writeString(writer.varChar(tagName), (String) value);
-    } else if (value instanceof StringValue) {
-      writeString(writer.varChar(tagName), ((StringValue) value).toString());
-    } else if (value instanceof Date) {
-      writer.timeStamp(tagName).writeTimeStamp(((Date) value).getTime());
-    } else if (value instanceof boolean[]) {
-      for (boolean v : (boolean[]) value) {
-        writer.list(tagName).bit().writeBit(v ? 1 : 0);
-      }
-    } else if (value instanceof byte[]) {
-      final byte[] bytes = (byte[]) value;
-      if (bytes.length == 1) {
-        writer.integer(tagName).writeInt(bytes[0]);
-      } else if (bytes.length <= 4) {
-        ListWriter listWriter = writer.list(tagName);
-        for (byte v : bytes) {
-          listWriter.integer().writeInt(v);
-        }
-      } else {
-        writer.varBinary(tagName).writeVarBinary(0, bytes.length, drillBuffer(bytes));
-      }
-    } else if (value instanceof short[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (short v : (short[]) value) {
-        // SMALLINT is not supported
-        listWriter.integer().writeInt(v);
-      }
-    } else if (value instanceof int[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (int v : (int[]) value) {
-        listWriter.integer().writeInt(v);
-      }
-    } else if (value instanceof long[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (long v : (long[]) value) {
-        listWriter.bigInt().writeBigInt(v);
-      }
-    } else if (value instanceof float[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (float v : (float[]) value) {
-        listWriter.float4().writeFloat4(v);
-      }
-    } else if (value instanceof double[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (double v : (double[]) value) {
-        listWriter.float8().writeFloat8(v);
-      }
-    } else if (value instanceof Rational[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (Rational v : (Rational[]) value) {
-        listWriter.float8().writeFloat8(v.doubleValue());
-      }
-    } else if (value instanceof String[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (String v : (String[]) value) {
-        writeString(listWriter.varChar(), v);
-      }
-    } else if (value instanceof StringValue[]) {
-      ListWriter listWriter = writer.list(tagName);
-      for (StringValue v : (StringValue[]) value) {
-        writeString(listWriter.varChar(), v.toString());
-      }
-    } else if (value instanceof JpegComponent) {
-      final JpegComponent v = (JpegComponent) value;
-      writer.map(tagName).integer("ComponentId").writeInt(v.getComponentId());
-      writer.map(tagName).integer("HorizontalSamplingFactor").writeInt(v.getHorizontalSamplingFactor());
-      writer.map(tagName).integer("VerticalSamplingFactor").writeInt(v.getVerticalSamplingFactor());
-      writer.map(tagName).integer("QuantizationTableNumber").writeInt(v.getQuantizationTableNumber());
-    } else if (value instanceof List<?>) {
-      ListWriter listWriter = writer.list(tagName);
-      for (Object v : (List<?>) value) {
-        if (v instanceof KeyValuePair) {
-          listWriter.map().start();
-          writeString(listWriter.map().varChar("Key"), ((KeyValuePair) v).getKey());
-          writeString(listWriter.map().varChar("Value"), ((KeyValuePair) v).getValue().toString());
-          listWriter.map().end();
-        } else {
-          writeString(listWriter.varChar(), v.toString());
-        }
-      }
-    } else {
-      writeString(writer.varChar(tagName), value.toString());
-    }
-  }
-
-  private void writeString(final VarCharWriter writer, final String value) {
-    final byte[] stringBytes = value.getBytes(Charsets.UTF_8);
-    writer.writeVarChar(0, stringBytes.length, drillBuffer(stringBytes));
-  }
-
-  private String formatName(final String tagName) {
-    StringBuilder builder = new StringBuilder();
-    boolean upperCase = true;
-    for (char c : tagName.toCharArray()) {
-      if (c == ' ' || c == '-' || c == '/') {
-        upperCase = true;
-      } else {
-        builder.append(upperCase ? Character.toUpperCase(c) : c);
-        upperCase = false;
-      }
-    }
-    return builder.toString();
-  }
-
-  private boolean isDescriptionTag(final Directory directory, final int tagType) {
-    return directory instanceof IccDirectory && tagType > 0x20202020 && tagType < 0x7a7a7a7a ||
-        directory instanceof PhotoshopDirectory;
-  }
-
-  private boolean isVersionTag(final Directory directory, final int tagType) {
-    return directory instanceof ExifSubIFDDirectory &&
-        (tagType == ExifSubIFDDirectory.TAG_EXIF_VERSION || tagType == ExifSubIFDDirectory.TAG_FLASHPIX_VERSION) ||
-        directory instanceof ExifInteropDirectory &&
-        tagType == ExifInteropDirectory.TAG_INTEROP_VERSION ||
-        directory instanceof FujifilmMakernoteDirectory &&
-        tagType == FujifilmMakernoteDirectory.TAG_MAKERNOTE_VERSION ||
-        directory instanceof NikonType2MakernoteDirectory &&
-        tagType == NikonType2MakernoteDirectory.TAG_FIRMWARE_VERSION ||
-        directory instanceof OlympusCameraSettingsMakernoteDirectory &&
-        tagType == OlympusCameraSettingsMakernoteDirectory.TagCameraSettingsVersion ||
-        directory instanceof OlympusEquipmentMakernoteDirectory &&
-        tagType == OlympusEquipmentMakernoteDirectory.TAG_EQUIPMENT_VERSION ||
-        directory instanceof OlympusFocusInfoMakernoteDirectory &&
-        tagType == OlympusFocusInfoMakernoteDirectory.TagFocusInfoVersion ||
-        directory instanceof OlympusImageProcessingMakernoteDirectory &&
-        tagType == OlympusImageProcessingMakernoteDirectory.TagImageProcessingVersion ||
-        directory instanceof OlympusMakernoteDirectory &&
-        tagType == OlympusMakernoteDirectory.TAG_MAKERNOTE_VERSION ||
-        directory instanceof OlympusRawDevelopment2MakernoteDirectory &&
-        tagType == OlympusRawDevelopment2MakernoteDirectory.TagRawDevVersion ||
-        directory instanceof OlympusRawDevelopmentMakernoteDirectory &&
-        tagType == OlympusRawDevelopmentMakernoteDirectory.TagRawDevVersion ||
-        directory instanceof OlympusRawInfoMakernoteDirectory &&
-        tagType == OlympusRawInfoMakernoteDirectory.TagRawInfoVersion ||
-        directory instanceof PanasonicMakernoteDirectory &&
-        (tagType == PanasonicMakernoteDirectory.TAG_FIRMWARE_VERSION || tagType == PanasonicMakernoteDirectory.TAG_MAKERNOTE_VERSION || tagType == PanasonicMakernoteDirectory.TAG_EXIF_VERSION) ||
-        directory instanceof SamsungType2MakernoteDirectory &&
-        tagType == SamsungType2MakernoteDirectory.TagMakerNoteVersion ||
-        directory instanceof SonyType6MakernoteDirectory &&
-        tagType == SonyType6MakernoteDirectory.TAG_MAKERNOTE_THUMB_VERSION ||
-        directory instanceof PanasonicRawIFD0Directory &&
-        tagType == PanasonicRawIFD0Directory.TagPanasonicRawVersion;
-  }
-
-  private boolean isDateTag(final Directory directory, final int tagType) {
-    return directory instanceof IccDirectory && tagType == IccDirectory.TAG_PROFILE_DATETIME ||
-        directory instanceof PngDirectory && tagType == PngDirectory.TAG_LAST_MODIFICATION_TIME;
-  }
-
-  @Override
-  public void close() throws Exception {
-    if (metadataStream != null) {
-      metadataStream.close();
-    }
-  }
-
-  @Override
-  public String toString() {
-    return "ImageRecordReader[Path=" + hadoopPath.toUri().getPath() + "]";
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java
index 4a0bba5..8b19e4e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java
@@ -20,10 +20,11 @@
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.drill.exec.physical.base.AbstractSubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 public class InfoSchemaSubScan extends AbstractSubScan {
 
+  public static final String OPERATOR_TYPE = "INFO_SCHEMA_SUB_SCAN";
+
   private final InfoSchemaTableType table;
   private final InfoSchemaFilter filter;
 
@@ -46,7 +47,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.INFO_SCHEMA_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/log/LogFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/log/LogFormatPlugin.java
index 1027fc7..67305c9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/log/LogFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/log/LogFormatPlugin.java
@@ -28,7 +28,6 @@
 import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileScanBuilder;
 import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
 import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.metadata.ColumnMetadata;
 import org.apache.drill.exec.record.metadata.MetadataUtils;
 import org.apache.drill.exec.record.metadata.Propertied;
@@ -44,13 +43,13 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Collections;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
 
 public class LogFormatPlugin extends EasyFormatPlugin<LogFormatConfig> {
+
   private static final Logger logger = LoggerFactory.getLogger(LogFormatPlugin.class);
 
   public static final String PLUGIN_NAME = "logRegex";
@@ -58,6 +57,8 @@
   public static final String REGEX_PROP = PROP_PREFIX + "regex";
   public static final String MAX_ERRORS_PROP = PROP_PREFIX + "maxErrors";
 
+  public static final String OPERATOR_TYPE = "REGEX_SUB_SCAN";
+
   private static class LogReaderFactory extends FileReaderFactory {
     private final LogReaderConfig readerConfig;
     private final int maxRecords;
@@ -80,20 +81,19 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, LogFormatConfig pluginConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = false;
-    // Should be block splitable, but logic not yet implemented.
-    config.blockSplittable = false;
-    config.compressible = true;
-    config.supportsProjectPushdown = true;
-    config.extensions = Collections.singletonList(pluginConfig.getExtension());
-    config.fsConf = fsConf;
-    config.defaultName = PLUGIN_NAME;
-    config.readerOperatorType = CoreOperatorType.REGEX_SUB_SCAN_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false) // Should be block splittable, but logic not yet implemented.
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtension())
+        .fsConf(fsConf)
+        .defaultName(PLUGIN_NAME)
+        .readerOperatorType(OPERATOR_TYPE)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   /**
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorePOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorePOP.java
index a723aca..d09bf0c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorePOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockStorePOP.java
@@ -32,7 +32,6 @@
 
 @JsonTypeName("mock-store")
 public class MockStorePOP extends AbstractStore {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MockStorePOP.class);
 
   @JsonCreator
   public MockStorePOP(@JsonProperty("child") PhysicalOperator child) {
@@ -65,7 +64,7 @@
   }
 
   @Override
-  public int getOperatorType() {
+  public String getOperatorType() {
     throw new UnsupportedOperationException();
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
index 644a3be..980187f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
@@ -25,7 +25,6 @@
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
 import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.mock.MockTableDef.MockScanEntry;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -43,6 +42,8 @@
 @JsonTypeName("mock-sub-scan")
 public class MockSubScanPOP extends AbstractBase implements SubScan {
 
+  public static final String OPERATOR_TYPE = "MOCK_SUB_SCAN";
+
   private final String url;
   protected final List<MockScanEntry> readEntries;
   private final boolean extended;
@@ -116,7 +117,7 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.MOCK_SUB_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRowGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRowGroupScan.java
index 75cb302..9478061 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRowGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRowGroupScan.java
@@ -26,7 +26,6 @@
 import org.apache.drill.common.logical.FormatPluginConfig;
 import org.apache.drill.common.logical.StoragePluginConfig;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.metadata.TupleMetadata;
 import org.apache.drill.exec.store.ColumnExplorer;
 import org.apache.drill.exec.store.StoragePluginRegistry;
@@ -44,6 +43,8 @@
 @JsonTypeName("parquet-row-group-scan")
 public class ParquetRowGroupScan extends AbstractParquetRowGroupScan {
 
+  public static final String OPERATOR_TYPE = "PARQUET_ROW_GROUP_SCAN";
+
   private final ParquetFormatPlugin formatPlugin;
   private final ParquetFormatConfig formatConfig;
 
@@ -103,8 +104,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.PARQUET_ROW_GROUP_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetWriter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetWriter.java
index 61bd44f..9ab886a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetWriter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetWriter.java
@@ -26,9 +26,6 @@
 import org.apache.drill.exec.physical.base.AbstractWriter;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.store.StorageStrategy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
 import com.fasterxml.jackson.annotation.JacksonInject;
@@ -39,7 +36,6 @@
 
 @JsonTypeName("parquet-writer")
 public class ParquetWriter extends AbstractWriter {
-  static final Logger logger = LoggerFactory.getLogger(ParquetWriter.class);
 
 /** Version of Drill's Parquet writer. Increment this version (by 1) any time we make any format change to the file.
  * Format changes include:
@@ -53,6 +49,8 @@
  */
   public static final int WRITER_VERSION = 3;
 
+  public static final String OPERATOR_TYPE = "PARQUET_WRITER";
+
   private final String location;
   private final List<String> partitionColumns;
   private final ParquetFormatPlugin formatPlugin;
@@ -117,8 +115,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.PARQUET_WRITER_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcap/PcapFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcap/PcapFormatPlugin.java
index b5b77fc..69dbfc5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcap/PcapFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcap/PcapFormatPlugin.java
@@ -26,7 +26,6 @@
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasySubScan;
 import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
 import org.apache.hadoop.conf.Configuration;
@@ -59,19 +58,18 @@
   }
 
   private static EasyFormatConfig easyConfig(Configuration fsConf, PcapFormatConfig pluginConfig) {
-    EasyFormatConfig config = new EasyFormatConfig();
-    config.readable = true;
-    config.writable = false;
-    config.blockSplittable = false;
-    config.compressible = true;
-    config.supportsProjectPushdown = true;
-    config.extensions = pluginConfig.getExtensions();
-    config.fsConf = fsConf;
-    config.defaultName = PLUGIN_NAME;
-    config.readerOperatorType = UserBitShared.CoreOperatorType.PCAP_SUB_SCAN_VALUE;
-    config.useEnhancedScan = true;
-    config.supportsLimitPushdown = true;
-    return config;
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .supportsProjectPushdown(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .defaultName(PLUGIN_NAME)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .build();
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapColumn.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapColumn.java
new file mode 100644
index 0000000..ec9d5f5
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapColumn.java
@@ -0,0 +1,1020 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.pcapng;
+
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.store.pcap.PcapFormatUtils;
+import org.apache.drill.exec.store.pcapng.decoder.PacketDecoder;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.joda.time.Instant;
+
+import fr.bmartel.pcapdecoder.structure.options.inter.IOptionsStatisticsHeader;
+import fr.bmartel.pcapdecoder.structure.types.IPcapngType;
+import fr.bmartel.pcapdecoder.structure.types.inter.IDescriptionBlock;
+import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
+import fr.bmartel.pcapdecoder.structure.types.inter.INameResolutionBlock;
+import fr.bmartel.pcapdecoder.structure.types.inter.ISectionHeaderBlock;
+import fr.bmartel.pcapdecoder.structure.types.inter.IStatisticsBlock;
+
+public abstract class PcapColumn {
+
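+  // Registry of per-packet columns (read from Enhanced Packet Blocks) and of summary
+  // columns: the file path plus metadata from the section header, interface description,
+  // name resolution and interface statistics blocks.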
+  private static final Map<String, PcapColumn> columns = new LinkedHashMap<>();
+  private static final Map<String, PcapColumn> summary_columns = new LinkedHashMap<>();
+  public static final String DUMMY_NAME = "dummy";
+  public static final String PATH_NAME = "path";
+
+  static {
+    // Basic
+    columns.put("timestamp", new PcapTimestamp());
+    columns.put("packet_length", new PcapPacketLength());
+    columns.put("type", new PcapType());
+    columns.put("src_ip", new PcapSrcIp());
+    columns.put("dst_ip", new PcapDstIp());
+    columns.put("src_port", new PcapSrcPort());
+    columns.put("dst_port", new PcapDstPort());
+    columns.put("src_mac_address", new PcapSrcMac());
+    columns.put("dst_mac_address", new PcapDstMac());
+    columns.put("tcp_session", new PcapTcpSession());
+    columns.put("tcp_ack", new PcapTcpAck());
+    columns.put("tcp_flags", new PcapTcpFlags());
+    columns.put("tcp_flags_ns", new PcapTcpFlagsNs());
+    columns.put("tcp_flags_cwr", new PcapTcpFlagsCwr());
+    columns.put("tcp_flags_ece", new PcapTcpFlagsEce());
+    columns.put("tcp_flags_ece_ecn_capable", new PcapTcpFlagsEceEcnCapable());
+    columns.put("tcp_flags_ece_congestion_experienced", new PcapTcpFlagsEceCongestionExperienced());
+    columns.put("tcp_flags_urg", new PcapTcpFlagsUrg());
+    columns.put("tcp_flags_ack", new PcapTcpFlagsAck());
+    columns.put("tcp_flags_psh", new PcapTcpFlagsPsh());
+    columns.put("tcp_flags_rst", new PcapTcpFlagsRst());
+    columns.put("tcp_flags_syn", new PcapTcpFlagsSyn());
+    columns.put("tcp_flags_fin", new PcapTcpFlagsFin());
+    columns.put("tcp_parsed_flags", new PcapTcpParsedFlags());
+    columns.put("packet_data", new PcapPacketData());
+
+    // Extensions
+    summary_columns.put("path", new PcapStatPath());
+    // Section Header Block
+    summary_columns.put("shb_hardware", new PcapHardware());
+    summary_columns.put("shb_os", new PcapOS());
+    summary_columns.put("shb_userappl", new PcapUserAppl());
+    // Interface Description Block
+    summary_columns.put("if_name", new PcapIfName());
+    summary_columns.put("if_description", new PcapIfDescription());
+    summary_columns.put("if_ipv4addr", new PcapIfIPv4addr());
+    summary_columns.put("if_ipv6addr", new PcapIfIPv6addr());
+    summary_columns.put("if_macaddr", new PcapIfMACaddr());
+    summary_columns.put("if_euiaddr", new PcapIfEUIaddr());
+    summary_columns.put("if_speed", new PcapIfSpeed());
+    summary_columns.put("if_tsresol", new PcapIfTsresol());
+    summary_columns.put("if_tzone", new PcapIfTzone());
+    summary_columns.put("if_os", new PcapIfOS());
+    summary_columns.put("if_fcslen", new PcapIfFcslen());
+    summary_columns.put("if_tsoffset", new PcapIfTsOffset());
+    // Name Resolution Block
+    summary_columns.put("ns_dnsname", new PcapDnsName());
+    summary_columns.put("ns_dnsip4addr", new PcapDnsIP4addr());
+    summary_columns.put("ns_dnsip6addr", new PcapDnsIP6addr());
+    // Interface Statistics Block
+    summary_columns.put("isb_starttime", new PcapIsbStarttime());
+    summary_columns.put("isb_endtime", new PcapIsbEndtime());
+    summary_columns.put("isb_ifrecv", new PcapIsbIfrecv());
+    summary_columns.put("isb_ifdrop", new PcapIsbIfdrop());
+    summary_columns.put("isb_filteraccept", new PcapIsbFilterAccept());
+    summary_columns.put("isb_osdrop", new PcapIsbOSdrop());
+    summary_columns.put("isb_usrdeliv", new PcapIsbUsrdeliv());
+  }
+
+  abstract MajorType getType();
+
+  abstract void process(IPcapngType block, ScalarWriter writer);
+
+  public static Map<String, PcapColumn> getColumns() {
+    return Collections.unmodifiableMap(columns);
+  }
+
+  public static Map<String, PcapColumn> getSummaryColumns() {
+    return Collections.unmodifiableMap(summary_columns);
+  }
+
+  static class PcapDummy extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) { }
+  }
+
+  static class PcapStatPath extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) { }
+  }
+
+  static class PcapTimestamp extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.required(MinorType.TIMESTAMP);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
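+      // The decoder reports the packet timestamp in microseconds (assuming the default
+      // if_tsresol); divide by 1000 to obtain epoch milliseconds.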
+      writer.setTimestamp(Instant.ofEpochMilli(((IEnhancedPacketBLock) block).getTimeStamp() / 1000));
+    }
+  }
+
+  static class PcapPacketLength extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.required(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      writer.setInt(((IEnhancedPacketBLock) block).getPacketLength());
+    }
+  }
+
+  static class PcapType extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setString(packet.getPacketType());
+      }
+    }
+  }
+
+  static class PcapSrcIp extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setString(packet.getSrc_ip().getHostAddress());
+      }
+    }
+  }
+
+  static class PcapDstIp extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setString(packet.getDst_ip().getHostAddress());
+      }
+    }
+  }
+
+  static class PcapSrcPort extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setInt(packet.getSrc_port());
+      }
+    }
+  }
+
+  static class PcapDstPort extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setInt(packet.getDst_port());
+      }
+    }
+  }
+
+  static class PcapSrcMac extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setString(packet.getEthernetSource());
+      }
+    }
+  }
+
+  static class PcapDstMac extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setString(packet.getEthernetDestination());
+      }
+    }
+  }
+
+  static class PcapTcpSession extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.BIGINT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setLong(packet.getSessionHash());
+      }
+    }
+  }
+
+  static class PcapTcpAck extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setInt(packet.getAckNumber());
+      }
+    }
+  }
+
+  static class PcapTcpFlags extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setInt(packet.getFlags());
+      }
+    }
+  }
+
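+  // The tcp_flags_* columns below each test a single bit of the TCP flags field.
+  // Standard bit assignments: FIN=0x01, SYN=0x02, RST=0x04, PSH=0x08, ACK=0x10,
+  // URG=0x20, ECE=0x40, CWR=0x80, NS=0x100.
+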
+  static class PcapTcpFlagsNs extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x100) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsCwr extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x80) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsEce extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x40) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsEceEcnCapable extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
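+        // SYN and ECE both set: the sender is negotiating ECN support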
+        writer.setBoolean((packet.getFlags() & 0x42) == 0x42);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsEceCongestionExperienced extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
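+        // ECE set without SYN: congestion was experienced on an established connection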
+        writer.setBoolean((packet.getFlags() & 0x42) == 0x40);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsUrg extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x20) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsAck extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x10) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsPsh extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x8) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsRst extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x4) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsSyn extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x2) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpFlagsFin extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setBoolean((packet.getFlags() & 0x1) != 0);
+      }
+    }
+  }
+
+  static class PcapTcpParsedFlags extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setString(packet.getParsedFlags());
+      }
+    }
+  }
+
+  static class PcapPacketData extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      PacketDecoder packet = new PacketDecoder();
+      if (packet.readPcapng(((IEnhancedPacketBLock) block).getPacketData())) {
+        writer.setString(PcapFormatUtils.parseBytesToASCII(((IEnhancedPacketBLock) block).getPacketData()));
+      }
+    }
+  }
+
+  /**
+   * shb_hardware: description of the hardware
+   */
+  static class PcapHardware extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof ISectionHeaderBlock)) {
+        return;
+      }
+      writer.setString(((ISectionHeaderBlock) block).getOptions().getHardware());
+    }
+  }
+
+  // Section Header Block
+
+  /**
+   * shb_os: name of the OS
+   */
+  static class PcapOS extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof ISectionHeaderBlock)) {
+        return;
+      }
+      writer.setString(((ISectionHeaderBlock) block).getOptions().getOS());
+    }
+  }
+
+  /**
+   * shb_userappl: name of the user application
+   */
+  static class PcapUserAppl extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof ISectionHeaderBlock)) {
+        return;
+      }
+      writer.setString(((ISectionHeaderBlock) block).getOptions().getUserAppl());
+    }
+  }
+
+  // Interface Description Block
+
+  /**
+   * if_name: name of the device used to capture
+   */
+  static class PcapIfName extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setString(((IDescriptionBlock) block).getOptions().getInterfaceName());
+    }
+  }
+
+  /**
+   * if_description: Description of the device used to capture the data
+   */
+  static class PcapIfDescription extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setString(((IDescriptionBlock) block).getOptions().getInterfaceDescription());
+    }
+  }
+
+  /**
+   * if_IPv4addr: IPV4 address
+   */
+  static class PcapIfIPv4addr extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setString(((IDescriptionBlock) block).getOptions().getInterfaceIpv4NetworkAddr());
+    }
+  }
+
+  /**
+   * if_IPv6addr: IPV6 address
+   */
+  static class PcapIfIPv6addr extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setString(((IDescriptionBlock) block).getOptions().getIpv6NetworkAddr());
+    }
+  }
+
+  /**
+   * if_MACaddr: MAC address
+   */
+  static class PcapIfMACaddr extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setString(((IDescriptionBlock) block).getOptions().getInterfaceMacAddr());
+    }
+  }
+
+  /**
+   * if_EUIaddr: EUI address
+   */
+  static class PcapIfEUIaddr extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setString(((IDescriptionBlock) block).getOptions().getInterfaceEuiAddr());
+    }
+  }
+
+  /**
+   * if_speed: interface speed in bps
+   */
+  static class PcapIfSpeed extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setInt(((IDescriptionBlock) block).getOptions().getInterfaceSpeed());
+    }
+  }
+
+  /**
+   * if_tsresol: resolution of timestamps (for instance, 6 means microsecond resolution)
+   */
+  static class PcapIfTsresol extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setInt(((IDescriptionBlock) block).getOptions().getTimeStampResolution());
+    }
+  }
+
+  /**
+   * if_tzone: time zone, expressed as an offset from UTC
+   */
+  static class PcapIfTzone extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setInt(((IDescriptionBlock) block).getOptions().getTimeBias());
+    }
+  }
+
+  /**
+   * if_os: Name of the operating system
+   */
+  static class PcapIfOS extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setString(((IDescriptionBlock) block).getOptions().getInterfaceOperatingSystem());
+    }
+  }
+
+  /**
+   * if_fcslen: Length of the Frame Check Sequence (in bits)
+   */
+  static class PcapIfFcslen extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setInt(((IDescriptionBlock) block).getOptions().getInterfaceFrameCheckSequenceLength());
+    }
+  }
+
+  /**
+   * if_tsoffset: timestamp offset for each packet; if not present, timestamps are absolute
+   */
+  static class PcapIfTsOffset extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.INT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IDescriptionBlock)) {
+        return;
+      }
+      writer.setInt(((IDescriptionBlock) block).getOptions().getTimeStampOffset());
+    }
+  }
+
+  // Name Resolution Block
+
+  /**
+   * ns_dnsname: Retrieve DNS server name
+   */
+  static class PcapDnsName extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof INameResolutionBlock)) {
+        return;
+      }
+      writer.setString(((INameResolutionBlock) block).getOptions().getDnsName());
+    }
+  }
+
+  /**
+   * ns_dnsIP4addr: Retrieve DNS IPV4 server address
+   */
+  static class PcapDnsIP4addr extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof INameResolutionBlock)) {
+        return;
+      }
+      writer.setString(((INameResolutionBlock) block).getOptions().getDnsIpv4Addr());
+    }
+  }
+
+  /**
+   * ns_dnsIP6addr: Retrieve DNS IPV6 server address
+   */
+  static class PcapDnsIP6addr extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.VARCHAR);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof INameResolutionBlock)) {
+        return;
+      }
+      writer.setString(((INameResolutionBlock) block).getOptions().getDnsIpv6Addr());
+    }
+  }
+
+  // Interface Statistics Block
+
+  /**
+   * isb_starttime: capture start time (timestamp resolution is defined in the Interface Description Block)
+   */
+  static class PcapIsbStarttime extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.TIMESTAMP);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IStatisticsBlock)) {
+        return;
+      }
+      IOptionsStatisticsHeader statisticsHeader = ((IStatisticsBlock) block).getOptions();
+      writer.setTimestamp(Instant.ofEpochMilli(statisticsHeader.getCaptureStartTime() / 1000));
+    }
+  }
+
+  /**
+   * isb_endtime: capture end time (timestamp resolution is defined in the Interface Description Block)
+   */
+  static class PcapIsbEndtime extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.TIMESTAMP);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IStatisticsBlock)) {
+        return;
+      }
+      IOptionsStatisticsHeader statisticsHeader = ((IStatisticsBlock) block).getOptions();
+      writer.setTimestamp(Instant.ofEpochMilli(statisticsHeader.getCaptureEndTime() / 1000));
+    }
+  }
+
+  /**
+   * isb_ifrecv: count of packets received by the interface
+   */
+  static class PcapIsbIfrecv extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.BIGINT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IStatisticsBlock)) {
+        return;
+      }
+      writer.setLong(((IStatisticsBlock) block).getOptions().getPacketReceivedCount());
+    }
+  }
+
+  /**
+   * isb_ifdrop: count of packets dropped by the interface
+   */
+  static class PcapIsbIfdrop extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.BIGINT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IStatisticsBlock)) {
+        return;
+      }
+      writer.setLong(((IStatisticsBlock) block).getOptions().getPacketDropCount());
+    }
+  }
+
+  /**
+   * isb_filteraccept: count of packets accepted by the filter
+   */
+  static class PcapIsbFilterAccept extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.BIGINT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IStatisticsBlock)) {
+        return;
+      }
+      writer.setLong(((IStatisticsBlock) block).getOptions().getPacketAcceptedByFilterCount());
+    }
+  }
+
+  /**
+   * isb_osdrop: count of packets dropped by the operating system
+   */
+  static class PcapIsbOSdrop extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.BIGINT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IStatisticsBlock)) {
+        return;
+      }
+      writer.setLong(((IStatisticsBlock) block).getOptions().getPacketDroppedByOS());
+    }
+  }
+
+  /**
+   * isb_usrdeliv: count of packets delivered to the user
+   */
+  static class PcapIsbUsrdeliv extends PcapColumn {
+
+    @Override
+    MajorType getType() {
+      return Types.optional(MinorType.BIGINT);
+    }
+
+    @Override
+    void process(IPcapngType block, ScalarWriter writer) {
+      if (!(block instanceof IStatisticsBlock)) {
+        return;
+      }
+      writer.setLong(((IStatisticsBlock) block).getOptions().getPacketDeliveredToUser());
+    }
+  }
+}
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngBatchReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngBatchReader.java
new file mode 100644
index 0000000..eeabebf
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngBatchReader.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.pcapng;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.drill.common.AutoCloseables;
+import org.apache.drill.common.exceptions.CustomErrorContext;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
+import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
+import org.apache.drill.exec.physical.resultSet.RowSetLoader;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.store.dfs.DrillFileSystem;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
+import org.apache.drill.exec.util.Utilities;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import fr.bmartel.pcapdecoder.PcapDecoder;
+import fr.bmartel.pcapdecoder.structure.types.IPcapngType;
+import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
+
+public class PcapngBatchReader implements ManagedReader<FileSchemaNegotiator> {
+
+  private static final Logger logger = LoggerFactory.getLogger(PcapngBatchReader.class);
+
+  private final PcapngFormatConfig config;
+  private final EasySubScan scan;
+  private final int maxRecords;
+  private CustomErrorContext errorContext;
+  private List<SchemaPath> columns;
+  private List<ColumnDefn> projectedColumns;
+  private Iterator<IPcapngType> pcapIterator;
+  private IPcapngType block;
+  private RowSetLoader loader;
+  private InputStream in;
+  private Path path;
+
+  public PcapngBatchReader(final PcapngFormatConfig config, final EasySubScan scan) {
+    this.config = config;
+    this.scan = scan;
+    this.maxRecords = scan.getMaxRecords();
+    this.columns = scan.getColumns();
+  }
+
+  @Override
+  public boolean open(FileSchemaNegotiator negotiator) {
+    try {
+      // init InputStream for pcap file
+      errorContext = negotiator.parentErrorContext();
+      DrillFileSystem dfs = negotiator.fileSystem();
+      path = dfs.makeQualified(negotiator.split().getPath());
+      in = dfs.openPossiblyCompressedStream(path);
+      // decode the pcap file
+      PcapDecoder decoder = new PcapDecoder(IOUtils.toByteArray(in));
+      decoder.decode();
+      pcapIterator = decoder.getSectionList().iterator();
+      logger.debug("The config is {}, root is {}, columns has {}", config, scan.getSelectionRoot(), columns);
+    } catch (IOException e) {
+      throw UserException
+             .dataReadError(e)
+             .message("Failure in initial pcapng inputstream. " + e.getMessage())
+             .addContext(errorContext)
+             .build(logger);
+    } catch (Exception e) {
+      throw UserException
+             .dataReadError(e)
+             .message("Failed to decode the pcapng file. " + e.getMessage())
+             .addContext(errorContext)
+             .build(logger);
+    }
+    // define the schema
+    negotiator.tableSchema(defineMetadata(), true);
+    ResultSetLoader resultSetLoader = negotiator.build();
+    loader = resultSetLoader.writer();
+    // bind the writer for columns
+    bindColumns(loader);
+    return true;
+  }
+
+  /**
+   * The `stat` parameter defaults to false, meaning the packet data is parsed and returned;
+   * if true, only the statistics data of each pcapng file is returned
+   * (consisting of information about the capture devices and a summary of the packet data above).
+   *
+   * In addition, a pcapng file contains a single Section Header Block (SHB),
+   * a single Interface Description Block (IDB) and a few Enhanced Packet Blocks (EPB).
+   * <pre>
+   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   * | SHB | IDB | EPB | EPB |    ...    | EPB |
+   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   * </pre>
+   * https://pcapng.github.io/pcapng/draft-tuexen-opsawg-pcapng.html#name-physical-file-layout
+   */
+  @Override
+  public boolean next() {
+    while (!loader.isFull()) {
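+      // Advance block by block: stop when the file is exhausted; in stat mode skip
+      // Enhanced Packet Blocks, in packet (default) mode skip every other block type.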
+      if (!pcapIterator.hasNext()) {
+        return false;
+      } else if (config.getStat() && isIEnhancedPacketBlock()) {
+        continue;
+      } else if (!config.getStat() && !isIEnhancedPacketBlock()) {
+        continue;
+      }
+      processBlock();
+      if (loader.limitReached(maxRecords)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  @Override
+  public void close() {
+    AutoCloseables.closeSilently(in);
+  }
+
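+  // Advances the block iterator, caches the current block for processBlock(), and
+  // reports whether it is an Enhanced Packet Block (the only per-packet block type).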
+  private boolean isIEnhancedPacketBlock() {
+    block = pcapIterator.next();
+    return block instanceof IEnhancedPacketBLock;
+  }
+
+  private void processBlock() {
+    loader.start();
+    for (ColumnDefn columnDefn : projectedColumns) {
+      // pcapng file name
+      if (columnDefn.getName().equals(PcapColumn.PATH_NAME)) {
+        columnDefn.load(path.getName());
+      } else {
+        // pcapng block data
+        columnDefn.load(block);
+      }
+    }
+    loader.save();
+  }
+
+  private boolean isSkipQuery() {
+    return columns.isEmpty();
+  }
+
+  private boolean isStarQuery() {
+    return Utilities.isStarQuery(columns);
+  }
+
+  private TupleMetadata defineMetadata() {
+    SchemaBuilder builder = new SchemaBuilder();
+    processProjected(columns);
+    for (ColumnDefn columnDefn : projectedColumns) {
+      columnDefn.define(builder);
+    }
+    return builder.buildSchema();
+  }
+
+  /**
+   * <b> Define the schema based on the projected columns </b><br/>
+   * 1. SkipQuery: no field specified, such as count(*) <br/>
+   * 2. StarQuery: select * <br/>
+   * 3. ProjectPushdownQuery: select a,b,c <br/>
+   */
+  private void processProjected(List<SchemaPath> columns) {
+    projectedColumns = new ArrayList<ColumnDefn>();
+    if (isSkipQuery()) {
+      projectedColumns.add(new ColumnDefn(PcapColumn.DUMMY_NAME, new PcapColumn.PcapDummy()));
+    } else if (isStarQuery()) {
+      Set<Map.Entry<String, PcapColumn>> pcapColumns;
+      if (config.getStat()) {
+        pcapColumns = PcapColumn.getSummaryColumns().entrySet();
+      } else {
+        pcapColumns = PcapColumn.getColumns().entrySet();
+      }
+      for (Map.Entry<String, PcapColumn> pcapColumn : pcapColumns) {
+        makePcapColumns(projectedColumns, pcapColumn.getKey(), pcapColumn.getValue());
+      }
+    } else {
+      for (SchemaPath schemaPath : columns) {
+        // Support case-insensitive column names
+        String projectedName = schemaPath.rootName().toLowerCase();
+        PcapColumn pcapColumn;
+        if (config.getStat()) {
+          pcapColumn = PcapColumn.getSummaryColumns().get(projectedName);
+        } else {
+          pcapColumn = PcapColumn.getColumns().get(projectedName);
+        }
+        if (pcapColumn != null) {
+          makePcapColumns(projectedColumns, projectedName, pcapColumn);
+        } else {
+          makePcapColumns(projectedColumns, projectedName, new PcapColumn.PcapDummy());
+          logger.debug("{} has no PcapColumn implementation class.", projectedName);
+        }
+      }
+    }
+    projectedColumns = Collections.unmodifiableList(projectedColumns);
+  }
+
+  private void makePcapColumns(List<ColumnDefn> projectedColumns, String name, PcapColumn column) {
+    projectedColumns.add(new ColumnDefn(name, column));
+  }
+
+  private void bindColumns(RowSetLoader loader) {
+    for (ColumnDefn columnDefn : projectedColumns) {
+      columnDefn.bind(loader);
+    }
+  }
+
+  private static class ColumnDefn {
+
+    private final String name;
+    private PcapColumn processor;
+    private ScalarWriter writer;
+
+    public ColumnDefn(String name, PcapColumn column) {
+      this.name = name;
+      this.processor = column;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public PcapColumn getProcessor() {
+      return processor;
+    }
+
+    public void bind(RowSetLoader loader) {
+      writer = loader.scalar(getName());
+    }
+
+    public void define(SchemaBuilder builder) {
+      if (getProcessor().getType().getMode() == DataMode.REQUIRED) {
+        builder.add(getName(), getProcessor().getType().getMinorType());
+      } else {
+        builder.addNullable(getName(), getProcessor().getType().getMinorType());
+      }
+    }
+
+    public void load(IPcapngType block) {
+      getProcessor().process(block, writer);
+    }
+
+    public void load(String value) {
+      writer.setString(value);
+    }
+  }
+}
\ No newline at end of file
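Editor's note: a hedged sketch (not part of the patch) of how processProjected() above resolves a projected column name against the two column dictionaries, depending on the stat flag; the column name used here is taken from the Javadoc and is illustrative only.

    // Illustrative sketch only -- restates the lookup logic added by this patch.
    PcapngFormatConfig config = new PcapngFormatConfig(null, true);  // stat mode
    String projectedName = "isb_ifrecv".toLowerCase();               // names are matched case-insensitively
    PcapColumn column = config.getStat()
        ? PcapColumn.getSummaryColumns().get(projectedName)          // per-file statistics columns
        : PcapColumn.getColumns().get(projectedName);                // per-packet (Enhanced Packet Block) columns
    if (column == null) {
      column = new PcapColumn.PcapDummy();                           // unknown names project as dummy columns
    }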
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatConfig.java
index 8ded7ad..7210f93 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatConfig.java
@@ -17,24 +17,42 @@
  */
 package org.apache.drill.exec.store.pcapng;
 
-import com.fasterxml.jackson.annotation.JsonTypeName;
-
-import org.apache.drill.common.PlanStringBuilder;
-import org.apache.drill.common.logical.FormatPluginConfig;
-
-import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
 
-@JsonTypeName("pcapng")
+import org.apache.drill.common.PlanStringBuilder;
+import org.apache.drill.common.logical.FormatPluginConfig;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+
+@JsonTypeName(PcapngFormatConfig.NAME)
+@JsonInclude(JsonInclude.Include.NON_DEFAULT)
 public class PcapngFormatConfig implements FormatPluginConfig {
 
-  public List<String> extensions = Collections.singletonList("pcapng");
+  public static final String NAME = "pcapng";
+  private final List<String> extensions;
+  private final boolean stat;
 
+  @JsonCreator
+  public PcapngFormatConfig(@JsonProperty("extensions") List<String> extensions, @JsonProperty("stat") boolean stat) {
+    this.extensions = extensions == null ? ImmutableList.of(PcapngFormatConfig.NAME) : ImmutableList.copyOf(extensions);
+    this.stat = stat;
+  }
+
+  @JsonProperty("extensions")
   public List<String> getExtensions() {
     return extensions;
   }
 
+  @JsonProperty("stat")
+  public boolean getStat() {
+    return this.stat;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) {
@@ -44,18 +62,16 @@
       return false;
     }
     PcapngFormatConfig that = (PcapngFormatConfig) o;
-    return Objects.equals(extensions, that.extensions);
+    return Objects.equals(extensions, that.extensions) && Objects.equals(stat, that.getStat());
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(extensions);
+    return Objects.hash(extensions, stat);
   }
 
   @Override
   public String toString() {
-    return new PlanStringBuilder(this)
-        .field("extensions", extensions)
-        .toString();
+    return new PlanStringBuilder(this).field("extensions", extensions).field("stat", stat).toString();
   }
 }
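Editor's note: a hedged example (not part of the patch) of the defaults the new immutable config establishes.

    // "extensions" falls back to ["pcapng"] when omitted from the plugin JSON, and
    // "stat" now participates in equals()/hashCode(), so the two read modes are
    // distinct plugin configurations.
    PcapngFormatConfig defaults = new PcapngFormatConfig(null, false);
    assert defaults.getExtensions().equals(ImmutableList.of(PcapngFormatConfig.NAME));
    assert !defaults.equals(new PcapngFormatConfig(defaults.getExtensions(), true));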
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatPlugin.java
index 41be760..0cccd6b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngFormatPlugin.java
@@ -17,79 +17,74 @@
  */
 package org.apache.drill.exec.store.pcapng;
 
-import java.io.IOException;
-import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.planner.common.DrillStatsTable;
-import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileReaderFactory;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileScanBuilder;
+import org.apache.drill.exec.physical.impl.scan.file.FileScanFramework.FileSchemaNegotiator;
+import org.apache.drill.exec.physical.impl.scan.framework.ManagedReader;
 import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.store.RecordWriter;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
+import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.dfs.easy.EasyFormatPlugin;
-import org.apache.drill.exec.store.dfs.easy.EasyWriter;
-import org.apache.drill.exec.store.dfs.easy.FileWork;
+import org.apache.drill.exec.store.dfs.easy.EasySubScan;
 import org.apache.hadoop.conf.Configuration;
 
-import java.util.List;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
 public class PcapngFormatPlugin extends EasyFormatPlugin<PcapngFormatConfig> {
 
-  public static final String DEFAULT_NAME = "pcapng";
-
-  public PcapngFormatPlugin(String name, DrillbitContext context, Configuration fsConf,
-                            StoragePluginConfig storagePluginConfig) {
-    this(name, context, fsConf, storagePluginConfig, new PcapngFormatConfig());
+  public PcapngFormatPlugin(String name,
+                            DrillbitContext context,
+                            Configuration fsConf,
+                            StoragePluginConfig storageConfig,
+                            PcapngFormatConfig formatConfig) {
+    super(name, easyConfig(fsConf, formatConfig), context, storageConfig, formatConfig);
   }
 
-  public PcapngFormatPlugin(String name, DrillbitContext context, Configuration fsConf, StoragePluginConfig config, PcapngFormatConfig formatPluginConfig) {
-    super(name, context, fsConf, config, formatPluginConfig, true,
-        false, false, true,
-        formatPluginConfig.getExtensions(), DEFAULT_NAME);
+  private static EasyFormatConfig easyConfig(Configuration fsConf, PcapngFormatConfig pluginConfig) {
+    return EasyFormatConfig.builder()
+        .readable(true)
+        .writable(false)
+        .blockSplittable(false)
+        .compressible(true)
+        .extensions(pluginConfig.getExtensions())
+        .fsConf(fsConf)
+        .useEnhancedScan(true)
+        .supportsLimitPushdown(true)
+        .supportsProjectPushdown(true)
+        .defaultName(PcapngFormatConfig.NAME)
+        .build();
+  }
+
+  private static class PcapngReaderFactory extends FileReaderFactory {
+
+    private final PcapngFormatConfig config;
+    private final EasySubScan scan;
+
+    public PcapngReaderFactory(PcapngFormatConfig config, EasySubScan scan) {
+      this.config = config;
+      this.scan = scan;
+    }
+
+    @Override
+    public ManagedReader<? extends FileSchemaNegotiator> newReader() {
+      return new PcapngBatchReader(config, scan);
+    }
   }
 
   @Override
-  public boolean supportsPushDown() {
-    return true;
+  public ManagedReader<? extends FileSchemaNegotiator> newBatchReader(EasySubScan scan, OptionManager options)
+      throws ExecutionSetupException {
+    return new PcapngBatchReader(formatConfig, scan);
   }
 
   @Override
-  public RecordReader getRecordReader(FragmentContext context, DrillFileSystem dfs,
-                                      FileWork fileWork, List<SchemaPath> columns,
-                                      String userName) {
-    return new PcapngRecordReader(fileWork.getPath(), dfs, columns);
-  }
+  protected FileScanBuilder frameworkBuilder(OptionManager options, EasySubScan scan) throws ExecutionSetupException {
+    FileScanBuilder builder = new FileScanBuilder();
+    builder.setReaderFactory(new PcapngReaderFactory(formatConfig, scan));
 
-  @Override
-  public RecordWriter getRecordWriter(FragmentContext context, EasyWriter writer) {
-    throw new UnsupportedOperationException("unimplemented");
-  }
-
-  @Override
-  public int getReaderOperatorType() {
-    return UserBitShared.CoreOperatorType.PCAPNG_SUB_SCAN_VALUE;
-  }
-
-  @Override
-  public int getWriterOperatorType() {
-    throw new UnsupportedOperationException("unimplemented");
-  }
-
-  @Override
-  public boolean supportsStatistics() {
-    return false;
-  }
-
-  @Override
-  public DrillStatsTable.TableStatistics readStatistics(FileSystem fs, Path statsTablePath) throws IOException {
-    return null;
-  }
-
-  @Override
-  public void writeStatistics(DrillStatsTable.TableStatistics statistics, FileSystem fs, Path statsTablePath) throws IOException {
-
+    initScanBuilder(builder, scan);
+    builder.nullType(Types.optional(MinorType.VARCHAR));
+    return builder;
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngRecordReader.java
deleted file mode 100644
index 152e2e6..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/PcapngRecordReader.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.pcapng;
-
-import fr.bmartel.pcapdecoder.PcapDecoder;
-import fr.bmartel.pcapdecoder.structure.types.IPcapngType;
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.commons.io.IOUtils;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.ops.OperatorContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.store.AbstractRecordReader;
-import org.apache.drill.exec.store.dfs.DrillFileSystem;
-import org.apache.drill.exec.store.pcapng.schema.Column;
-import org.apache.drill.exec.store.pcapng.schema.DummyArrayImpl;
-import org.apache.drill.exec.store.pcapng.schema.DummyImpl;
-import org.apache.drill.exec.store.pcapng.schema.Schema;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-import java.util.function.BiConsumer;
-
-public class PcapngRecordReader extends AbstractRecordReader {
-  private static final Logger logger = LoggerFactory.getLogger(PcapngRecordReader.class);
-
-  // batch size should not exceed max allowed record count
-  private static final int BATCH_SIZE = 40_000;
-
-  private final Path pathToFile;
-  private OutputMutator output;
-  private List<ProjectedColumnInfo> projectedCols;
-  private DrillFileSystem fs;
-  private InputStream in;
-  private List<SchemaPath> columns;
-
-  private Iterator<IPcapngType> it;
-
-  public PcapngRecordReader(final Path pathToFile,
-                            final DrillFileSystem fileSystem,
-                            final List<SchemaPath> columns) {
-    this.fs = fileSystem;
-    this.pathToFile = fs.makeQualified(pathToFile);
-    this.columns = columns;
-    setColumns(columns);
-  }
-
-  @Override
-  public void setup(final OperatorContext context, final OutputMutator output) throws ExecutionSetupException {
-    try {
-
-      this.output = output;
-      this.in = fs.openPossiblyCompressedStream(pathToFile);
-      PcapDecoder decoder = new PcapDecoder(IOUtils.toByteArray(in));
-      decoder.decode();
-      this.it = decoder.getSectionList().iterator();
-      setupProjection();
-    } catch (IOException io) {
-      throw UserException.dataReadError(io)
-          .addContext("File name:", pathToFile.toUri().getPath())
-          .build(logger);
-    }
-  }
-
-  @Override
-  public int next() {
-    if (isSkipQuery()) {
-      return iterateOverBlocks((block, counter) -> {
-      });
-    } else {
-      return iterateOverBlocks((block, counter) -> putToTable((IEnhancedPacketBLock) block, counter));
-    }
-  }
-
-  private void putToTable(IEnhancedPacketBLock bLock, Integer counter) {
-    for (ProjectedColumnInfo pci : projectedCols) {
-      pci.getColumn().process(bLock, pci.getVv(), counter);
-    }
-  }
-
-  @Override
-  public void close() throws Exception {
-    if (in != null) {
-      in.close();
-      in = null;
-    }
-  }
-
-  private void setupProjection() {
-    if (isSkipQuery()) {
-      projectedCols = projectNone();
-    } else if (isStarQuery()) {
-      projectedCols = projectAllCols(Schema.getColumnsNames());
-    } else {
-      projectedCols = projectCols(columns);
-    }
-  }
-
-  private List<ProjectedColumnInfo> projectNone() {
-    List<ProjectedColumnInfo> pciBuilder = new ArrayList<>();
-    pciBuilder.add(makeColumn("dummy", new DummyImpl()));
-    return Collections.unmodifiableList(pciBuilder);
-  }
-
-  private List<ProjectedColumnInfo> projectAllCols(final Set<String> columns) {
-    List<ProjectedColumnInfo> pciBuilder = new ArrayList<>();
-    for (String colName : columns) {
-      pciBuilder.add(makeColumn(colName, Schema.getColumns().get(colName)));
-    }
-    return Collections.unmodifiableList(pciBuilder);
-  }
-
-  private List<ProjectedColumnInfo> projectCols(final List<SchemaPath> columns) {
-    List<ProjectedColumnInfo> pciBuilder = new ArrayList<>();
-    for (SchemaPath schemaPath : columns) {
-      String projectedName = schemaPath.rootName();
-      if (schemaPath.isArray()) {
-        pciBuilder.add(makeColumn(projectedName, new DummyArrayImpl()));
-      } else if (Schema.getColumns().containsKey(projectedName.toLowerCase())) {
-        pciBuilder.add(makeColumn(projectedName,
-            Schema.getColumns().get(projectedName.toLowerCase())));
-      } else {
-        pciBuilder.add(makeColumn(projectedName, new DummyImpl()));
-      }
-    }
-    return Collections.unmodifiableList(pciBuilder);
-  }
-
-  private ProjectedColumnInfo makeColumn(final String colName, final Column column) {
-    MaterializedField field = MaterializedField.create(colName, column.getMinorType());
-    ValueVector vector = getValueVector(field, output);
-    return new ProjectedColumnInfo(vector, column, colName);
-  }
-
-  private ValueVector getValueVector(final MaterializedField field, final OutputMutator output) {
-    try {
-      TypeProtos.MajorType majorType = field.getType();
-      final Class<? extends ValueVector> clazz = TypeHelper.getValueVectorClass(
-          majorType.getMinorType(), majorType.getMode());
-
-      return output.addField(field, clazz);
-    } catch (SchemaChangeException sce) {
-      throw UserException.internalError(sce)
-          .addContext("The addition of this field is incompatible with this OutputMutator's capabilities")
-          .build(logger);
-    }
-  }
-
-  private Integer iterateOverBlocks(BiConsumer<IPcapngType, Integer> consumer) {
-    int counter = 0;
-    while (it.hasNext() && counter < BATCH_SIZE) {
-      IPcapngType block = it.next();
-      if (block instanceof IEnhancedPacketBLock) {
-        consumer.accept(block, counter);
-        counter++;
-      }
-    }
-    return counter;
-  }
-
-  private static class ProjectedColumnInfo {
-
-    private ValueVector vv;
-    private Column colDef;
-    private String columnName;
-
-    ProjectedColumnInfo(ValueVector vv, Column colDef, String columnName) {
-      this.vv = vv;
-      this.colDef = colDef;
-      this.columnName = columnName;
-    }
-
-    public ValueVector getVv() {
-      return vv;
-    }
-
-    Column getColumn() {
-      return colDef;
-    }
-
-    public String getColumnName() {
-      return columnName;
-    }
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyImpl.java
deleted file mode 100644
index a8c26a0..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/DummyImpl.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.pcapng.schema;
-
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.vector.ValueVector;
-
-public class DummyImpl implements Column {
-  @Override
-  public TypeProtos.MajorType getMinorType() {
-    return Types.optional(TypeProtos.MinorType.INT);
-  }
-
-  @Override
-  public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Schema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Schema.java
deleted file mode 100644
index a9738bd..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Schema.java
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.pcapng.schema;
-
-import fr.bmartel.pcapdecoder.structure.types.inter.IEnhancedPacketBLock;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.store.pcapng.decoder.PacketDecoder;
-import org.apache.drill.exec.vector.ValueVector;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import static org.apache.drill.exec.store.pcap.PcapFormatUtils.parseBytesToASCII;
-import static org.apache.drill.exec.store.pcapng.schema.Util.setNullableLongColumnValue;
-
-public class Schema {
-
-  private final static Map<String, Column> columns = new HashMap<>();
-
-  static {
-    columns.put("timestamp", new TimestampImpl());
-    columns.put("packet_length", new PacketLenImpl());
-    columns.put("type", new TypeImpl());
-    columns.put("src_ip", new SrcIpImpl());
-    columns.put("dst_ip", new DstIpImpl());
-    columns.put("src_port", new SrcPortImpl());
-    columns.put("dst_port", new DstPortImpl());
-    columns.put("src_mac_address", new SrcMacImpl());
-    columns.put("dst_mac_address", new DstMacImpl());
-    columns.put("tcp_session", new TcpSessionImpl());
-    columns.put("tcp_ack", new TcpAckImpl());
-    columns.put("tcp_flags", new TcpFlags());
-    columns.put("tcp_flags_ns", new TcpFlagsNsImpl());
-    columns.put("tcp_flags_cwr", new TcpFlagsCwrImpl());
-    columns.put("tcp_flags_ece", new TcpFlagsEceImpl());
-    columns.put("tcp_flags_ece_ecn_capable", new TcpFlagsEceEcnCapableImpl());
-    columns.put("tcp_flags_ece_congestion_experienced", new TcpFlagsEceCongestionExperiencedImpl());
-    columns.put("tcp_flags_urg", new TcpFlagsUrgIml());
-    columns.put("tcp_flags_ack", new TcpFlagsAckImpl());
-    columns.put("tcp_flags_psh", new TcpFlagsPshImpl());
-    columns.put("tcp_flags_rst", new TcpFlagsRstImpl());
-    columns.put("tcp_flags_syn", new TcpFlagsSynImpl());
-    columns.put("tcp_flags_fin", new TcpFlagsFinImpl());
-    columns.put("tcp_parsed_flags", new TcpParsedFlags());
-    columns.put("packet_data", new PacketDataImpl());
-  }
-
-  public static Map<String, Column> getColumns() {
-    return columns;
-  }
-
-  public static Set<String> getColumnsNames() {
-    return columns.keySet();
-  }
-
-  static class TimestampImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.required(TypeProtos.MinorType.TIMESTAMP);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      Util.setTimestampColumnValue(block.getTimeStamp(), vv, count);
-    }
-  }
-
-  static class PacketLenImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.required(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      Util.setIntegerColumnValue(block.getPacketLength(), vv, count);
-    }
-  }
-
-  static class TypeImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.VARCHAR);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableStringColumnValue(packet.getPacketType(), vv, count);
-      }
-    }
-  }
-
-  static class SrcIpImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.VARCHAR);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableStringColumnValue(packet.getSrc_ip().getHostAddress(), vv, count);
-      }
-    }
-  }
-
-  static class DstIpImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.VARCHAR);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableStringColumnValue(packet.getDst_ip().getHostAddress(), vv, count);
-      }
-    }
-  }
-
-  static class SrcPortImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableIntegerColumnValue(packet.getSrc_port(), vv, count);
-      }
-    }
-  }
-
-  static class DstPortImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableIntegerColumnValue(packet.getDst_port(), vv, count);
-      }
-    }
-  }
-
-  static class SrcMacImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.VARCHAR);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableStringColumnValue(packet.getEthernetSource(), vv, count);
-      }
-    }
-  }
-
-  static class DstMacImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.VARCHAR);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableStringColumnValue(packet.getEthernetDestination(), vv, count);
-      }
-    }
-  }
-
-  static class TcpSessionImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.BIGINT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        setNullableLongColumnValue(packet.getSessionHash(), vv, count);
-      }
-    }
-  }
-
-  static class TcpAckImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableIntegerColumnValue(packet.getAckNumber(), vv, count);
-      }
-    }
-  }
-
-  static class TcpFlags implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableIntegerColumnValue(packet.getFlags(), vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsNsImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x100) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsCwrImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x80) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsEceImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x40) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsEceEcnCapableImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x42) == 0x42, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsEceCongestionExperiencedImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x42) == 0x40, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsUrgIml implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x20) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsAckImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x10) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsPshImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x8) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsRstImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x4) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsSynImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x2) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpFlagsFinImpl implements Column {
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.INT);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableBooleanColumnValue((packet.getFlags() & 0x1) != 0, vv, count);
-      }
-    }
-  }
-
-  static class TcpParsedFlags implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.VARCHAR);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableStringColumnValue(packet.getParsedFlags(), vv, count);
-      }
-    }
-  }
-
-  static class PacketDataImpl implements Column {
-    @Override
-    public TypeProtos.MajorType getMinorType() {
-      return Types.optional(TypeProtos.MinorType.VARCHAR);
-    }
-
-    @Override
-    public void process(IEnhancedPacketBLock block, ValueVector vv, int count) {
-      PacketDecoder packet = new PacketDecoder();
-      if (packet.readPcapng(block.getPacketData())) {
-        Util.setNullableStringColumnValue(parseBytesToASCII(block.getPacketData()), vv, count);
-      }
-    }
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Util.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Util.java
deleted file mode 100644
index 06e8e6a..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/pcapng/schema/Util.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.pcapng.schema;
-
-import org.apache.drill.exec.vector.IntVector;
-import org.apache.drill.exec.vector.NullableBigIntVector;
-import org.apache.drill.exec.vector.NullableIntVector;
-import org.apache.drill.exec.vector.NullableVarCharVector;
-import org.apache.drill.exec.vector.TimeStampVector;
-import org.apache.drill.exec.vector.ValueVector;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-public class Util {
-  static void setNullableIntegerColumnValue(final int data, final ValueVector vv, final int count) {
-    ((NullableIntVector.Mutator) vv.getMutator())
-        .setSafe(count, data);
-  }
-
-  static void setIntegerColumnValue(final int data, final ValueVector vv, final int count) {
-    ((IntVector.Mutator) vv.getMutator())
-        .setSafe(count, data);
-  }
-
-  static void setTimestampColumnValue(final long data, final ValueVector vv, final int count) {
-    ((TimeStampVector.Mutator) vv.getMutator())
-        .setSafe(count, data / 1000);
-  }
-
-  static void setNullableLongColumnValue(final long data, final ValueVector vv, final int count) {
-    ((NullableBigIntVector.Mutator) vv.getMutator())
-        .setSafe(count, data);
-  }
-
-  static void setNullableStringColumnValue(final String data, final ValueVector vv, final int count) {
-    ((NullableVarCharVector.Mutator) vv.getMutator())
-        .setSafe(count, data.getBytes(UTF_8), 0, data.length());
-  }
-
-  static void setNullableBooleanColumnValue(final boolean data, final ValueVector vv, final int count) {
-    ((NullableIntVector.Mutator) vv.getMutator())
-        .setSafe(count, data ? 1 : 0);
-  }
-}
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTableScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTableScan.java
index b2e37fe..150ed1a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTableScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/SystemTableScan.java
@@ -37,12 +37,13 @@
 import org.apache.drill.exec.physical.base.SubScan;
 import org.apache.drill.exec.planner.fragment.DistributionAffinity;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 
 @JsonTypeName("sys")
 public class SystemTableScan extends AbstractGroupScan implements SubScan {
 
+  public static final String OPERATOR_TYPE = "SYSTEM_TABLE_SCAN";
+
   private final SystemTable table;
   private final SystemTablePlugin plugin;
   private final int maxRecordsToRead;
@@ -137,8 +138,8 @@
   }
 
   @Override
-  public int getOperatorType() {
-    return CoreOperatorType.SYSTEM_TABLE_SCAN_VALUE;
+  public String getOperatorType() {
+    return OPERATOR_TYPE;
   }
 
   /**
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java
index 620dad5..2a6bebc 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/table/function/WithOptionsTableMacro.java
@@ -54,7 +54,9 @@
         .message("Unable to find table [%s]", sig.getName())
         .build(logger);
     }
-    return new DrillTranslatableTable(drillTable);
+    return drillTable instanceof TranslatableTable
+        ? (TranslatableTable) drillTable :
+        new DrillTranslatableTable(drillTable);
   }
 
   @Override
diff --git a/exec/java-exec/src/main/resources/bootstrap-storage-plugins.json b/exec/java-exec/src/main/resources/bootstrap-storage-plugins.json
index 4aa1754..a88deb2 100644
--- a/exec/java-exec/src/main/resources/bootstrap-storage-plugins.json
+++ b/exec/java-exec/src/main/resources/bootstrap-storage-plugins.json
@@ -31,11 +31,6 @@
           "extensions" : [ "tsv" ],
           "fieldDelimiter" : "\t"
         },
-        "httpd" : {
-          "type" : "httpd",
-          "logFormat" : "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"",
-          "timestampFormat" : "dd/MMM/yyyy:HH:mm:ss ZZ"
-        },
         "parquet" : {
           "type" : "parquet"
         },
@@ -62,10 +57,6 @@
           "extensions" : [ "csvh" ],
           "delimiter" : ",",
           "extractHeader" : true
-        },
-        "image" : {
-          "type" : "image",
-          "extensions" : [ "jpg", "jpeg", "jpe", "tif", "tiff", "dng", "psd", "png", "bmp", "gif", "ico", "pcx", "wav", "wave", "avi", "webp", "mov", "mp4", "m4a", "m4p", "m4b", "m4r", "m4v", "3gp", "3g2", "eps", "epsf", "epsi", "ai", "arw", "crw", "cr2", "nef", "orf", "raf", "rw2", "rwl", "srw", "x3f" ]
         }
       },
       "enabled" : true
@@ -167,10 +158,6 @@
           "extensions" : [ "csvh" ],
           "fieldDelimiter" : ",",
           "extractHeader" : true
-        },
-        "image" : {
-          "type" : "image",
-          "extensions" : [ "jpg", "jpeg", "jpe", "tif", "tiff", "dng", "psd", "png", "bmp", "gif", "ico", "pcx", "wav", "wave", "avi", "webp", "mov", "mp4", "m4a", "m4p", "m4b", "m4r", "m4v", "3gp", "3g2", "eps", "epsf", "epsi", "ai", "arw", "crw", "cr2", "nef", "orf", "raf", "rw2", "rwl", "srw", "x3f" ]
         }
       },
       "enabled" : true
diff --git a/exec/java-exec/src/main/resources/rest/profile/profile.ftl b/exec/java-exec/src/main/resources/rest/profile/profile.ftl
index 34eed51..e9a8a5b 100644
--- a/exec/java-exec/src/main/resources/rest/profile/profile.ftl
+++ b/exec/java-exec/src/main/resources/rest/profile/profile.ftl
@@ -37,8 +37,7 @@
 
 <script>
     var globalconfig = {
-        "queryid" : "${model.getQueryId()}",
-        "operators" : ${model.getOperatorsJSON()?no_esc}
+        "queryid" : "${model.getQueryId()}"
     };
 
     $(document).ready(function() {
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestOperatorMetrics.java b/exec/java-exec/src/test/java/org/apache/drill/TestOperatorMetrics.java
index ed21275..e411271 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestOperatorMetrics.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestOperatorMetrics.java
@@ -19,13 +19,15 @@
 
 import org.apache.drill.categories.OperatorTest;
 import org.apache.drill.exec.ops.OperatorMetricRegistry;
-import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.physical.config.ExternalSort;
+import org.apache.drill.exec.physical.config.NestedLoopJoinPOP;
+import org.apache.drill.exec.physical.config.Screen;
 import org.apache.drill.test.BaseTestQuery;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertNull;
 
 @Category(OperatorTest.class)
@@ -33,18 +35,18 @@
 
   @Test
   public void testMetricNames() {
-    assertEquals(new String[]{"BYTES_SENT"},
-              OperatorMetricRegistry.getMetricNames(UserBitShared.CoreOperatorType.SCREEN_VALUE));
+    assertArrayEquals(new String[]{"BYTES_SENT"},
+              OperatorMetricRegistry.getMetricNames(Screen.OPERATOR_TYPE));
 
-    assertEquals(new String[]{"SPILL_COUNT", "NOT_USED", "PEAK_BATCHES_IN_MEMORY", "MERGE_COUNT", "MIN_BUFFER",
+    assertArrayEquals(new String[]{"SPILL_COUNT", "NOT_USED", "PEAK_BATCHES_IN_MEMORY", "MERGE_COUNT", "MIN_BUFFER",
                       "SPILL_MB"},
-              OperatorMetricRegistry.getMetricNames(UserBitShared.CoreOperatorType.EXTERNAL_SORT_VALUE));
+              OperatorMetricRegistry.getMetricNames(ExternalSort.OPERATOR_TYPE));
   }
 
   @Test
   public void testNonExistentMetricNames() {
-    assertNull(OperatorMetricRegistry.getMetricNames(UserBitShared.CoreOperatorType.NESTED_LOOP_JOIN_VALUE));
+    assertNull(OperatorMetricRegistry.getMetricNames(NestedLoopJoinPOP.OPERATOR_TYPE));
 
-    assertNull(OperatorMetricRegistry.getMetricNames(202));
+    assertNull(OperatorMetricRegistry.getMetricNames("FOO_BAR"));
   }
 }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/BaseTestInheritance.java b/exec/java-exec/src/test/java/org/apache/drill/TestforBaseTestInheritance.java
similarity index 97%
rename from exec/java-exec/src/test/java/org/apache/drill/BaseTestInheritance.java
rename to exec/java-exec/src/test/java/org/apache/drill/TestforBaseTestInheritance.java
index 4ae7e88..d806451 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/BaseTestInheritance.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestforBaseTestInheritance.java
@@ -30,7 +30,7 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
-public class BaseTestInheritance extends BaseTest {
+public class TestforBaseTestInheritance extends BaseTest {
 
   @Test
   @Category(UnlikelyTest.class)
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/impl/TestStringFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/impl/TestStringFunctions.java
index 0f79daa..8965edf 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/impl/TestStringFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/impl/TestStringFunctions.java
@@ -55,17 +55,19 @@
   @Test
   public void testSplitPart() throws Exception {
     testBuilder()
-        .sqlQuery("select split_part('abc~@~def~@~ghi', '~@~', 1) res1 from (values(1))")
+        .sqlQuery("select split_part(a, '~@~', 1) res1 from (values('abc~@~def~@~ghi'), ('qwe~@~rty~@~uio')) as t(a)")
         .ordered()
         .baselineColumns("res1")
         .baselineValues("abc")
+        .baselineValues("qwe")
         .go();
 
     testBuilder()
-        .sqlQuery("select split_part('abc~@~def~@~ghi', '~@~', 2) res1 from (values(1))")
+        .sqlQuery("select split_part(a, '~@~', 2) res1 from (values('abc~@~def~@~ghi'), ('qwe~@~rty~@~uio')) as t(a)")
         .ordered()
         .baselineColumns("res1")
         .baselineValues("def")
+        .baselineValues("rty")
         .go();
 
     // invalid index
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/memory/TestAllocators.java b/exec/java-exec/src/test/java/org/apache/drill/exec/memory/TestAllocators.java
index 4fb5a8f..061a86c 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/memory/TestAllocators.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/memory/TestAllocators.java
@@ -39,16 +39,18 @@
 import org.apache.drill.exec.ops.OperatorUtilities;
 import org.apache.drill.exec.physical.PhysicalPlan;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.config.UnionAll;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.store.StoragePluginRegistryImpl;
+import org.apache.drill.exec.store.easy.text.TextFormatPlugin;
+import org.apache.drill.exec.store.mock.MockSubScanPOP;
 import org.apache.drill.exec.vector.BitVector;
 import org.apache.drill.exec.vector.IntVector;
 import org.apache.drill.test.DrillTest;
@@ -219,7 +221,7 @@
       OperatorStats stats;
 
       // Use some bogus operator type to create a new operator context.
-      def = new OpProfileDef(physicalOperator1.getOperatorId(), UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE,
+      def = new OpProfileDef(physicalOperator1.getOperatorId(), MockSubScanPOP.OPERATOR_TYPE,
           OperatorUtilities.getChildCount(physicalOperator1));
       stats = fragmentContext1.getStats().newOperatorStats(def, fragmentContext1.getAllocator());
 
@@ -233,7 +235,7 @@
 
       OperatorContext oContext21 = fragmentContext1.newOperatorContext(physicalOperator3);
 
-      def = new OpProfileDef(physicalOperator4.getOperatorId(), UserBitShared.CoreOperatorType.TEXT_WRITER_VALUE,
+      def = new OpProfileDef(physicalOperator4.getOperatorId(), TextFormatPlugin.WRITER_OPERATOR_TYPE,
           OperatorUtilities.getChildCount(physicalOperator4));
       stats = fragmentContext2.getStats().newOperatorStats(def, fragmentContext2.getAllocator());
       OperatorContext oContext22 = fragmentContext2.newOperatorContext(physicalOperator4, stats);
@@ -247,7 +249,7 @@
       FragmentContextImpl fragmentContext3 = new FragmentContextImpl(bitContext, pf3, null, functionRegistry);
 
       // New fragment starts an operator that allocates an amount within the limit
-      def = new OpProfileDef(physicalOperator5.getOperatorId(), UserBitShared.CoreOperatorType.UNION_VALUE,
+      def = new OpProfileDef(physicalOperator5.getOperatorId(), UnionAll.OPERATOR_TYPE,
           OperatorUtilities.getChildCount(physicalOperator5));
       stats = fragmentContext3.getStats().newOperatorStats(def, fragmentContext3.getAllocator());
       OperatorContext oContext31 = fragmentContext3.newOperatorContext(physicalOperator5, stats);
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStackAnalyzer.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStackAnalyzer.java
index fd8f2ac..99e1b8f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStackAnalyzer.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStackAnalyzer.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertSame;
 
+import org.apache.drill.test.BaseTest;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -31,7 +32,7 @@
  * an exception call stack. Does the tests using dummy classes
  * (which is why the stack analyzer function is parameterized.)
  */
-public class TestStackAnalyzer {
+public class TestStackAnalyzer extends BaseTest {
 
   private static class OperA {
     public void throwNow() {
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestHashAggrSpill.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestHashAggrSpill.java
index 87a324f..cae84b6 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestHashAggrSpill.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestHashAggrSpill.java
@@ -28,6 +28,7 @@
 import org.apache.drill.categories.SlowTest;
 import org.apache.drill.common.exceptions.UserRemoteException;
 import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.physical.config.HashAggregate;
 import org.apache.drill.exec.physical.impl.aggregate.HashAggTemplate;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.proto.UserBitShared;
@@ -107,7 +108,7 @@
     }
 
     ProfileParser profile = client.parseProfile(summary.queryIdString());
-    List<ProfileParser.OperatorProfile> ops = profile.getOpsOfType(UserBitShared.CoreOperatorType.HASH_AGGREGATE_VALUE);
+    List<ProfileParser.OperatorProfile> ops = profile.getOpsOfType(HashAggregate.OPERATOR_TYPE);
 
     assertFalse(ops.isEmpty());
     // check for the first op only
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java
index e98b4c0..1675919 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java
@@ -88,9 +88,9 @@
       @Override
       public CloseableRecordBatch createProbeBatch(BatchSchema schema, FragmentContext context) {
         probeRowSet = new RowSetBuilder(context.getAllocator(), schema)
-          .addRow(.5, "yellow")
-          .addRow(1.5, "blue")
-          .addRow(2.5, "black")
+          .addRow(.5f, "yellow")
+          .addRow(1.5f, "blue")
+          .addRow(2.5f, "black")
           .build();
         return new MockRecordBatch.Builder().
           sendData(probeRowSet).
@@ -187,9 +187,9 @@
       @Override
       public CloseableRecordBatch createProbeBatch(BatchSchema schema, FragmentContext context) {
         probeRowSet = new RowSetBuilder(context.getAllocator(), schema)
-          .addRow(.5, "yellow")
-          .addRow(1.5, "blue")
-          .addRow(2.5, "black")
+          .addRow(.5f, "yellow")
+          .addRow(1.5f, "blue")
+          .addRow(2.5f, "black")
           .build();
         return new MockRecordBatch.Builder().
           sendData(probeRowSet).
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/ScanTestUtils.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/ScanTestUtils.java
index bfdff94..4cfca2f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/ScanTestUtils.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/ScanTestUtils.java
@@ -102,8 +102,8 @@
       Scan scanConfig = new AbstractSubScan("bob") {
 
         @Override
-        public int getOperatorType() {
-          return 0;
+        public String getOperatorType() {
+          return "";
         }
       };
       OperatorContext opContext = opFixture.newOperatorContext(scanConfig);
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/TestScanBatchWriters.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/TestScanBatchWriters.java
index a9572ce..fd53d6f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/TestScanBatchWriters.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/TestScanBatchWriters.java
@@ -52,8 +52,8 @@
     Scan scanConfig = new AbstractSubScan("bob") {
 
       @Override
-      public int getOperatorType() {
-        return 0;
+      public String getOperatorType() {
+        return "";
       }
     };
     OperatorContext opContext = fixture.newOperatorContext(scanConfig);
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/convert/TestDirectConverter.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/convert/TestDirectConverter.java
index aa106c2..1615827 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/convert/TestDirectConverter.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/convert/TestDirectConverter.java
@@ -708,7 +708,7 @@
     expect(ConversionType.IMPLICIT, conversions.analyze(tinyIntCol, bigIntCol));
     expect(ConversionType.IMPLICIT, conversions.analyze(tinyIntCol, float4Col));
     expect(ConversionType.IMPLICIT, conversions.analyze(tinyIntCol, float8Col));
-    expect(ConversionType.IMPLICIT, conversions.analyze(tinyIntCol, decimalCol));
+    expect(ConversionType.EXPLICIT, conversions.analyze(tinyIntCol, decimalCol));
     expect(ConversionType.EXPLICIT, conversions.analyze(tinyIntCol, stringCol));
 
     // SmallInt --> x
@@ -718,7 +718,7 @@
     expect(ConversionType.IMPLICIT, conversions.analyze(smallIntCol, bigIntCol));
     expect(ConversionType.IMPLICIT, conversions.analyze(smallIntCol, float4Col));
     expect(ConversionType.IMPLICIT, conversions.analyze(smallIntCol, float8Col));
-    expect(ConversionType.IMPLICIT, conversions.analyze(smallIntCol, decimalCol));
+    expect(ConversionType.EXPLICIT, conversions.analyze(smallIntCol, decimalCol));
     expect(ConversionType.EXPLICIT, conversions.analyze(smallIntCol, stringCol));
 
     // Int --> x
@@ -728,7 +728,7 @@
     expect(ConversionType.IMPLICIT, conversions.analyze(intCol, bigIntCol));
     expect(ConversionType.IMPLICIT, conversions.analyze(intCol, float4Col));
     expect(ConversionType.IMPLICIT, conversions.analyze(intCol, float8Col));
-    expect(ConversionType.IMPLICIT, conversions.analyze(intCol, decimalCol));
+    expect(ConversionType.EXPLICIT, conversions.analyze(intCol, decimalCol));
     expect(ConversionType.EXPLICIT, conversions.analyze(intCol, stringCol));
 
     // BigInt --> x
@@ -738,7 +738,7 @@
     expect(ConversionType.NONE, conversions.analyze(bigIntCol, bigIntCol));
     expect(ConversionType.IMPLICIT, conversions.analyze(bigIntCol, float4Col));
     expect(ConversionType.IMPLICIT, conversions.analyze(bigIntCol, float8Col));
-    expect(ConversionType.IMPLICIT, conversions.analyze(bigIntCol, decimalCol));
+    expect(ConversionType.EXPLICIT, conversions.analyze(bigIntCol, decimalCol));
     expect(ConversionType.EXPLICIT, conversions.analyze(bigIntCol, stringCol));
 
     // Float4 --> x
@@ -748,7 +748,7 @@
     expect(ConversionType.IMPLICIT_UNSAFE, conversions.analyze(float4Col, bigIntCol));
     expect(ConversionType.NONE, conversions.analyze(float4Col, float4Col));
     expect(ConversionType.IMPLICIT, conversions.analyze(float4Col, float8Col));
-    expect(ConversionType.IMPLICIT, conversions.analyze(float4Col, decimalCol));
+    expect(ConversionType.EXPLICIT, conversions.analyze(float4Col, decimalCol));
     expect(ConversionType.EXPLICIT, conversions.analyze(float4Col, stringCol));
 
     // Float8 --> x
@@ -758,7 +758,7 @@
     expect(ConversionType.IMPLICIT_UNSAFE, conversions.analyze(float8Col, bigIntCol));
     expect(ConversionType.IMPLICIT_UNSAFE, conversions.analyze(float8Col, float4Col));
     expect(ConversionType.NONE, conversions.analyze(float8Col, float8Col));
-    expect(ConversionType.IMPLICIT, conversions.analyze(float8Col, decimalCol));
+    expect(ConversionType.EXPLICIT, conversions.analyze(float8Col, decimalCol));
     expect(ConversionType.EXPLICIT, conversions.analyze(float8Col, stringCol));
 
     // Decimal --> x
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/ScanFixture.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/ScanFixture.java
index 5b1b01d..6fe612b 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/ScanFixture.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/ScanFixture.java
@@ -81,8 +81,8 @@
       Scan scanConfig = new AbstractSubScan("bob") {
 
         @Override
-        public int getOperatorType() {
-          return 0;
+        public String getOperatorType() {
+          return "";
         }
       };
       OperatorContext opContext = opFixture.newOperatorContext(scanConfig);
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/BaseTestScanLifecycle.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/BaseTestScanLifecycle.java
index 66f10fc..df1fa0c 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/BaseTestScanLifecycle.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/lifecycle/BaseTestScanLifecycle.java
@@ -52,7 +52,7 @@
     }
 
     @Override
-    public int getOperatorType() { return 0; }
+    public String getOperatorType() { return "DUMMY_SUB_SCAN"; }
   }
 
   protected static abstract class SingleReaderFactory implements ReaderFactory<SchemaNegotiator> {
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestDynamicSchemaFilter.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestDynamicSchemaFilter.java
index b7fa328..6020456 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestDynamicSchemaFilter.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestDynamicSchemaFilter.java
@@ -24,6 +24,7 @@
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.drill.test.BaseTest;
 import org.apache.drill.categories.EvfTest;
 import org.apache.drill.common.exceptions.EmptyErrorContext;
 import org.apache.drill.common.types.TypeProtos.MinorType;
@@ -40,7 +41,7 @@
 import org.junit.experimental.categories.Category;
 
 @Category(EvfTest.class)
-public class TestDynamicSchemaFilter {
+public class TestDynamicSchemaFilter extends BaseTest {
 
   private static final ColumnMetadata A_COL =
       MetadataUtils.newScalar("a", Types.required(MinorType.INT));
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestProjectedPath.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestProjectedPath.java
index 57277b5..69258b2 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestProjectedPath.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestProjectedPath.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.drill.test.BaseTest;
 import org.apache.drill.categories.EvfTest;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.types.Types;
@@ -38,7 +39,7 @@
  * verify the consistency checks.
  */
 @Category(EvfTest.class)
-public class TestProjectedPath {
+public class TestProjectedPath extends BaseTest {
 
   // INT is a proxy for all scalar columns.
   private static final ColumnMetadata INT_COLUMN = intSchema().metadata("a");
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestSchemaTrackerDefined.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestSchemaTrackerDefined.java
index fc343bd..ced6095 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestSchemaTrackerDefined.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/scan/v3/schema/TestSchemaTrackerDefined.java
@@ -23,6 +23,7 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import org.apache.drill.test.BaseTest;
 import org.apache.drill.categories.EvfTest;
 import org.apache.drill.common.exceptions.CustomErrorContext;
 import org.apache.drill.common.exceptions.EmptyErrorContext;
@@ -39,7 +40,7 @@
 import org.junit.experimental.categories.Category;
 
 @Category(EvfTest.class)
-public class TestSchemaTrackerDefined {
+public class TestSchemaTrackerDefined extends BaseTest {
   private static final CustomErrorContext ERROR_CONTEXT = EmptyErrorContext.INSTANCE;
 
   private boolean isProjected(ProjectionFilter filter, ColumnMetadata col) {
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortExec.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortExec.java
index 32a98e0..d567fe8 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortExec.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortExec.java
@@ -32,7 +32,6 @@
 import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.physical.base.AbstractBase;
 import org.apache.drill.exec.physical.config.ExternalSort;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.test.DrillTest;
 import org.junit.Test;
@@ -174,7 +173,7 @@
     assertSame(ordering, popConfig.getOrderings().get(0));
     assertFalse(popConfig.getReverse());
     assertEquals(SelectionVectorMode.FOUR_BYTE, popConfig.getSVMode());
-    assertEquals(CoreOperatorType.EXTERNAL_SORT_VALUE, popConfig.getOperatorType());
+    assertEquals(ExternalSort.OPERATOR_TYPE, popConfig.getOperatorType());
     assertEquals(ExternalSort.DEFAULT_SORT_ALLOCATION, popConfig.getInitialAllocation());
     assertEquals(AbstractBase.MAX_ALLOCATION, popConfig.getMaxAllocation());
     assertTrue(popConfig.isExecutable());
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortInternals.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortInternals.java
index 856bfff..200785f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortInternals.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestExternalSortInternals.java
@@ -25,6 +25,7 @@
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.ops.OperatorStats;
+import org.apache.drill.exec.physical.config.ExternalSort;
 import org.apache.drill.exec.physical.impl.xsort.SortMemoryManager.MergeAction;
 import org.apache.drill.exec.physical.impl.xsort.SortMemoryManager.MergeTask;
 import org.apache.drill.test.BaseDirTestWatcher;
@@ -655,7 +656,7 @@
 
   @Test
   public void testMetrics() {
-    OperatorStats stats = new OperatorStats(100, 101, 0, fixture.allocator());
+    OperatorStats stats = new OperatorStats(100, ExternalSort.OPERATOR_TYPE, 0, fixture.allocator());
     SortMetrics metrics = new SortMetrics(stats);
 
     // Input stats
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestProjectionFilter.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestProjectionFilter.java
index 5c72a45..24597fc 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestProjectionFilter.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestProjectionFilter.java
@@ -22,6 +22,7 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import org.apache.drill.test.BaseTest;
 import org.apache.drill.common.exceptions.EmptyErrorContext;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.types.TypeProtos.MinorType;
@@ -41,7 +42,7 @@
 import org.apache.drill.exec.record.metadata.TupleSchema;
 import org.junit.Test;
 
-public class TestProjectionFilter {
+public class TestProjectionFilter extends BaseTest {
   private static final ColumnMetadata A_COL = MetadataUtils.newScalar("a", Types.required(MinorType.INT));
   private static final ColumnMetadata B_COL = MetadataUtils.newScalar("b", Types.optional(MinorType.VARCHAR));
   private static final ColumnMetadata MAP_COL = MetadataUtils.newMap("m", new TupleSchema());
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/project/TestProjectedPath.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/project/TestProjectedPath.java
index 250afc4..8ff867f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/project/TestProjectedPath.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/project/TestProjectedPath.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.drill.test.BaseTest;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.physical.rowSet.RowSetTestUtils;
@@ -34,7 +35,7 @@
  * to see if the projection path is consistent with the type. Tests here
  * verify the consistency checks.
  */
-public class TestProjectedPath {
+public class TestProjectedPath extends BaseTest {
 
   // INT is a proxy for all scalar columns.
   private static final ColumnMetadata INT_COLUMN = intSchema().metadata("a");
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestRowSet.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestRowSet.java
index da4705b..987bd02 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestRowSet.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestRowSet.java
@@ -901,7 +901,7 @@
     final String dictName = "d";
     final TupleMetadata schema = new SchemaBuilder()
         .add("id", MinorType.INT)
-        .addDictArray(dictName, MinorType.FLOAT4)
+        .addDictArray(dictName, MinorType.FLOAT8)
           .value(MinorType.VARCHAR)
           .resumeSchema()
         .buildSchema();
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestScalarAccessors.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestScalarAccessors.java
index eaf2754..7f990c8 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestScalarAccessors.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestScalarAccessors.java
@@ -533,21 +533,21 @@
 
     RowSetReader reader = rs.reader();
     ScalarReader colReader = reader.scalar(0);
-    assertEquals(ValueType.DOUBLE, colReader.valueType());
+    assertEquals(ValueType.FLOAT, colReader.valueType());
 
     assertTrue(reader.next());
     assertFalse(colReader.isNull());
-    assertEquals(0, colReader.getDouble(), 0.000001);
+    assertEquals(0, colReader.getFloat(), 0.000001);
 
     assertTrue(reader.next());
-    assertEquals(Float.MAX_VALUE, colReader.getDouble(), 0.000001);
-    assertEquals(Float.MAX_VALUE, (double) colReader.getObject(), 0.000001);
+    assertEquals(Float.MAX_VALUE, colReader.getFloat(), 0.000001);
+    assertEquals(Float.MAX_VALUE, (float) colReader.getObject(), 0.000001);
 
     assertTrue(reader.next());
-    assertEquals(Float.MIN_VALUE, colReader.getDouble(), 0.000001);
+    assertEquals(Float.MIN_VALUE, colReader.getFloat(), 0.000001);
 
     assertTrue(reader.next());
-    assertEquals(100, colReader.getDouble(), 0.000001);
+    assertEquals(100, colReader.getFloat(), 0.000001);
     assertEquals("100.0", colReader.getAsString());
 
     assertFalse(reader.next());
@@ -587,7 +587,7 @@
 
   @Test
   public void testNullableFloat() {
-    nullableDoubleTester(MinorType.FLOAT4);
+    nullableDoubleTester(MinorType.FLOAT8);
   }
 
   private void doubleArrayTester(MinorType type) {
@@ -636,7 +636,7 @@
 
   @Test
   public void testFloatArray() {
-    doubleArrayTester(MinorType.FLOAT4);
+    doubleArrayTester(MinorType.FLOAT8);
   }
 
   @Test
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java b/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java
index 01c06a4..e85dd1b 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java
@@ -22,6 +22,7 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import org.apache.drill.exec.store.mock.MockSubScanPOP;
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 
 import org.apache.drill.categories.VectorTest;
@@ -41,7 +42,6 @@
 import org.apache.drill.exec.pop.PopUnitTestBase;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.proto.BitControl;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.ValueVector;
@@ -76,7 +76,7 @@
 
     RecordBatch singleBatch = exec.getIncoming();
     PhysicalOperator dummyPop = operatorList.iterator().next();
-    OpProfileDef def = new OpProfileDef(dummyPop.getOperatorId(), UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE,
+    OpProfileDef def = new OpProfileDef(dummyPop.getOperatorId(), MockSubScanPOP.OPERATOR_TYPE,
       OperatorUtilities.getChildCount(dummyPop));
     OperatorStats stats = exec.getContext().getStats().newOperatorStats(def, exec.getContext().getAllocator());
     RecordIterator iter = new RecordIterator(singleBatch, null, exec.getContext().newOperatorContext(dummyPop, stats), 0, false, null);
@@ -132,7 +132,7 @@
 
     RecordBatch singleBatch = exec.getIncoming();
     PhysicalOperator dummyPop = operatorList.iterator().next();
-    OpProfileDef def = new OpProfileDef(dummyPop.getOperatorId(), UserBitShared.CoreOperatorType.MOCK_SUB_SCAN_VALUE,
+    OpProfileDef def = new OpProfileDef(dummyPop.getOperatorId(), MockSubScanPOP.OPERATOR_TYPE,
         OperatorUtilities.getChildCount(dummyPop));
     OperatorStats stats = exec.getContext().getStats().newOperatorStats(def, exec.getContext().getAllocator());
     RecordIterator iter = new RecordIterator(singleBatch, null, exec.getContext().newOperatorContext(dummyPop, stats), 0, null);
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/FormatPluginSerDeTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/FormatPluginSerDeTest.java
index ebcb300..6001773 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/FormatPluginSerDeTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/FormatPluginSerDeTest.java
@@ -92,19 +92,6 @@
   }
 
   @Test
-  public void testHttpd() throws Exception {
-    String path = "store/httpd/dfs-test-bootstrap-test.httpd";
-    dirTestWatcher.copyResourceToRoot(Paths.get(path));
-    String logFormat = "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"";
-    String timeStampFormat = "dd/MMM/yyyy:HH:mm:ss ZZ";
-    testPhysicalPlanSubmission(
-        String.format("select * from dfs.`%s`", path),
-        String.format("select * from table(dfs.`%s`(type=>'httpd', logFormat=>'%s'))", path, logFormat),
-        String.format("select * from table(dfs.`%s`(type=>'httpd', logFormat=>'%s', timestampFormat=>'%s'))", path, logFormat, timeStampFormat)
-    );
-  }
-
-  @Test
   public void testJson() throws Exception {
     testPhysicalPlanSubmission(
         "select * from cp.`donuts.json`",
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/bson/TestBsonRecordReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/bson/TestBsonRecordReader.java
index 4a77fbe..2c2363c 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/bson/TestBsonRecordReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/bson/TestBsonRecordReader.java
@@ -21,6 +21,7 @@
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.math.BigDecimal;
 import java.time.ZoneOffset;
 import java.util.Arrays;
 
@@ -37,6 +38,7 @@
 import org.bson.BsonBinarySubType;
 import org.bson.BsonBoolean;
 import org.bson.BsonDateTime;
+import org.bson.BsonDecimal128;
 import org.bson.BsonDocument;
 import org.bson.BsonDocumentReader;
 import org.bson.BsonDocumentWriter;
@@ -48,6 +50,7 @@
 import org.bson.BsonSymbol;
 import org.bson.BsonTimestamp;
 import org.bson.BsonWriter;
+import org.bson.types.Decimal128;
 import org.bson.types.ObjectId;
 import org.junit.After;
 import org.junit.Before;
@@ -274,6 +277,16 @@
     assertEquals(3, reader.size());
   }
 
+  @Test
+  public void testDecimal128Type() throws IOException {
+    BsonDocument bsonDoc = new BsonDocument();
+    bsonDoc.append("decimal128Key", new BsonDecimal128(Decimal128.parse("12.12345624")));
+    writer.reset();
+    bsonReader.write(writer, new BsonDocumentReader(bsonDoc));
+    SingleMapReaderImpl mapReader = (SingleMapReaderImpl) writer.getMapVector().getReader();
+    assertEquals(new BigDecimal("12.12345624"), mapReader.reader("decimal128Key").readBigDecimal());
+  }
+
   @After
   public void cleanUp() {
     try {
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestDrillFileSystem.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestDrillFileSystem.java
index ec841a3..b5582ef 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestDrillFileSystem.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestDrillFileSystem.java
@@ -68,7 +68,7 @@
     InputStream is = null;
     Configuration conf = new Configuration();
     conf.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
-    OpProfileDef profileDef = new OpProfileDef(0 /*operatorId*/, 0 /*operatorType*/, 0 /*inputCount*/);
+    OpProfileDef profileDef = new OpProfileDef(0 /*operatorId*/, "" /*operatorType*/, 0 /*inputCount*/);
     OperatorStats stats = new OperatorStats(profileDef, null /*allocator*/);
 
     // start wait time method in OperatorStats expects the OperatorStats state to be in "processing"
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFormatPluginOptionExtractor.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFormatPluginOptionExtractor.java
index 871233b..e126383 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFormatPluginOptionExtractor.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFormatPluginOptionExtractor.java
@@ -22,7 +22,6 @@
 import org.apache.drill.common.scanner.RunTimeScan;
 import org.apache.drill.common.scanner.persistence.ScanResult;
 import org.apache.drill.exec.store.easy.text.TextFormatPlugin.TextFormatConfig;
-import org.apache.drill.exec.store.image.ImageFormatConfig;
 import org.apache.drill.test.BaseTest;
 import org.junit.Test;
 
@@ -58,8 +57,14 @@
           assertEquals(d.typeName, "(type: String, autoCorrectCorruptDates: boolean, enableStringsSignedMinMax: boolean)", d.presentParams());
           break;
         case "json":
+          assertEquals(d.typeName, "(type: String)", d.presentParams());
+          break;
         case "sequencefile":
+          assertEquals(d.typeName, "(type: String)", d.presentParams());
+          break;
         case "pcapng":
+          assertEquals(d.typeName, "(type: String, stat: boolean)", d.presentParams());
+          break;
         case "avro":
           assertEquals(d.typeName, "(type: String)", d.presentParams());
           break;
@@ -69,12 +74,6 @@
         case "httpd":
           assertEquals("(type: String, logFormat: String, timestampFormat: String)", d.presentParams());
           break;
-        case "image":
-          assertEquals(ImageFormatConfig.class, d.pluginConfigClass);
-          assertEquals(
-              "(type: String, fileSystemMetadata: boolean, descriptive: boolean, timeZone: String)", d.presentParams()
-          );
-          break;
         case "logRegex":
           assertEquals(d.typeName, "(type: String, regex: String, extension: String, maxErrors: int, schema: List)", d.presentParams());
           break;
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsvWithSchema.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsvWithSchema.java
index ac27d2f..e56fcfe 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsvWithSchema.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsvWithSchema.java
@@ -1022,7 +1022,7 @@
           "col_int integer not null default '10', " +
           "col_bigint bigint not null default '10', " +
           "col_double double not null default '10.5', " +
-          "col_float float not null default '10.5', " +
+          "col_float float not null default '10.5f', " +
           "col_var varchar not null default 'foo', " +
           "col_boolean boolean not null default '1', " +
           "col_interval interval not null default 'P10D', " +
@@ -1051,7 +1051,7 @@
       LocalDate ld = new LocalDate(2019, 3, 28);
       Instant ts = ld.toDateTime(lt).toInstant();
       RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-          .addRow(10, 10L, 10.5, 10.5D, "foo", true, new Period(0).plusDays(10),
+          .addRow(10, 10L, 10.5, 10.5f, "foo", true, new Period(0).plusDays(10),
               lt, ld, ts, "1")
           .build();
       RowSetUtilities.verify(expected, actual);
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReader.java
deleted file mode 100644
index c86ee52..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/httpd/TestHTTPDLogReader.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.httpd;
-
-import org.apache.drill.common.types.TypeProtos.MinorType;
-import org.apache.drill.exec.record.metadata.SchemaBuilder;
-import org.apache.drill.exec.record.metadata.TupleMetadata;
-import org.apache.drill.exec.rpc.RpcException;
-import org.apache.drill.test.BaseDirTestWatcher;
-import org.apache.drill.test.ClusterFixture;
-import org.apache.drill.test.ClusterTest;
-import org.apache.drill.exec.physical.rowSet.RowSet;
-import org.apache.drill.test.rowSet.RowSetUtilities;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-
-import java.time.LocalDateTime;
-import java.util.HashMap;
-
-import static org.junit.Assert.assertEquals;
-
-public class TestHTTPDLogReader extends ClusterTest {
-
-  @ClassRule
-  public static final BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
-
-    // Define a temporary format plugin for the "cp" storage plugin.
-    HttpdLogFormatConfig sampleConfig = new HttpdLogFormatConfig(
-        "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"", null);
-    cluster.defineFormat("cp", "sample", sampleConfig);
-  }
-
-  @Test
-  public void testDateField() throws RpcException {
-    String sql = "SELECT `request_receive_time` FROM cp.`httpd/hackers-access-small.httpd` LIMIT 5";
-    RowSet results = client.queryBuilder().sql(sql).rowSet();
-
-    TupleMetadata expectedSchema = new SchemaBuilder()
-            .addNullable("request_receive_time", MinorType.TIMESTAMP)
-            .buildSchema();
-    RowSet expected = client.rowSetBuilder(expectedSchema)
-            .addRow(1445742685000L)
-            .addRow(1445742686000L)
-            .addRow(1445742687000L)
-            .addRow(1445743471000L)
-            .addRow(1445743472000L)
-            .build();
-
-    RowSetUtilities.verify(expected, results);
-  }
-
-  @Test
-  public void testSelectColumns() throws Exception {
-    String sql = "SELECT request_referer_ref,\n" +
-            "request_receive_time_last_time,\n" +
-            "request_firstline_uri_protocol,\n" +
-            "request_receive_time_microsecond,\n" +
-            "request_receive_time_last_microsecond__utc,\n" +
-            "request_firstline_original_protocol,\n" +
-            "request_firstline_original_uri_host,\n" +
-            "request_referer_host,\n" +
-            "request_receive_time_month__utc,\n" +
-            "request_receive_time_last_minute,\n" +
-            "request_firstline_protocol_version,\n" +
-            "request_receive_time_time__utc,\n" +
-            "request_referer_last_ref,\n" +
-            "request_receive_time_last_timezone,\n" +
-            "request_receive_time_last_weekofweekyear,\n" +
-            "request_referer_last,\n" +
-            "request_receive_time_minute,\n" +
-            "connection_client_host_last,\n" +
-            "request_receive_time_last_millisecond__utc,\n" +
-            "request_firstline_original_uri,\n" +
-            "request_firstline,\n" +
-            "request_receive_time_nanosecond,\n" +
-            "request_receive_time_last_millisecond,\n" +
-            "request_receive_time_day,\n" +
-            "request_referer_port,\n" +
-            "request_firstline_original_uri_port,\n" +
-            "request_receive_time_year,\n" +
-            "request_receive_time_last_date,\n" +
-            "request_receive_time_last_time__utc,\n" +
-            "request_receive_time_last_hour__utc,\n" +
-            "request_firstline_original_protocol_version,\n" +
-            "request_firstline_original_method,\n" +
-            "request_receive_time_last_year__utc,\n" +
-            "request_firstline_uri,\n" +
-            "request_referer_last_host,\n" +
-            "request_receive_time_last_minute__utc,\n" +
-            "request_receive_time_weekofweekyear,\n" +
-            "request_firstline_uri_userinfo,\n" +
-            "request_receive_time_epoch,\n" +
-            "connection_client_logname,\n" +
-            "response_body_bytes,\n" +
-            "request_receive_time_nanosecond__utc,\n" +
-            "request_firstline_protocol,\n" +
-            "request_receive_time_microsecond__utc,\n" +
-            "request_receive_time_hour,\n" +
-            "request_firstline_uri_host,\n" +
-            "request_referer_last_port,\n" +
-            "request_receive_time_last_epoch,\n" +
-            "request_receive_time_last_weekyear__utc,\n" +
-            "request_useragent,\n" +
-            "request_receive_time_weekyear,\n" +
-            "request_receive_time_timezone,\n" +
-            "response_body_bytesclf,\n" +
-            "request_receive_time_last_date__utc,\n" +
-            "request_receive_time_millisecond__utc,\n" +
-            "request_referer_last_protocol,\n" +
-            "request_status_last,\n" +
-            "request_firstline_uri_query,\n" +
-            "request_receive_time_minute__utc,\n" +
-            "request_firstline_original_uri_protocol,\n" +
-            "request_referer_query,\n" +
-            "request_receive_time_date,\n" +
-            "request_firstline_uri_port,\n" +
-            "request_receive_time_last_second__utc,\n" +
-            "request_referer_last_userinfo,\n" +
-            "request_receive_time_last_second,\n" +
-            "request_receive_time_last_monthname__utc,\n" +
-            "request_firstline_method,\n" +
-            "request_receive_time_last_month__utc,\n" +
-            "request_receive_time_millisecond,\n" +
-            "request_receive_time_day__utc,\n" +
-            "request_receive_time_year__utc,\n" +
-            "request_receive_time_weekofweekyear__utc,\n" +
-            "request_receive_time_second,\n" +
-            "request_firstline_original_uri_ref,\n" +
-            "connection_client_logname_last,\n" +
-            "request_receive_time_last_year,\n" +
-            "request_firstline_original_uri_path,\n" +
-            "connection_client_host,\n" +
-            "request_firstline_original_uri_query,\n" +
-            "request_referer_userinfo,\n" +
-            "request_receive_time_last_monthname,\n" +
-            "request_referer_path,\n" +
-            "request_receive_time_monthname,\n" +
-            "request_receive_time_last_month,\n" +
-            "request_referer_last_query,\n" +
-            "request_firstline_uri_ref,\n" +
-            "request_receive_time_last_day,\n" +
-            "request_receive_time_time,\n" +
-            "request_receive_time_last_weekofweekyear__utc,\n" +
-            "request_useragent_last,\n" +
-            "request_receive_time_last_weekyear,\n" +
-            "request_receive_time_last_microsecond,\n" +
-            "request_firstline_original,\n" +
-            "request_referer_last_path,\n" +
-            "request_receive_time_month,\n" +
-            "request_receive_time_last_day__utc,\n" +
-            "request_referer,\n" +
-            "request_referer_protocol,\n" +
-            "request_receive_time_monthname__utc,\n" +
-            "response_body_bytes_last,\n" +
-            "request_receive_time,\n" +
-            "request_receive_time_last_nanosecond,\n" +
-            "request_firstline_uri_path,\n" +
-            "request_firstline_original_uri_userinfo,\n" +
-            "request_receive_time_date__utc,\n" +
-            "request_receive_time_last,\n" +
-            "request_receive_time_last_nanosecond__utc,\n" +
-            "request_receive_time_last_hour,\n" +
-            "request_receive_time_hour__utc,\n" +
-            "request_receive_time_second__utc,\n" +
-            "connection_client_user_last,\n" +
-            "request_receive_time_weekyear__utc,\n" +
-            "connection_client_user\n" +
-            "FROM cp.`httpd/hackers-access-small.httpd`\n" +
-            "LIMIT 1";
-
-    testBuilder()
-            .sqlQuery(sql)
-            .unOrdered()
-            .baselineColumns("request_referer_ref", "request_receive_time_last_time", "request_firstline_uri_protocol", "request_receive_time_microsecond", "request_receive_time_last_microsecond__utc", "request_firstline_original_protocol", "request_firstline_original_uri_host", "request_referer_host", "request_receive_time_month__utc", "request_receive_time_last_minute", "request_firstline_protocol_version", "request_receive_time_time__utc", "request_referer_last_ref", "request_receive_time_last_timezone", "request_receive_time_last_weekofweekyear", "request_referer_last", "request_receive_time_minute", "connection_client_host_last", "request_receive_time_last_millisecond__utc", "request_firstline_original_uri", "request_firstline", "request_receive_time_nanosecond", "request_receive_time_last_millisecond", "request_receive_time_day", "request_referer_port", "request_firstline_original_uri_port", "request_receive_time_year", "request_receive_time_last_date", "request_receive_time_last_time__utc", "request_receive_time_last_hour__utc", "request_firstline_original_protocol_version", "request_firstline_original_method", "request_receive_time_last_year__utc", "request_firstline_uri", "request_referer_last_host", "request_receive_time_last_minute__utc", "request_receive_time_weekofweekyear", "request_firstline_uri_userinfo", "request_receive_time_epoch", "connection_client_logname", "response_body_bytes", "request_receive_time_nanosecond__utc", "request_firstline_protocol", "request_receive_time_microsecond__utc", "request_receive_time_hour", "request_firstline_uri_host", "request_referer_last_port", "request_receive_time_last_epoch", "request_receive_time_last_weekyear__utc", "request_useragent", "request_receive_time_weekyear", "request_receive_time_timezone", "response_body_bytesclf", "request_receive_time_last_date__utc", "request_receive_time_millisecond__utc", "request_referer_last_protocol", "request_status_last", "request_firstline_uri_query", "request_receive_time_minute__utc", "request_firstline_original_uri_protocol", "request_referer_query", "request_receive_time_date", "request_firstline_uri_port", "request_receive_time_last_second__utc", "request_referer_last_userinfo", "request_receive_time_last_second", "request_receive_time_last_monthname__utc", "request_firstline_method", "request_receive_time_last_month__utc", "request_receive_time_millisecond", "request_receive_time_day__utc", "request_receive_time_year__utc", "request_receive_time_weekofweekyear__utc", "request_receive_time_second", "request_firstline_original_uri_ref", "connection_client_logname_last", "request_receive_time_last_year", "request_firstline_original_uri_path", "connection_client_host", "request_firstline_original_uri_query", "request_referer_userinfo", "request_receive_time_last_monthname", "request_referer_path", "request_receive_time_monthname", "request_receive_time_last_month", "request_referer_last_query", "request_firstline_uri_ref", "request_receive_time_last_day", "request_receive_time_time", "request_receive_time_last_weekofweekyear__utc", "request_useragent_last", "request_receive_time_last_weekyear", "request_receive_time_last_microsecond", "request_firstline_original", "request_referer_last_path", "request_receive_time_month", "request_receive_time_last_day__utc", "request_referer", "request_referer_protocol", "request_receive_time_monthname__utc", "response_body_bytes_last", "request_receive_time", "request_receive_time_last_nanosecond", "request_firstline_uri_path", 
"request_firstline_original_uri_userinfo", "request_receive_time_date__utc", "request_receive_time_last", "request_receive_time_last_nanosecond__utc", "request_receive_time_last_hour", "request_receive_time_hour__utc", "request_receive_time_second__utc", "connection_client_user_last", "request_receive_time_weekyear__utc", "connection_client_user")
-            .baselineValues(null, "04:11:25", null, 0L, 0L, "HTTP", null, "howto.basjes.nl", 10L, 11L, "1.1", "03:11:25", null, null, 43L, "http://howto.basjes.nl/", 11L, "195.154.46.135", 0L, "/linux/doing-pxe-without-dhcp-control", "GET /linux/doing-pxe-without-dhcp-control HTTP/1.1", 0L, 0L, 25L, null, null, 2015L, "2015-10-25", "03:11:25", 3L, "1.1", "GET", 2015L, "/linux/doing-pxe-without-dhcp-control", "howto.basjes.nl", 11L, 43L, null, 1445742685000L, null, 24323L, 0L, "HTTP", 0L, 4L, null, null, 1445742685000L, 2015L, "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0", 2015L, null, 24323L, "2015-10-25", 0L, "http", "200", "", 11L, null, "", "2015-10-25", null, 25L, null, 25L, "October", "GET", 10L, 0L, 25L, 2015L, 43L, 25L, null, null, 2015L, "/linux/doing-pxe-without-dhcp-control", "195.154.46.135", "", null, "October", "/", "October", 10L, "", null, 25L, "04:11:25", 43L, "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0", 2015L, 0L, "GET /linux/doing-pxe-without-dhcp-control HTTP/1.1", "/", 10L, 25L, "http://howto.basjes.nl/", "http", "October", 24323L, LocalDateTime.parse("2015-10-25T03:11:25"), 0L, "/linux/doing-pxe-without-dhcp-control", null, "2015-10-25", LocalDateTime.parse("2015-10-25T03:11:25"), 0L, 4L, 3L, 25L, null, 2015L, null)
-            .go();
-  }
-
-
-  @Test
-  public void testCount() throws Exception {
-    String sql = "SELECT COUNT(*) FROM cp.`httpd/hackers-access-small.httpd`";
-    long result = client.queryBuilder().sql(sql).singletonLong();
-    assertEquals(10, result);
-  }
-
-  @Test
-  public void testStar() throws Exception {
-    String sql = "SELECT * FROM cp.`httpd/hackers-access-small.httpd` LIMIT 1";
-
-    testBuilder()
-            .sqlQuery(sql)
-            .unOrdered()
-            .baselineColumns("request_referer_ref","request_receive_time_last_time","request_firstline_uri_protocol","request_receive_time_microsecond","request_receive_time_last_microsecond__utc","request_firstline_original_uri_query_$","request_firstline_original_protocol","request_firstline_original_uri_host","request_referer_host","request_receive_time_month__utc","request_receive_time_last_minute","request_firstline_protocol_version","request_receive_time_time__utc","request_referer_last_ref","request_receive_time_last_timezone","request_receive_time_last_weekofweekyear","request_referer_last","request_receive_time_minute","connection_client_host_last","request_receive_time_last_millisecond__utc","request_firstline_original_uri","request_firstline","request_receive_time_nanosecond","request_receive_time_last_millisecond","request_receive_time_day","request_referer_port","request_firstline_original_uri_port","request_receive_time_year","request_receive_time_last_date","request_referer_query_$","request_receive_time_last_time__utc","request_receive_time_last_hour__utc","request_firstline_original_protocol_version","request_firstline_original_method","request_receive_time_last_year__utc","request_firstline_uri","request_referer_last_host","request_receive_time_last_minute__utc","request_receive_time_weekofweekyear","request_firstline_uri_userinfo","request_receive_time_epoch","connection_client_logname","response_body_bytes","request_receive_time_nanosecond__utc","request_firstline_protocol","request_receive_time_microsecond__utc","request_receive_time_hour","request_firstline_uri_host","request_referer_last_port","request_receive_time_last_epoch","request_receive_time_last_weekyear__utc","request_receive_time_weekyear","request_receive_time_timezone","response_body_bytesclf","request_receive_time_last_date__utc","request_useragent_last","request_useragent","request_receive_time_millisecond__utc","request_referer_last_protocol","request_status_last","request_firstline_uri_query","request_receive_time_minute__utc","request_firstline_original_uri_protocol","request_referer_query","request_receive_time_date","request_firstline_uri_port","request_receive_time_last_second__utc","request_referer_last_userinfo","request_receive_time_last_second","request_receive_time_last_monthname__utc","request_firstline_method","request_receive_time_last_month__utc","request_receive_time_millisecond","request_receive_time_day__utc","request_receive_time_year__utc","request_receive_time_weekofweekyear__utc","request_receive_time_second","request_firstline_original_uri_ref","connection_client_logname_last","request_receive_time_last_year","request_firstline_original_uri_path","connection_client_host","request_referer_last_query_$","request_firstline_original_uri_query","request_referer_userinfo","request_receive_time_last_monthname","request_referer_path","request_receive_time_monthname","request_receive_time_last_month","request_referer_last_query","request_firstline_uri_ref","request_receive_time_last_day","request_receive_time_time","request_receive_time_last_weekofweekyear__utc","request_receive_time_last_weekyear","request_receive_time_last_microsecond","request_firstline_original","request_firstline_uri_query_$","request_referer_last_path","request_receive_time_month","request_receive_time_last_day__utc","request_referer","request_referer_protocol","request_receive_time_monthname__utc","response_body_bytes_last","request_receive_time","request_receive_time_last_nanosecond","request_firstline_uri_path","r
equest_firstline_original_uri_userinfo","request_receive_time_date__utc","request_receive_time_last","request_receive_time_last_nanosecond__utc","request_receive_time_last_hour","request_receive_time_hour__utc","request_receive_time_second__utc","connection_client_user_last","request_receive_time_weekyear__utc","connection_client_user")
-            .baselineValues(null,"04:11:25",null,0L,0L,new HashMap<>(),"HTTP",null,"howto.basjes.nl",10L,11L,"1.1","03:11:25",null,null,43L,"http://howto.basjes.nl/",11L,"195.154.46.135",0L,"/linux/doing-pxe-without-dhcp-control","GET /linux/doing-pxe-without-dhcp-control HTTP/1.1",0L,0L,25L,null,null,2015L,"2015-10-25",new HashMap<>(),"03:11:25",3L,"1.1","GET",2015L,"/linux/doing-pxe-without-dhcp-control","howto.basjes.nl",11L,43L,null,1445742685000L,null,24323L,0L,"HTTP",0L,4L,null,null,1445742685000L,2015L,2015L,null,24323L,"2015-10-25","Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0","Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0",0L,"http","200","",11L,null,"","2015-10-25",null,25L,null,25L,"October","GET",10L,0L,25L,2015L,43L,25L,null,null,2015L,"/linux/doing-pxe-without-dhcp-control","195.154.46.135",new HashMap<>(),"",null,"October","/","October",10L,"",null,25L,"04:11:25",43L,2015L,0L,"GET /linux/doing-pxe-without-dhcp-control HTTP/1.1",new HashMap<>(),"/",10L,25L,"http://howto.basjes.nl/","http","October",24323L,LocalDateTime.parse("2015-10-25T03:11:25"),0L,"/linux/doing-pxe-without-dhcp-control",null,"2015-10-25",LocalDateTime.parse("2015-10-25T03:11:25"),0L,4L,3L,25L,null,2015L,null)
-            .go();
-  }
-}
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java
index f530945..bf57128 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java
@@ -26,7 +26,6 @@
 import org.apache.drill.exec.ops.FragmentContextImpl;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.proto.BitControl;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.store.parquet.columnreaders.ParquetRecordReader;
 import org.apache.drill.exec.store.parquet.metadata.Metadata;
 import org.apache.drill.exec.store.parquet.metadata.MetadataBase;
@@ -59,7 +58,7 @@
 import java.util.List;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 
 public class TestParquetFilterPushDown extends PlanTestBase {
   private static final String CTAS_TABLE = "order_ctas";
@@ -781,9 +780,9 @@
     }
 
     ProfileParser profile = client.parseProfile(summary.queryIdString());
-    List<ProfileParser.OperatorProfile> ops = profile.getOpsOfType(UserBitShared.CoreOperatorType.PARQUET_ROW_GROUP_SCAN_VALUE);
+    List<ProfileParser.OperatorProfile> ops = profile.getOpsOfType(ParquetRowGroupScan.OPERATOR_TYPE);
 
-    assertTrue(!ops.isEmpty());
+    assertFalse(ops.isEmpty());
     // check for the first op only
     ProfileParser.OperatorProfile parquestScan0 = ops.get(0);
     long resultNumRowgroups = parquestScan0.getMetric(ParquetRecordReader.Metric.NUM_ROWGROUPS.ordinal());
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngHeaders.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngHeaders.java
deleted file mode 100644
index 6228766..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngHeaders.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.pcapng;
-
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.record.metadata.TupleMetadata;
-import org.apache.drill.exec.record.metadata.TupleSchema;
-import org.apache.drill.test.ClusterFixture;
-import org.apache.drill.test.ClusterTest;
-import org.apache.drill.exec.physical.rowSet.RowSet;
-import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
-import org.apache.drill.test.rowSet.RowSetComparison;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.nio.file.Paths;
-
-public class TestPcapngHeaders extends ClusterTest {
-  @BeforeClass
-  public static void setupTestFiles() throws Exception {
-    startCluster(ClusterFixture.builder(dirTestWatcher).maxParallelization(1));
-    dirTestWatcher.copyResourceToRoot(Paths.get("store", "pcapng"));
-  }
-
-  @Test
-  public void testValidHeadersForStarQuery() throws IOException {
-    String query = "select * from dfs.`store/pcapng/sniff.pcapng`";
-    RowSet actual = client.queryBuilder().sql(query).rowSet();
-
-    TupleMetadata expectedSchema = new TupleSchema();
-
-    expectedSchema.add(MaterializedField.create("tcp_flags_ece_ecn_capable", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_ece_congestion_experienced", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_psh", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("type", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_cwr", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("dst_ip", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("src_ip", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_fin", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_ece", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_flags", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_ack", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("src_mac_address", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_syn", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_rst", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("timestamp", Types.required(TypeProtos.MinorType.TIMESTAMP)));
-    expectedSchema.add(MaterializedField.create("tcp_session", Types.optional(TypeProtos.MinorType.BIGINT)));
-    expectedSchema.add(MaterializedField.create("packet_data", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("tcp_parsed_flags", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_ns", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("src_port", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("packet_length", Types.required(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_flags_urg", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_ack", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("dst_port", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("dst_mac_address", Types.optional(TypeProtos.MinorType.VARCHAR)));
-
-    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-  }
-
-  @Test
-  public void testValidHeadersForProjection() throws IOException {
-    String query = "select sRc_ip, dst_IP, dst_mAc_address, src_Port, tcp_session, `Timestamp`  from dfs.`store/pcapng/sniff.pcapng`";
-    RowSet actual = client.queryBuilder().sql(query).rowSet();
-
-    TupleMetadata expectedSchema = new TupleSchema();
-
-    expectedSchema.add(MaterializedField.create("sRc_ip", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("dst_IP", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("dst_mAc_address", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("src_Port", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_session", Types.optional(TypeProtos.MinorType.BIGINT)));
-    expectedSchema.add(MaterializedField.create("Timestamp", Types.required(TypeProtos.MinorType.TIMESTAMP)));
-
-    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-  }
-
-  @Test
-  public void testValidHeadersForMissColumns() throws IOException {
-    String query = "select `timestamp`, `name`, `color` from dfs.`store/pcapng/sniff.pcapng`";
-    RowSet actual = client.queryBuilder().sql(query).rowSet();
-
-    TupleMetadata expectedSchema = new TupleSchema();
-
-    expectedSchema.add(MaterializedField.create("timestamp", Types.required(TypeProtos.MinorType.TIMESTAMP)));
-    expectedSchema.add(MaterializedField.create("name", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("color", Types.optional(TypeProtos.MinorType.INT)));
-
-    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-  }
-
-  @Test
-  public void testMixColumns() throws IOException {
-    String query = "select src_ip, dst_ip, dst_mac_address, src_port, tcp_session, `timestamp`  from dfs.`store/pcapng/sniff.pcapng`";
-    RowSet actual = client.queryBuilder().sql(query).rowSet();
-
-    TupleMetadata expectedSchema = new TupleSchema();
-
-    expectedSchema.add(MaterializedField.create("sRc_ip", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("dst_IP", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("dst_mAc_address", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("src_Port", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_session", Types.optional(TypeProtos.MinorType.BIGINT)));
-    expectedSchema.add(MaterializedField.create("Timestamp", Types.required(TypeProtos.MinorType.TIMESTAMP)));
-
-    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-
-    String queryWithDiffOrder = "select `timestamp`, src_ip, dst_ip, src_port, tcp_session, dst_mac_address from dfs.`store/pcapng/sniff.pcapng`";
-    actual = client.queryBuilder().sql(queryWithDiffOrder).rowSet();
-
-    expectedSchema = new TupleSchema();
-
-    expectedSchema.add(MaterializedField.create("timestamp", Types.required(TypeProtos.MinorType.TIMESTAMP)));
-    expectedSchema.add(MaterializedField.create("src_ip", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("dst_ip", Types.optional(TypeProtos.MinorType.VARCHAR)));
-    expectedSchema.add(MaterializedField.create("src_port", Types.optional(TypeProtos.MinorType.INT)));
-    expectedSchema.add(MaterializedField.create("tcp_session", Types.optional(TypeProtos.MinorType.BIGINT)));
-    expectedSchema.add(MaterializedField.create("dst_mac_address", Types.optional(TypeProtos.MinorType.VARCHAR)));
-
-    expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-  }
-
-  @Test
-  public void testValidHeaderForArrayColumns() throws IOException {
-    // query with non-existent field
-    String query = "select arr[3] as arr from dfs.`store/pcapng/sniff.pcapng`";
-    RowSet actual = client.queryBuilder().sql(query).rowSet();
-
-    TupleMetadata expectedSchema = new TupleSchema();
-
-    expectedSchema.add(MaterializedField.create("arr", Types.optional(TypeProtos.MinorType.INT)));
-
-    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-
-    // query with an existent field which doesn't support arrays
-    query = "select type[45] as arr from dfs.`store/pcapng/sniff.pcapng`";
-
-    expectedSchema = new TupleSchema();
-    actual = client.queryBuilder().sql(query).rowSet();
-
-    expectedSchema.add(MaterializedField.create("arr", Types.optional(TypeProtos.MinorType.INT)));
-
-    expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-  }
-
-  @Test
-  public void testValidHeaderForNestedColumns() throws IOException {
-    // query with non-existent field
-    String query = "select top['nested'] as nested from dfs.`store/pcapng/sniff.pcapng`";
-    RowSet actual = client.queryBuilder().sql(query).rowSet();
-
-    TupleMetadata expectedSchema = new TupleSchema();
-
-    expectedSchema.add(MaterializedField.create("nested", Types.optional(TypeProtos.MinorType.INT)));
-
-    RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-
-    // query with an existent field which doesn't support nesting
-    query = "select type['nested'] as nested from dfs.`store/pcapng/sniff.pcapng`";
-
-    expectedSchema = new TupleSchema();
-    actual = client.queryBuilder().sql(query).rowSet();
-
-    expectedSchema.add(MaterializedField.create("nested", Types.optional(TypeProtos.MinorType.INT)));
-
-    expected = new RowSetBuilder(client.allocator(), expectedSchema)
-        .build();
-    new RowSetComparison(expected)
-        .verifyAndClearAll(actual);
-  }
-}
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngRecordReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngRecordReader.java
index 98d7b67..d0c4efc 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngRecordReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngRecordReader.java
@@ -17,84 +17,201 @@
  */
 package org.apache.drill.exec.store.pcapng;
 
-import org.apache.drill.PlanTestBase;
-import org.apache.drill.common.exceptions.UserRemoteException;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 
 import java.nio.file.Paths;
 
-public class TestPcapngRecordReader extends PlanTestBase {
+import org.apache.drill.categories.RowSetTests;
+import org.apache.drill.common.exceptions.UserRemoteException;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.QueryBuilder;
+import org.apache.drill.test.QueryTestUtil;
+import org.apache.drill.test.rowSet.RowSetComparison;
+import org.joda.time.Instant;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(RowSetTests.class)
+public class TestPcapngRecordReader extends ClusterTest {
+
   @BeforeClass
-  public static void setupTestFiles() {
+  public static void setup() throws Exception {
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
     dirTestWatcher.copyResourceToRoot(Paths.get("store", "pcapng"));
   }
 
   @Test
   public void testStarQuery() throws Exception {
-    Assert.assertEquals(123, testSql("select * from dfs.`store/pcapng/sniff.pcapng`"));
-    Assert.assertEquals(1, testSql("select * from dfs.`store/pcapng/example.pcapng`"));
+    String sql = "select * from dfs.`store/pcapng/sniff.pcapng`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(123, sets.rowCount());
+    sets.clear();
   }
 
   @Test
-  public void testProjectingByName() throws Exception {
-    Assert.assertEquals(123, testSql("select `timestamp`, packet_data, type from dfs.`store/pcapng/sniff.pcapng`"));
-    Assert.assertEquals(1, testSql("select src_ip, dst_ip, `timestamp` from dfs.`store/pcapng/example.pcapng`"));
+  public void testExplicitQuery() throws Exception {
+    String sql = "select type, packet_length, `timestamp` from dfs.`store/pcapng/sniff.pcapng` where type = 'ARP'";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("type", MinorType.VARCHAR)
+        .add("packet_length", MinorType.INT)
+        .add("timestamp", MinorType.TIMESTAMP)
+        .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow("ARP", 90, Instant.ofEpochMilli(1518010669927L))
+        .addRow("ARP", 90, Instant.ofEpochMilli(1518010671874L))
+        .build();
+
+    assertEquals(2, sets.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(sets);
   }
 
   @Test
-  public void testDiffCaseQuery() throws Exception {
-    Assert.assertEquals(123, testSql("select `timestamp`, paCket_dAta, TyPe from dfs.`store/pcapng/sniff.pcapng`"));
-    Assert.assertEquals(1, testSql("select src_ip, dst_ip, `Timestamp` from dfs.`store/pcapng/example.pcapng`"));
+  public void testLimitPushdown() throws Exception {
+    String sql = "select * from dfs.`store/pcapng/sniff.pcapng` where type = 'UDP' limit 10 offset 65";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(6, sets.rowCount());
+    sets.clear();
   }
 
   @Test
-  public void testProjectingMissColls() throws Exception {
-    Assert.assertEquals(123, testSql("select `timestamp`, `name`, `color` from dfs.`store/pcapng/sniff.pcapng`"));
-    Assert.assertEquals(1, testSql("select src_ip, `time` from dfs.`store/pcapng/example.pcapng`"));
+  public void testSerDe() throws Exception {
+    String sql = "select count(*) from dfs.`store/pcapng/example.pcapng`";
+    String plan = queryBuilder().sql(sql).explainJson();
+    long cnt = queryBuilder().physical(plan).singletonLong();
+
+    assertEquals("Counts should match", 1, cnt);
   }
 
+  @Test
+  public void testExplicitQueryWithCompressedFile() throws Exception {
+    QueryTestUtil.generateCompressedFile("store/pcapng/sniff.pcapng", "zip", "store/pcapng/sniff.pcapng.zip");
+    String sql = "select type, packet_length, `timestamp` from dfs.`store/pcapng/sniff.pcapng.zip` where type = 'ARP'";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("type", MinorType.VARCHAR)
+        .add("packet_length", MinorType.INT)
+        .add("timestamp", MinorType.TIMESTAMP)
+        .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow("ARP", 90, Instant.ofEpochMilli(1518010669927L))
+        .addRow("ARP", 90, Instant.ofEpochMilli(1518010671874L))
+        .build();
+
+    assertEquals(2, sets.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
 
   @Test
-  public void testCountQuery() throws Exception {
-    testBuilder()
-        .sqlQuery("select count(*) as ct from dfs.`store/pcapng/sniff.pcapng`")
-        .ordered()
-        .baselineColumns("ct")
-        .baselineValues(123L)
-        .build()
-        .run();
+  public void testCaseInsensitiveQuery() throws Exception {
+    String sql = "select `timestamp`, paCket_dAta, TyPe from dfs.`store/pcapng/sniff.pcapng`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
 
-    testBuilder()
-        .sqlQuery("select count(*) as ct from dfs.`store/pcapng/example.pcapng`")
-        .ordered()
-        .baselineColumns("ct")
-        .baselineValues(1L)
-        .build()
-        .run();
+    assertEquals(123, sets.rowCount());
+    sets.clear();
+  }
+
+  @Test
+  public void testWhereSyntaxQuery() throws Exception {
+    String sql = "select type, src_ip, dst_ip, packet_length from dfs.`store/pcapng/sniff.pcapng` where src_ip= '10.2.15.239'";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("type", MinorType.VARCHAR)
+        .addNullable("src_ip", MinorType.VARCHAR)
+        .addNullable("dst_ip", MinorType.VARCHAR)
+        .add("packet_length", MinorType.INT)
+        .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow("UDP", "10.2.15.239", "239.255.255.250", 214)
+        .addRow("UDP", "10.2.15.239", "239.255.255.250", 214)
+        .addRow("UDP", "10.2.15.239", "239.255.255.250", 214)
+        .build();
+
+    assertEquals(3, sets.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+
+  @Test
+  public void testValidHeaders() throws Exception {
+    String sql = "select * from dfs.`store/pcapng/sniff.pcapng`";
+    RowSet sets = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .add("timestamp", MinorType.TIMESTAMP)
+        .add("packet_length", MinorType.INT)
+        .addNullable("type", MinorType.VARCHAR)
+        .addNullable("src_ip", MinorType.VARCHAR)
+        .addNullable("dst_ip", MinorType.VARCHAR)
+        .addNullable("src_port", MinorType.INT)
+        .addNullable("dst_port", MinorType.INT)
+        .addNullable("src_mac_address", MinorType.VARCHAR)
+        .addNullable("dst_mac_address", MinorType.VARCHAR)
+        .addNullable("tcp_session", MinorType.BIGINT)
+        .addNullable("tcp_ack", MinorType.INT)
+        .addNullable("tcp_flags", MinorType.INT)
+        .addNullable("tcp_flags_ns", MinorType.INT)
+        .addNullable("tcp_flags_cwr", MinorType.INT)
+        .addNullable("tcp_flags_ece", MinorType.INT)
+        .addNullable("tcp_flags_ece_ecn_capable", MinorType.INT)
+        .addNullable("tcp_flags_ece_congestion_experienced", MinorType.INT)
+        .addNullable("tcp_flags_urg", MinorType.INT)
+        .addNullable("tcp_flags_ack", MinorType.INT)
+        .addNullable("tcp_flags_psh", MinorType.INT)
+        .addNullable("tcp_flags_rst", MinorType.INT)
+        .addNullable("tcp_flags_syn", MinorType.INT)
+        .addNullable("tcp_flags_fin", MinorType.INT)
+        .addNullable("tcp_parsed_flags", MinorType.VARCHAR)
+        .addNullable("packet_data", MinorType.VARCHAR)
+        .build();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema).build();
+    new RowSetComparison(expected).verifyAndClearAll(sets);
   }
 
   @Test
   public void testGroupBy() throws Exception {
-    Assert.assertEquals(47, testSql("select src_ip, count(1), sum(packet_length) from dfs.`store/pcapng/sniff.pcapng` group by src_ip"));
+    String sql = "select src_ip, count(1), sum(packet_length) from dfs.`store/pcapng/sniff.pcapng` group by src_ip";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(47, sets.rowCount());
+    sets.clear();
   }
 
   @Test
   public void testDistinctQuery() throws Exception {
-    Assert.assertEquals(119, testSql("select distinct `timestamp`, src_ip from dfs.`store/pcapng/sniff.pcapng`"));
-    Assert.assertEquals(1, testSql("select distinct packet_data from dfs.`store/pcapng/example.pcapng`"));
+    String sql = "select distinct `timestamp`, src_ip from dfs.`store/pcapng/sniff.pcapng`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(119, sets.rowCount());
+    sets.clear();
   }
 
   @Test(expected = UserRemoteException.class)
   public void testBasicQueryWithIncorrectFileName() throws Exception {
-    testSql("select * from dfs.`store/pcapng/snaff.pcapng`");
+    String sql = "select * from dfs.`store/pcapng/drill.pcapng`";
+    client.queryBuilder().sql(sql).rowSet();
   }
-
-  @Test
-  public void testPhysicalPlanExecutionBasedOnQuery() throws Exception {
-    String query = "EXPLAIN PLAN for select * from dfs.`store/pcapng/sniff.pcapng`";
-    String plan = getPlanInString(query, JSON_FORMAT);
-    Assert.assertEquals(123, testPhysical(plan));
-  }
-}
+}
\ No newline at end of file
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngStatRecordReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngStatRecordReader.java
new file mode 100644
index 0000000..2dbdda7
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcapng/TestPcapngStatRecordReader.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.pcapng;
+
+import static org.junit.Assert.assertEquals;
+
+import java.nio.file.Paths;
+
+import org.apache.drill.categories.RowSetTests;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.QueryBuilder;
+import org.apache.drill.test.rowSet.RowSetComparison;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(RowSetTests.class)
+public class TestPcapngStatRecordReader extends ClusterTest {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
+    cluster.defineFormat("dfs", "pcapng", new PcapngFormatConfig(null, true));
+    dirTestWatcher.copyResourceToRoot(Paths.get("store", "pcapng"));
+  }
+
+  @Test
+  public void testStarQuery() throws Exception {
+    String sql = "select * from dfs.`store/pcapng/example.pcapng`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(3, sets.rowCount());
+    sets.clear();
+  }
+
+  @Test
+  public void testExplicitQuery() throws Exception {
+    String sql = "select path, shb_hardware, shb_os, if_name, isb_ifrecv from dfs.`store/pcapng/sniff.pcapng`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("path", MinorType.VARCHAR)
+        .addNullable("shb_hardware", MinorType.VARCHAR)
+        .addNullable("shb_os", MinorType.VARCHAR)
+        .addNullable("if_name", MinorType.VARCHAR)
+        .addNullable("isb_ifrecv", MinorType.BIGINT)
+        .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow("sniff.pcapng", "Intel(R) Core(TM) i7-6700HQ CPU @ 2.60GHz (with SSE4.2)",
+            "Mac OS X 10.13.3, build 17D47 (Darwin 17.4.0)", null, null)
+        .addRow("sniff.pcapng", null, null, "en0", null)
+        .addRow("sniff.pcapng", null, null, null, 123)
+        .build();
+
+    assertEquals(3, sets.rowCount());
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+
+  @Test
+  public void testLimitPushdown() throws Exception {
+    String sql = "select * from dfs.`store/pcapng/example.pcapng` limit 2";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(2, sets.rowCount());
+    sets.clear();
+  }
+
+  @Test
+  public void testSerDe() throws Exception {
+    String sql = "select count(*) from dfs.`store/pcapng/*.pcapng`";
+    String plan = queryBuilder().sql(sql).explainJson();
+    long cnt = queryBuilder().physical(plan).singletonLong();
+
+    assertEquals("Counts should match", 6, cnt);
+  }
+
+  @Test
+  public void testValidHeaders() throws Exception {
+    String sql = "select * from dfs.`store/pcapng/sniff.pcapng`";
+    RowSet sets = client.queryBuilder().sql(sql).rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable("path", MinorType.VARCHAR)
+        .addNullable("shb_hardware", MinorType.VARCHAR)
+        .addNullable("shb_os", MinorType.VARCHAR)
+        .addNullable("shb_userappl", MinorType.VARCHAR)
+        .addNullable("if_name", MinorType.VARCHAR)
+        .addNullable("if_description", MinorType.VARCHAR)
+        .addNullable("if_ipv4addr", MinorType.VARCHAR)
+        .addNullable("if_ipv6addr", MinorType.VARCHAR)
+        .addNullable("if_macaddr", MinorType.VARCHAR)
+        .addNullable("if_euiaddr", MinorType.VARCHAR)
+        .addNullable("if_speed", MinorType.INT)
+        .addNullable("if_tsresol", MinorType.INT)
+        .addNullable("if_tzone", MinorType.INT)
+        .addNullable("if_os", MinorType.VARCHAR)
+        .addNullable("if_fcslen", MinorType.INT)
+        .addNullable("if_tsoffset", MinorType.INT)
+        .addNullable("ns_dnsname", MinorType.VARCHAR)
+        .addNullable("ns_dnsip4addr", MinorType.VARCHAR)
+        .addNullable("ns_dnsip6addr", MinorType.VARCHAR)
+        .addNullable("isb_starttime", MinorType.TIMESTAMP)
+        .addNullable("isb_endtime", MinorType.TIMESTAMP)
+        .addNullable("isb_ifrecv", MinorType.BIGINT)
+        .addNullable("isb_ifdrop", MinorType.BIGINT)
+        .addNullable("isb_filteraccept", MinorType.BIGINT)
+        .addNullable("isb_osdrop", MinorType.BIGINT)
+        .addNullable("isb_usrdeliv", MinorType.BIGINT)
+        .build();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema).build();
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+}
\ No newline at end of file
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java
index 7d23b1d..fd72e8e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java
@@ -17,32 +17,102 @@
  */
 package org.apache.drill.exec.store.sequencefile;
 
-import java.io.DataOutputStream;
+import static org.junit.Assert.assertEquals;
+
 import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.nio.file.Paths;
 
-import org.junit.Test;
-import org.apache.drill.test.BaseTestQuery;
+import org.apache.drill.categories.RowSetTests;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.rowSet.RowSet;
+import org.apache.drill.exec.physical.rowSet.RowSetBuilder;
+import org.apache.drill.exec.record.metadata.SchemaBuilder;
+import org.apache.drill.exec.record.metadata.TupleMetadata;
+import org.apache.drill.exec.store.easy.sequencefile.SequenceFileBatchReader;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.QueryBuilder;
+import org.apache.drill.test.rowSet.RowSetComparison;
 import org.apache.hadoop.io.BytesWritable;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
-public class TestSequenceFileReader extends BaseTestQuery {
+@Category(RowSetTests.class)
+public class TestSequenceFileReader extends ClusterTest {
 
-  public static String byteWritableString(String input) throws Exception {
+  @BeforeClass
+  public static void setup() throws Exception {
+    ClusterTest.startCluster(ClusterFixture.builder(dirTestWatcher));
+    dirTestWatcher.copyResourceToRoot(Paths.get("sequencefiles/"));
+  }
+
+  @Test
+  public void testStarQuery() throws Exception {
+    String sql = "select * from cp.`sequencefiles/simple.seq`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable(SequenceFileBatchReader.KEY_SCHEMA, MinorType.VARBINARY)
+        .addNullable(SequenceFileBatchReader.VALUE_SCHEMA, MinorType.VARBINARY)
+        .buildSchema();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow("key1".getBytes(), "value1".getBytes())
+        .addRow("key2".getBytes(), "value2".getBytes())
+        .build();
+
+    assertEquals(2, sets.rowCount());
+
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+
+  @Test
+  public void testExplicitQuery() throws Exception {
+    String sql = "select convert_from(binary_key, 'UTF8') as binary_key from cp.`sequencefiles/simple.seq`";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    TupleMetadata schema = new SchemaBuilder()
+        .addNullable(SequenceFileBatchReader.KEY_SCHEMA, MinorType.VARCHAR)
+        .build();
+
+    RowSet expected = new RowSetBuilder(client.allocator(), schema)
+        .addRow(byteWritableString("key0"))
+        .addRow(byteWritableString("key1"))
+        .build();
+
+    assertEquals(2, sets.rowCount());
+
+    new RowSetComparison(expected).verifyAndClearAll(sets);
+  }
+
+  @Test
+  public void testLimitPushdown() throws Exception {
+    String sql = "select * from cp.`sequencefiles/simple.seq` limit 1 offset 1";
+    QueryBuilder builder = client.queryBuilder().sql(sql);
+    RowSet sets = builder.rowSet();
+
+    assertEquals(1, sets.rowCount());
+    sets.clear();
+  }
+
+  @Test
+  public void testSerDe() throws Exception {
+    String sql = "select count(*) from cp.`sequencefiles/simple.seq`";
+    String plan = queryBuilder().sql(sql).explainJson();
+    long cnt = queryBuilder().physical(plan).singletonLong();
+
+    assertEquals("Counts should match", 2, cnt);
+  }
+
+  private static String byteWritableString(String input) throws Exception {
     final ByteArrayOutputStream bout = new ByteArrayOutputStream();
     DataOutputStream out = new DataOutputStream(bout);
     final BytesWritable writable = new BytesWritable(input.getBytes("UTF-8"));
     writable.write(out);
     return new String(bout.toByteArray());
   }
-
-  @Test
-  public void testSequenceFileReader() throws Exception {
-    testBuilder()
-      .sqlQuery("select convert_from(t.binary_key, 'UTF8') as k, convert_from(t.binary_value, 'UTF8') as v " +
-        "from cp.`sequencefiles/simple.seq` t")
-      .ordered()
-      .baselineColumns("k", "v")
-      .baselineValues(byteWritableString("key0"), byteWritableString("value0"))
-      .baselineValues(byteWritableString("key1"), byteWritableString("value1"))
-      .build().run();
-  }
 }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/OperatorFixture.java b/exec/java-exec/src/test/java/org/apache/drill/test/OperatorFixture.java
index 6b0992c..2fbba16 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/OperatorFixture.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/OperatorFixture.java
@@ -452,7 +452,7 @@
                                BufferAllocator allocator,
                                PhysicalOperator config) {
       super(fragContext, allocator, config);
-      this.operatorStats = new OperatorStats(new OpProfileDef(0, 0, 100), allocator);
+      this.operatorStats = new OperatorStats(new OpProfileDef(0, "", 100), allocator);
     }
 
     @Override
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/OperatorTestBuilderTest.java b/exec/java-exec/src/test/java/org/apache/drill/test/OperatorTestBuilderTest.java
index 82e30bb..2282e19 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/OperatorTestBuilderTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/OperatorTestBuilderTest.java
@@ -145,8 +145,8 @@
     }
 
     @Override
-    public int getOperatorType() {
-      return 0;
+    public String getOperatorType() {
+      return "";
     }
 
     @Override
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/PhysicalOpUnitTestBase.java b/exec/java-exec/src/test/java/org/apache/drill/test/PhysicalOpUnitTestBase.java
index ecc3918..a329b53 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/PhysicalOpUnitTestBase.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/PhysicalOpUnitTestBase.java
@@ -330,8 +330,8 @@
     }
 
     @Override
-    public int getOperatorType() {
-      return 0;
+    public String getOperatorType() {
+      return "";
     }
 
     @Override
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java b/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java
index e4dcd98..88ba9a0 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ProfileParser.java
@@ -26,6 +26,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -36,8 +37,7 @@
 import javax.json.JsonReader;
 import javax.json.JsonValue;
 
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
-
+import org.apache.drill.exec.server.rest.profile.CoreOperatorType;
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -396,8 +396,7 @@
           logger.info("Can't find operator def: {}-{}", major.id, op.opId);
           continue;
         }
-        op.opName = CoreOperatorType.forNumber(op.type).name();
-        op.opName = op.opName.replace("_", " ");
+        op.opName = op.type.replace("_", " ");
         op.name = opDef.name;
         if (op.name.equalsIgnoreCase(op.opName)) {
           op.opName = null;
@@ -422,7 +421,7 @@
     public int majorFragId;
     public int minorFragId;
     public int opId;
-    public int type;
+    public String type;
     public String name;
     public long processMs;
     public long waitMs;
@@ -437,7 +436,10 @@
       majorFragId = majorId;
       minorFragId = minorId;
       opId = opProfile.getInt("operatorId");
-      type = opProfile.getInt("operatorType");
+      JsonValue.ValueType valueType = opProfile.get("operatorType").getValueType();
+      type = valueType == JsonValue.ValueType.STRING
+          ? opProfile.getString("operatorType")
+          : Objects.requireNonNull(CoreOperatorType.valueOf(opProfile.getInt("operatorType"))).name();
       processMs = opProfile.getJsonNumber("processNanos").longValue() / 1_000_000;
       waitMs = opProfile.getJsonNumber("waitNanos").longValue() / 1_000_000;
       setupMs = opProfile.getJsonNumber("setupNanos").longValue() / 1_000_000;
@@ -469,7 +471,7 @@
 
     @Override
     public String toString() {
-      return String.format("[OperatorProfile %02d-%02d-%02d, type: %d, name: %s]",
+      return String.format("[OperatorProfile %02d-%02d-%02d, type: %s, name: %s]",
           majorFragId, opId, minorFragId, type,
           (name == null) ? "null" : name);
     }
@@ -489,7 +491,7 @@
    */
 
   public static class OperatorSummary {
-    public int type;
+    public String type;
     public long processMs;
     public long setupMs;
     public int execCount;
@@ -697,9 +699,9 @@
   public static class FindOpVisitor extends TreeVisitor
   {
     private List<OperatorSummary> ops;
-    private int type;
+    private String type;
 
-    public List<OperatorSummary> find(int type, OperatorSummary node) {
+    public List<OperatorSummary> find(String type, OperatorSummary node) {
       ops = new ArrayList<>();
       this.type = type;
       visit(node);
@@ -708,7 +710,7 @@
 
     @Override
     protected void visitOp(OperatorSummary node, int indentLevel) {
-      if (node.type == type) {
+      if (type.equals(node.type)) {
         ops.add(node);
       }
     }
@@ -738,12 +740,11 @@
   /**
    * For a single-slice query, get all operators of a given numeric operator
    * type.
-   * @param type the operator type as specified in
-   * {@link org.apache.drill.exec.proto.UserBitShared.CoreOperatorType}
+   * @param type the operator type
    * @return a list of operators of the given type
    */
 
-  public List<OperatorProfile> getOpsOfType(int type) {
+  public List<OperatorProfile> getOpsOfType(String type) {
     List<OperatorProfile> ops = new ArrayList<>();
     List<OperatorSummary> opDefs = getOpDefsOfType(type);
     for (OperatorSummary opDef : opDefs) {
@@ -752,7 +753,7 @@
     return ops;
   }
 
-  public List<OperatorSummary> getOpDefsOfType(int type) {
+  public List<OperatorSummary> getOpDefsOfType(String type) {
     return new FindOpVisitor().find(type, topoOrder.get(0));
   }
 
@@ -891,7 +892,7 @@
       logger.info("Op: {} {}", op.opId, op.name);
       logger.info("Setup:   {} - {}%, {}%", op.setupMs, percent(op.setupMs, totalSetup), percent(op.setupMs, total));
       logger.info("Process: {} - {}%, {}%", op.processMs, percent(op.processMs, totalProcess), percent(op.processMs, total));
-      if (op.type == 17) {
+      if (op.type.equals("EXTERNAL_SORT")) {
         long value = op.getMetric(0);
         logger.info("  Spills: {}", value);
       }
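For reference, a minimal sketch of how test code consumes the String-typed operator lookup that replaces the removed CoreOperatorType numbers; the names (ParquetRowGroupScan.OPERATOR_TYPE, ParquetRecordReader.Metric) are taken from the TestParquetFilterPushDown hunk earlier in this diff, and the fragment is illustrative rather than part of the patch:

    // Look up operator profiles by the new String operator type constant
    // instead of the old CoreOperatorType protobuf ordinal.
    ProfileParser profile = client.parseProfile(summary.queryIdString());
    List<ProfileParser.OperatorProfile> scans =
        profile.getOpsOfType(ParquetRowGroupScan.OPERATOR_TYPE);
    assertFalse(scans.isEmpty());
    // Per-operator metrics are read exactly as before.
    long rowGroups = scans.get(0).getMetric(ParquetRecordReader.Metric.NUM_ROWGROUPS.ordinal());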
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
index f084091..86e9e63 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
@@ -347,6 +347,12 @@
 
       // Double must be handled specially since BigDecimal cannot handle
       // INF or NAN double values.
+      case FLOAT:
+        assertEquals(label, ec.getFloat(), ac.getFloat(), delta);
+        break;
+
+      // Double must be handled specially since BigDecimal cannot handle
+      // INF or NAN double values.
       case DOUBLE:
         assertEquals(label, ec.getDouble(), ac.getDouble(), delta);
         break;
@@ -367,6 +373,8 @@
     switch (scalarReader.valueType()) {
       case BYTES:
         return ByteBuffer.wrap(scalarReader.getBytes());
+      case FLOAT:
+        return new BigDecimal(scalarReader.getFloat(), this.scale).stripTrailingZeros();
       case DOUBLE:
         return new BigDecimal(scalarReader.getDouble(), this.scale).stripTrailingZeros();
       default:
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetUtilities.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetUtilities.java
index 925130f..95758d1 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetUtilities.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetUtilities.java
@@ -88,6 +88,8 @@
     switch (valueType) {
     case BYTES:
       return Integer.toHexString(value).getBytes();
+    case FLOAT:
+      return (float) value;
     case DOUBLE:
       return (double) value;
     case INTEGER:
@@ -165,6 +167,9 @@
         assertTrue(msg, Arrays.equals(expected, actual));
         break;
      }
+      case FLOAT:
+       assertEquals(msg, (float) expectedObj, (float) actualObj, 0.0001);
+       break;
      case DOUBLE:
        assertEquals(msg, (double) expectedObj, (double) actualObj, 0.0001);
        break;
diff --git a/exec/java-exec/src/test/resources/plugins/mock-plugin-upgrade.json b/exec/java-exec/src/test/resources/plugins/mock-plugin-upgrade.json
index ad39fa1..f2ee756 100644
--- a/exec/java-exec/src/test/resources/plugins/mock-plugin-upgrade.json
+++ b/exec/java-exec/src/test/resources/plugins/mock-plugin-upgrade.json
@@ -26,11 +26,6 @@
           "extensions" : [ "tsv" ],
           "delimiter" : "\t"
         },
-        "httpd" : {
-          "type" : "httpd",
-          "logFormat" : "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"",
-          "timestampFormat" : "dd/MMM/yyyy:HH:mm:ss ZZ"
-        },
         "parquet" : {
           "type" : "parquet"
         },
@@ -57,10 +52,6 @@
           "extensions" : [ "csvh" ],
           "delimiter" : ",",
           "extractHeader" : true
-        },
-        "image" : {
-          "type" : "image",
-          "extensions" : [ "jpg", "jpeg", "jpe", "tif", "tiff", "dng", "psd", "png", "bmp", "gif", "ico", "pcx", "wav", "wave", "avi", "webp", "mov", "mp4", "m4a", "m4p", "m4b", "m4r", "m4v", "3gp", "3g2", "eps", "epsf", "epsi", "ai", "arw", "crw", "cr2", "nef", "orf", "raf", "rw2", "rwl", "srw", "x3f" ]
         }
       },
       "enabled" : true
@@ -152,11 +143,6 @@
           "extensions" : [ "tsv" ],
           "delimiter" : "\t"
         },
-        "httpd" : {
-          "type" : "httpd",
-          "logFormat" : "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"",
-          "timestampFormat" : "dd/MMM/yyyy:HH:mm:ss ZZ"
-        },
         "parquet" : {
           "type" : "parquet"
         },
@@ -183,10 +169,6 @@
           "extensions" : [ "csvh" ],
           "delimiter" : ",",
           "extractHeader" : true
-        },
-        "image" : {
-          "type" : "image",
-          "extensions" : [ "jpg", "jpeg", "jpe", "tif", "tiff", "dng", "psd", "png", "bmp", "gif", "ico", "pcx", "wav", "wave", "avi", "webp", "mov", "mp4", "m4a", "m4p", "m4b", "m4r", "m4v", "3gp", "3g2", "eps", "epsf", "epsi", "ai", "arw", "crw", "cr2", "nef", "orf", "raf", "rw2", "rwl", "srw", "x3f" ]
         }
       },
       "enabled" : true
diff --git a/exec/java-exec/src/test/resources/store/httpd/dfs-test-bootstrap-test.httpd b/exec/java-exec/src/test/resources/store/httpd/dfs-test-bootstrap-test.httpd
deleted file mode 100644
index d48fa12..0000000
--- a/exec/java-exec/src/test/resources/store/httpd/dfs-test-bootstrap-test.httpd
+++ /dev/null
@@ -1,5 +0,0 @@
-195.154.46.135 - - [25/Oct/2015:04:11:25 +0100] "GET /linux/doing-pxe-without-dhcp-control HTTP/1.1" 200 24323 "http://howto.basjes.nl/" "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0"
-23.95.237.180 - - [25/Oct/2015:04:11:26 +0100] "GET /join_form HTTP/1.0" 200 11114 "http://howto.basjes.nl/" "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0"
-23.95.237.180 - - [25/Oct/2015:04:11:27 +0100] "POST /join_form HTTP/1.1" 302 9093 "http://howto.basjes.nl/join_form" "Mozilla/5.0 (Windows NT 5.1; rv:35.0) Gecko/20100101 Firefox/35.0"
-158.222.5.157 - - [25/Oct/2015:04:24:31 +0100] "GET /join_form HTTP/1.0" 200 11114 "http://howto.basjes.nl/" "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0 AlexaToolbar/alxf-2.21"
-158.222.5.157 - - [25/Oct/2015:04:24:32 +0100] "POST /join_form HTTP/1.1" 302 9093 "http://howto.basjes.nl/join_form" "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0 AlexaToolbar/alxf-2.21"
diff --git a/exec/jdbc-all/pom.xml b/exec/jdbc-all/pom.xml
index 81e2b9f..403a30f 100644
--- a/exec/jdbc-all/pom.xml
+++ b/exec/jdbc-all/pom.xml
@@ -27,7 +27,7 @@
   </parent>
 
   <artifactId>drill-jdbc-all</artifactId>
-  <name>JDBC JAR with all dependencies</name>
+  <name>Drill : Exec : JDBC JAR with all dependencies</name>
 
   <!-- Since we are packaging hadoop dependencies under the namespace with "oadd." prefix by default,
        "package.namespace.prefix" equals to "oadd.". It can be overridden if necessary within any profile -->
@@ -165,10 +165,6 @@
           <artifactId>libpam4j</artifactId>
         </exclusion>
         <exclusion>
-          <artifactId>metadata-extractor</artifactId>
-          <groupId>com.drewnoakes</groupId>
-        </exclusion>
-        <exclusion>
           <groupId>sqlline</groupId>
           <artifactId>sqlline</artifactId>
         </exclusion>
diff --git a/exec/jdbc/pom.xml b/exec/jdbc/pom.xml
index 2462ff4..025bcf5 100644
--- a/exec/jdbc/pom.xml
+++ b/exec/jdbc/pom.xml
@@ -26,7 +26,7 @@
     <version>1.19.0-SNAPSHOT</version>
   </parent>
   <artifactId>drill-jdbc</artifactId>
-  <name>exec/JDBC Driver using dependencies</name>
+  <name>Drill : Exec : JDBC Driver using dependencies</name>
 
   <dependencies>
     <dependency>
diff --git a/exec/memory/base/pom.xml b/exec/memory/base/pom.xml
index c5a49e0..471ebeb 100644
--- a/exec/memory/base/pom.xml
+++ b/exec/memory/base/pom.xml
@@ -26,7 +26,7 @@
     <version>1.19.0-SNAPSHOT</version>
   </parent>
   <artifactId>drill-memory-base</artifactId>
-  <name>exec/memory/base</name>
+  <name>Drill : Exec : Memory : Base</name>
 
   <dependencies>
 
diff --git a/exec/memory/pom.xml b/exec/memory/pom.xml
index 5ef7168..aa945d0 100644
--- a/exec/memory/pom.xml
+++ b/exec/memory/pom.xml
@@ -29,7 +29,7 @@
   <groupId>org.apache.drill.memory</groupId>
   <artifactId>memory-parent</artifactId>
   <packaging>pom</packaging>
-  <name>exec/memory/Parent Pom</name>
+  <name>Drill : Exec : Memory : </name>
 
   <modules>
     <module>base</module>
diff --git a/exec/pom.xml b/exec/pom.xml
index b64a1b8..28feef1 100644
--- a/exec/pom.xml
+++ b/exec/pom.xml
@@ -29,7 +29,7 @@
   <groupId>org.apache.drill.exec</groupId>
   <artifactId>exec-parent</artifactId>
   <packaging>pom</packaging>
-  <name>exec/Parent Pom</name>
+  <name>Drill : Exec : </name>
 
 
   <profiles>
diff --git a/exec/rpc/pom.xml b/exec/rpc/pom.xml
index 3781af7..bd94010 100644
--- a/exec/rpc/pom.xml
+++ b/exec/rpc/pom.xml
@@ -26,7 +26,7 @@
     <version>1.19.0-SNAPSHOT</version>
   </parent>
   <artifactId>drill-rpc</artifactId>
-  <name>exec/rpc</name>
+  <name>Drill : Exec : RPC</name>
 
   <dependencies>
 
diff --git a/exec/vector/pom.xml b/exec/vector/pom.xml
index 3ade503..e034ed2 100644
--- a/exec/vector/pom.xml
+++ b/exec/vector/pom.xml
@@ -26,7 +26,7 @@
     <version>1.19.0-SNAPSHOT</version>
   </parent>
   <artifactId>vector</artifactId>
-  <name>exec/Vectors</name>
+  <name>Drill : Exec : Vectors</name>
 
   <dependencies>
 
diff --git a/exec/vector/src/main/codegen/data/ValueVectorTypes.tdd b/exec/vector/src/main/codegen/data/ValueVectorTypes.tdd
index 50a4258..d30cf7f 100644
--- a/exec/vector/src/main/codegen/data/ValueVectorTypes.tdd
+++ b/exec/vector/src/main/codegen/data/ValueVectorTypes.tdd
@@ -68,7 +68,7 @@
       minor: [
         { class: "Int", valueHolder: "IntHolder"},
         { class: "UInt4", valueHolder: "UInt4Holder" },
-        { class: "Float4", javaType: "float" , boxedType: "Float", accessorType: "double", accessorCast: "set",
+        { class: "Float4", javaType: "float" , boxedType: "Float", accessorType: "float", accessorCast: "set",
           fields: [{name: "value", type: "float"}]},
         { class: "Time", javaType: "int", friendlyType: "LocalTime", accessorType: "int" },
         { class: "IntervalYear", javaType: "int", friendlyType: "Period" }
diff --git a/exec/vector/src/main/codegen/templates/ColumnAccessors.java b/exec/vector/src/main/codegen/templates/ColumnAccessors.java
index c6dc033..06a3d4c 100644
--- a/exec/vector/src/main/codegen/templates/ColumnAccessors.java
+++ b/exec/vector/src/main/codegen/templates/ColumnAccessors.java
@@ -448,6 +448,13 @@
     }
 
     @Override
+    public final void setFloat(final float value) {
+      // Does not catch overflow from
+      // double. See Math.round for details.
+      setLong(Math.round(value));
+    }
+
+    @Override
     public final void setDouble(final double value) {
       // Does not catch overflow from
       // double. See Math.round for details.
@@ -463,7 +470,7 @@
         throw InvalidConversionError.writeError(schema(), value, e);
       }
     }
-    <#elseif drillType == "Float4" || drillType == "Float8">
+    <#elseif drillType == "Float8">
 
     @Override
     public final void setInt(final int value) {
@@ -476,9 +483,35 @@
     }
 
     @Override
+    public final void setFloat(final float value) {
+      setDouble(value);
+    }
+
+    @Override
     public final void setDecimal(final BigDecimal value) {
       setDouble(value.doubleValue());
     }
+    <#elseif drillType == "Float4">
+
+    @Override
+    public final void setInt(final int value) {
+      setFloat(value);
+    }
+
+    @Override
+    public final void setLong(final long value) {
+      setFloat(value);
+    }
+
+    @Override
+    public final void setDouble(final double value) {
+      setFloat((float) value);
+    }
+
+    @Override
+    public final void setDecimal(final BigDecimal value) {
+      setFloat(value.floatValue());
+    }
     <#elseif decimal>
 
     @Override
@@ -495,6 +528,11 @@
     public final void setDouble(final double value) {
       setDecimal(BigDecimal.valueOf(value));
     }
+
+    @Override
+    public final void setFloat(final float value) {
+      setDecimal(BigDecimal.valueOf(value));
+    }
       <#if drillType == "VarDecimal">
 
     @Override
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/PrimitiveColumnMetadata.java b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/PrimitiveColumnMetadata.java
index ad46473..256f55c 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/PrimitiveColumnMetadata.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/PrimitiveColumnMetadata.java
@@ -249,7 +249,7 @@
         case BIGINT:
           return Long.parseLong(value);
         case FLOAT4:
-          return (double) Float.parseFloat(value);
+          return Float.parseFloat(value);
         case FLOAT8:
           return Double.parseDouble(value);
         case VARDECIMAL:
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ScalarReader.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ScalarReader.java
index 30d0c3f..8758e55 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ScalarReader.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ScalarReader.java
@@ -80,6 +80,7 @@
   int getInt();
   boolean getBoolean();
   long getLong();
+  float getFloat();
   double getDouble();
   String getString();
   byte[] getBytes();
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueType.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueType.java
index 0dde3ba..00a2ec8 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueType.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueType.java
@@ -49,6 +49,11 @@
   LONG,
 
   /**
+   * Type is set from a float: FLOAT4.
+   */
+  FLOAT,
+
+  /**
    * Type is set from a double: FLOAT4 and FLOAT8.
    */
   DOUBLE,
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueWriter.java
index 56105f9..1ec1fd1 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/ValueWriter.java
@@ -45,6 +45,7 @@
   void setBoolean(boolean value);
   void setInt(int value);
   void setLong(long value);
+  void setFloat(float value);
   void setDouble(double value);
   void setString(String value);
   void appendBytes(byte[] value, int len);
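The new FLOAT value type threads setFloat/getFloat through the accessor stack; below is a minimal round-trip sketch, assuming the RowSet test fixtures already used in the test hunks above (SchemaBuilder, RowSetBuilder, RowSetReader, a ClusterTest client), not code from this patch:

    // Write a FLOAT4 column: setObject(Float) now routes to setFloat() instead of setDouble().
    TupleMetadata schema = new SchemaBuilder()
        .add("f", MinorType.FLOAT4)
        .buildSchema();
    RowSet rows = new RowSetBuilder(client.allocator(), schema)
        .addRow(1.5f)
        .build();
    // Read the value back through the new ScalarReader.getFloat().
    RowSetReader reader = rows.reader();
    while (reader.next()) {
      assertEquals(1.5f, reader.scalar("f").getFloat(), 0.0001f);
    }
    rows.clear();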
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/reader/AbstractScalarReader.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/reader/AbstractScalarReader.java
index 1fe9c69..e5e221e 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/reader/AbstractScalarReader.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/reader/AbstractScalarReader.java
@@ -143,6 +143,11 @@
   }
 
   @Override
+  public float getFloat() {
+    throw conversionError("double");
+  }
+
+  @Override
   public double getDouble() {
     throw conversionError("double");
   }
@@ -194,6 +199,8 @@
       return getBytes();
     case DECIMAL:
       return getDecimal();
+    case FLOAT:
+      return getFloat();
     case DOUBLE:
       return getDouble();
     case INTEGER:
@@ -240,6 +247,8 @@
     switch (extendedType()) {
     case BYTES:
       return AccessorUtilities.bytesToString(getBytes());
+    case FLOAT:
+      return Float.toString(getFloat());
     case DOUBLE:
       return Double.toString(getDouble());
     case INTEGER:
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractFixedWidthWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractFixedWidthWriter.java
index 58fbf8e..5e5fc1e 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractFixedWidthWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractFixedWidthWriter.java
@@ -138,6 +138,17 @@
     }
 
     @Override
+    public final void setFloat(final float value) {
+      try {
+        // Catches int overflow. Does not catch overflow from
+        // double. See Math.round for details.
+        setInt(Math.toIntExact(Math.round(value)));
+      } catch (final ArithmeticException e) {
+        throw InvalidConversionError.writeError(schema(), value, e);
+      }
+    }
+
+    @Override
     public final void setDouble(final double value) {
       try {
         // Catches int overflow. Does not catch overflow from
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractScalarWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractScalarWriter.java
index 5a0e8ba..43426b9 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractScalarWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractScalarWriter.java
@@ -18,6 +18,7 @@
 package org.apache.drill.exec.vector.accessor.writer;
 
 import java.math.BigDecimal;
+import java.util.Date;
 
 import org.apache.drill.exec.vector.accessor.ScalarWriter;
 import org.apache.drill.exec.vector.accessor.UnsupportedConversionError;
@@ -50,7 +51,7 @@
     } else if (value instanceof Double) {
       setDouble((Double) value);
     } else if (value instanceof Float) {
-      setDouble((Float) value);
+      setFloat((Float) value);
     } else if (value instanceof BigDecimal) {
       setDecimal((BigDecimal) value);
     } else if (value instanceof Period) {
@@ -61,6 +62,8 @@
       setDate((LocalDate) value);
     } else if (value instanceof Instant) {
       setTimestamp((Instant) value);
+    } else if (value instanceof Date) {
+      setTimestamp(Instant.ofEpochMilli(((Date) value).getTime()));
     } else if (value instanceof byte[]) {
       final byte[] bytes = (byte[]) value;
       setBytes(bytes, bytes.length);
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractTupleWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractTupleWriter.java
index 9cb674e..16155f4 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractTupleWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/AbstractTupleWriter.java
@@ -139,7 +139,7 @@
    * tuple writer. If no listener is bound, then an attempt to add a column
    * throws an exception.
    */
-  public static interface TupleWriterListener {
+  public interface TupleWriterListener {
 
     ObjectWriter addColumn(TupleWriter tuple, ColumnMetadata column);
 
@@ -190,8 +190,9 @@
     this.writers = writers;
   }
 
+  @SuppressWarnings("unused")
   protected AbstractTupleWriter(TupleMetadata schema) {
-    this(schema, new ArrayList<AbstractObjectWriter>());
+    this(schema, new ArrayList<>());
   }
 
   @Override
@@ -201,8 +202,8 @@
     vectorIndex = index;
     this.childIndex = childIndex;
 
-    for (int i = 0; i < writers.size(); i++) {
-      writers.get(i).events().bindIndex(childIndex);
+    for (AbstractObjectWriter writer : writers) {
+      writer.events().bindIndex(childIndex);
     }
   }
 
@@ -239,8 +240,7 @@
 
   @Override
   public boolean isProjected(String columnName) {
-    return listener == null ? true
-        : listener.isProjected(columnName);
+    return listener == null || listener.isProjected(columnName);
   }
 
   @Override
@@ -288,8 +288,8 @@
   public void startWrite() {
     assert state == State.IDLE;
     state = State.IN_WRITE;
-    for (int i = 0; i < writers.size(); i++) {
-      writers.get(i).events().startWrite();
+    for (AbstractObjectWriter writer : writers) {
+      writer.events().startWrite();
     }
   }
 
@@ -300,16 +300,16 @@
 
     assert state == State.IN_WRITE;
     state = State.IN_ROW;
-    for (int i = 0; i < writers.size(); i++) {
-      writers.get(i).events().startRow();
+    for (AbstractObjectWriter writer : writers) {
+      writer.events().startRow();
     }
   }
 
   @Override
   public void endArrayValue() {
     assert state == State.IN_ROW;
-    for (int i = 0; i < writers.size(); i++) {
-      writers.get(i).events().endArrayValue();
+    for (AbstractObjectWriter writer : writers) {
+      writer.events().endArrayValue();
     }
   }
 
@@ -325,16 +325,16 @@
     // the current row.
 
     assert state == State.IN_ROW;
-    for (int i = 0; i < writers.size(); i++) {
-      writers.get(i).events().restartRow();
+    for (AbstractObjectWriter writer : writers) {
+      writer.events().restartRow();
     }
   }
 
   @Override
   public void saveRow() {
     assert state == State.IN_ROW;
-    for (int i = 0; i < writers.size(); i++) {
-      writers.get(i).events().saveRow();
+    for (AbstractObjectWriter writer : writers) {
+      writer.events().saveRow();
     }
     state = State.IN_WRITE;
   }
@@ -345,8 +345,8 @@
     // Rollover can only happen while a row is in progress.
 
     assert state == State.IN_ROW;
-    for (int i = 0; i < writers.size(); i++) {
-      writers.get(i).events().preRollover();
+    for (AbstractObjectWriter writer : writers) {
+      writer.events().preRollover();
     }
   }
 
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/BaseScalarWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/BaseScalarWriter.java
index 56af1dc..7938b29 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/BaseScalarWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/BaseScalarWriter.java
@@ -241,6 +241,11 @@
   }
 
   @Override
+  public void setFloat(float value) {
+    throw conversionError("float");
+  }
+
+  @Override
   public void setDouble(double value) {
     throw conversionError("double");
   }
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/NullableScalarWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/NullableScalarWriter.java
index 4d5353d..02343df 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/NullableScalarWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/NullableScalarWriter.java
@@ -155,6 +155,13 @@
   }
 
   @Override
+  public void setFloat(float value) {
+    baseWriter.setFloat(value);
+    isSetWriter.setInt(1);
+    writerIndex.nextElement();
+  }
+
+  @Override
   public void setDouble(double value) {
     baseWriter.setDouble(value);
     isSetWriter.setInt(1);
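A brief sketch of the nullable wrapper's behavior for the new float path, assuming a NullableScalarWriter over a FLOAT4 column (the nullableWriter name is hypothetical, and setNull() is assumed from the ColumnWriter API):

    // Hypothetical illustration only.
    nullableWriter.setFloat(1.25f);  // value delegated to the base writer, is-set bit written as 1
    nullableWriter.setNull();        // no value written, is-set bit written as 0
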
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/dummy/DummyScalarWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/dummy/DummyScalarWriter.java
index 85a7fe1..5d738c3 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/dummy/DummyScalarWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/dummy/DummyScalarWriter.java
@@ -64,6 +64,9 @@
   public void setLong(long value) { }
 
   @Override
+  public void setFloat(float value) { }
+
+  @Override
   public void setDouble(double value) { }
 
   @Override
diff --git a/logical/pom.xml b/logical/pom.xml
index 31000b8..494c6d9 100644
--- a/logical/pom.xml
+++ b/logical/pom.xml
@@ -29,7 +29,7 @@
 
   <artifactId>drill-logical</artifactId>
   <packaging>jar</packaging>
-  <name>Logical Plan, Base expressions</name>
+  <name>Drill : Logical Plan</name>
 
   <dependencies>
     <dependency>
diff --git a/metastore/iceberg-metastore/pom.xml b/metastore/iceberg-metastore/pom.xml
index ba62212..85074d7 100644
--- a/metastore/iceberg-metastore/pom.xml
+++ b/metastore/iceberg-metastore/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-iceberg-metastore</artifactId>
-  <name>metastore/Drill Iceberg Metastore</name>
+  <name>Drill : Metastore : Iceberg</name>
 
   <properties>
     <iceberg.version>93d51b9</iceberg.version>
diff --git a/metastore/metastore-api/pom.xml b/metastore/metastore-api/pom.xml
index 0b09e84..b184f26 100644
--- a/metastore/metastore-api/pom.xml
+++ b/metastore/metastore-api/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-metastore-api</artifactId>
-  <name>metastore/Drill Metastore API</name>
+  <name>Drill : Metastore : API</name>
 
   <dependencies>
     <dependency>
diff --git a/metastore/pom.xml b/metastore/pom.xml
index f25745f..b00621f 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -30,7 +30,7 @@
   <groupId>org.apache.drill.metastore</groupId>
   <artifactId>metastore-parent</artifactId>
   <packaging>pom</packaging>
-  <name>metastore/Parent Pom</name>
+  <name>Drill : Metastore : </name>
 
   <dependencies>
     <dependency>
diff --git a/metastore/rdbms-metastore/pom.xml b/metastore/rdbms-metastore/pom.xml
index adbccde..bb18a43 100644
--- a/metastore/rdbms-metastore/pom.xml
+++ b/metastore/rdbms-metastore/pom.xml
@@ -28,7 +28,7 @@
   </parent>
 
   <artifactId>drill-rdbms-metastore</artifactId>
-  <name>metastore/Drill RDBMS Metastore</name>
+  <name>Drill : Metastore : RDBMS</name>
 
   <properties>
     <jooq.version>3.13.1</jooq.version>
diff --git a/pom.xml b/pom.xml
index 0a98738..105d509 100644
--- a/pom.xml
+++ b/pom.xml
@@ -33,7 +33,7 @@
   <version>1.19.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
-  <name>Apache Drill Root POM</name>
+  <name>Drill : </name>
   <description>Apache Drill is an open source, low latency SQL query engine for Hadoop and NoSQL.</description>
   <url>http://drill.apache.org/</url>
 
@@ -58,7 +58,7 @@
       avoid_bad_dependencies plugin found in the file.
     -->
     <calcite.groupId>com.github.vvysotskyi.drill-calcite</calcite.groupId>
-    <calcite.version>1.21.0-drill-r0</calcite.version>
+    <calcite.version>1.21.0-drill-r1</calcite.version>
     <avatica.version>1.15.0</avatica.version>
     <janino.version>3.0.11</janino.version>
     <sqlline.version>1.9.0</sqlline.version>
@@ -127,6 +127,8 @@
     <xerces.version>2.12.0</xerces.version>
     <commons.configuration.version>1.10</commons.configuration.version>
     <commons.beanutils.version>1.9.4</commons.beanutils.version>
+    <httpdlog-parser.version>5.7</httpdlog-parser.version>
+    <yauaa.version>5.20</yauaa.version>
   </properties>
 
   <scm>
@@ -221,6 +223,16 @@
     <url>https://issues.apache.org/jira/browse/DRILL</url>
   </issueManagement>
 
+  <reporting>
+    <plugins>
+      <plugin>
+        <groupId>org.owasp</groupId>
+        <artifactId>dependency-check-maven</artifactId>
+        <version>6.0.4</version>
+      </plugin>
+    </plugins>
+  </reporting>
+
   <build>
     <plugins>
       <plugin>
@@ -386,7 +398,6 @@
             <exclude>**/*.csvh-test</exclude>
             <exclude>**/*.tsv</exclude>
             <exclude>**/*.txt</exclude>
-            <exclude>**/*.yaml</exclude>
             <exclude>**/*.ssv</exclude>
             <exclude>**/.buildpath</exclude>
             <exclude>**/target/**</exclude>
@@ -408,6 +419,7 @@
             <exclude>**/ssl/*.p12</exclude>
             <exclude>**/*.tbl</exclude>
             <exclude>**/*.httpd</exclude>
+            <exclude>**/*.access_log</exclude>
             <exclude>**/*.autotools</exclude>
             <exclude>**/*.cproject</exclude>
             <exclude>**/*.drill</exclude>
@@ -686,7 +698,6 @@
               <exclude>**/*.md</exclude>
               <exclude>**/*.eps</exclude>
               <exclude>**/*.json</exclude>
-              <exclude>**/*.yaml</exclude>
               <exclude>**/*.seq</exclude>
               <exclude>**/*.parquet</exclude>
               <exclude>**/*.avro</exclude>
diff --git a/protocol/pom.xml b/protocol/pom.xml
index 9577f9d..e2075b4 100644
--- a/protocol/pom.xml
+++ b/protocol/pom.xml
@@ -29,7 +29,7 @@
 
   <artifactId>drill-protocol</artifactId>
   <packaging>jar</packaging>
-  <name>Drill Protocol</name>
+  <name>Drill : Protocol</name>
 
   <dependencies>
     <dependency>
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java
index 5aeff0f..2437fb6 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/SchemaUserBitShared.java
@@ -2377,6 +2377,8 @@
 
                 if(message.hasWaitNanos())
                     output.writeInt64(9, message.getWaitNanos(), false);
+                if(message.hasOperatorTypeName())
+                    output.writeString(10, message.getOperatorTypeName(), false);
             }
             public boolean isInitialized(org.apache.drill.exec.proto.UserBitShared.OperatorProfile message)
             {
@@ -2442,6 +2444,9 @@
                         case 9:
                             builder.setWaitNanos(input.readInt64());
                             break;
+                        case 10:
+                            builder.setOperatorTypeName(input.readString());
+                            break;
                         default:
                             input.handleUnknownField(number, this);
                     }
@@ -2490,6 +2495,7 @@
                 case 7: return "peakLocalMemoryAllocated";
                 case 8: return "metric";
                 case 9: return "waitNanos";
+                case 10: return "operatorTypeName";
                 default: return null;
             }
         }
@@ -2509,6 +2515,7 @@
             fieldMap.put("peakLocalMemoryAllocated", 7);
             fieldMap.put("metric", 8);
             fieldMap.put("waitNanos", 9);
+            fieldMap.put("operatorTypeName", 10);
         }
     }
 
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java b/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
index 01a51f0..4254143 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
@@ -409,723 +409,6 @@
   }
 
   /**
-   * Protobuf enum {@code exec.shared.CoreOperatorType}
-   */
-  public enum CoreOperatorType
-      implements com.google.protobuf.ProtocolMessageEnum {
-    /**
-     * <code>SINGLE_SENDER = 0;</code>
-     */
-    SINGLE_SENDER(0),
-    /**
-     * <code>BROADCAST_SENDER = 1;</code>
-     */
-    BROADCAST_SENDER(1),
-    /**
-     * <code>FILTER = 2;</code>
-     */
-    FILTER(2),
-    /**
-     * <code>HASH_AGGREGATE = 3;</code>
-     */
-    HASH_AGGREGATE(3),
-    /**
-     * <code>HASH_JOIN = 4;</code>
-     */
-    HASH_JOIN(4),
-    /**
-     * <code>MERGE_JOIN = 5;</code>
-     */
-    MERGE_JOIN(5),
-    /**
-     * <code>HASH_PARTITION_SENDER = 6;</code>
-     */
-    HASH_PARTITION_SENDER(6),
-    /**
-     * <code>LIMIT = 7;</code>
-     */
-    LIMIT(7),
-    /**
-     * <code>MERGING_RECEIVER = 8;</code>
-     */
-    MERGING_RECEIVER(8),
-    /**
-     * <code>ORDERED_PARTITION_SENDER = 9;</code>
-     */
-    ORDERED_PARTITION_SENDER(9),
-    /**
-     * <code>PROJECT = 10;</code>
-     */
-    PROJECT(10),
-    /**
-     * <code>UNORDERED_RECEIVER = 11;</code>
-     */
-    UNORDERED_RECEIVER(11),
-    /**
-     * <code>RANGE_PARTITION_SENDER = 12;</code>
-     */
-    RANGE_PARTITION_SENDER(12),
-    /**
-     * <code>SCREEN = 13;</code>
-     */
-    SCREEN(13),
-    /**
-     * <code>SELECTION_VECTOR_REMOVER = 14;</code>
-     */
-    SELECTION_VECTOR_REMOVER(14),
-    /**
-     * <code>STREAMING_AGGREGATE = 15;</code>
-     */
-    STREAMING_AGGREGATE(15),
-    /**
-     * <code>TOP_N_SORT = 16;</code>
-     */
-    TOP_N_SORT(16),
-    /**
-     * <code>EXTERNAL_SORT = 17;</code>
-     */
-    EXTERNAL_SORT(17),
-    /**
-     * <code>TRACE = 18;</code>
-     */
-    TRACE(18),
-    /**
-     * <code>UNION = 19;</code>
-     */
-    UNION(19),
-    /**
-     * <code>OLD_SORT = 20;</code>
-     */
-    OLD_SORT(20),
-    /**
-     * <code>PARQUET_ROW_GROUP_SCAN = 21;</code>
-     */
-    PARQUET_ROW_GROUP_SCAN(21),
-    /**
-     * <code>HIVE_SUB_SCAN = 22;</code>
-     */
-    HIVE_SUB_SCAN(22),
-    /**
-     * <code>SYSTEM_TABLE_SCAN = 23;</code>
-     */
-    SYSTEM_TABLE_SCAN(23),
-    /**
-     * <code>MOCK_SUB_SCAN = 24;</code>
-     */
-    MOCK_SUB_SCAN(24),
-    /**
-     * <code>PARQUET_WRITER = 25;</code>
-     */
-    PARQUET_WRITER(25),
-    /**
-     * <code>DIRECT_SUB_SCAN = 26;</code>
-     */
-    DIRECT_SUB_SCAN(26),
-    /**
-     * <code>TEXT_WRITER = 27;</code>
-     */
-    TEXT_WRITER(27),
-    /**
-     * <code>TEXT_SUB_SCAN = 28;</code>
-     */
-    TEXT_SUB_SCAN(28),
-    /**
-     * <code>JSON_SUB_SCAN = 29;</code>
-     */
-    JSON_SUB_SCAN(29),
-    /**
-     * <code>INFO_SCHEMA_SUB_SCAN = 30;</code>
-     */
-    INFO_SCHEMA_SUB_SCAN(30),
-    /**
-     * <code>COMPLEX_TO_JSON = 31;</code>
-     */
-    COMPLEX_TO_JSON(31),
-    /**
-     * <code>PRODUCER_CONSUMER = 32;</code>
-     */
-    PRODUCER_CONSUMER(32),
-    /**
-     * <code>HBASE_SUB_SCAN = 33;</code>
-     */
-    HBASE_SUB_SCAN(33),
-    /**
-     * <code>WINDOW = 34;</code>
-     */
-    WINDOW(34),
-    /**
-     * <code>NESTED_LOOP_JOIN = 35;</code>
-     */
-    NESTED_LOOP_JOIN(35),
-    /**
-     * <code>AVRO_SUB_SCAN = 36;</code>
-     */
-    AVRO_SUB_SCAN(36),
-    /**
-     * <code>PCAP_SUB_SCAN = 37;</code>
-     */
-    PCAP_SUB_SCAN(37),
-    /**
-     * <code>KAFKA_SUB_SCAN = 38;</code>
-     */
-    KAFKA_SUB_SCAN(38),
-    /**
-     * <code>KUDU_SUB_SCAN = 39;</code>
-     */
-    KUDU_SUB_SCAN(39),
-    /**
-     * <code>FLATTEN = 40;</code>
-     */
-    FLATTEN(40),
-    /**
-     * <code>LATERAL_JOIN = 41;</code>
-     */
-    LATERAL_JOIN(41),
-    /**
-     * <code>UNNEST = 42;</code>
-     */
-    UNNEST(42),
-    /**
-     * <code>HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN = 43;</code>
-     */
-    HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN(43),
-    /**
-     * <code>JDBC_SCAN = 44;</code>
-     */
-    JDBC_SCAN(44),
-    /**
-     * <code>REGEX_SUB_SCAN = 45;</code>
-     */
-    REGEX_SUB_SCAN(45),
-    /**
-     * <code>MAPRDB_SUB_SCAN = 46;</code>
-     */
-    MAPRDB_SUB_SCAN(46),
-    /**
-     * <code>MONGO_SUB_SCAN = 47;</code>
-     */
-    MONGO_SUB_SCAN(47),
-    /**
-     * <code>KUDU_WRITER = 48;</code>
-     */
-    KUDU_WRITER(48),
-    /**
-     * <code>OPEN_TSDB_SUB_SCAN = 49;</code>
-     */
-    OPEN_TSDB_SUB_SCAN(49),
-    /**
-     * <code>JSON_WRITER = 50;</code>
-     */
-    JSON_WRITER(50),
-    /**
-     * <code>HTPPD_LOG_SUB_SCAN = 51;</code>
-     */
-    HTPPD_LOG_SUB_SCAN(51),
-    /**
-     * <code>IMAGE_SUB_SCAN = 52;</code>
-     */
-    IMAGE_SUB_SCAN(52),
-    /**
-     * <code>SEQUENCE_SUB_SCAN = 53;</code>
-     */
-    SEQUENCE_SUB_SCAN(53),
-    /**
-     * <code>PARTITION_LIMIT = 54;</code>
-     */
-    PARTITION_LIMIT(54),
-    /**
-     * <code>PCAPNG_SUB_SCAN = 55;</code>
-     */
-    PCAPNG_SUB_SCAN(55),
-    /**
-     * <code>RUNTIME_FILTER = 56;</code>
-     */
-    RUNTIME_FILTER(56),
-    /**
-     * <code>ROWKEY_JOIN = 57;</code>
-     */
-    ROWKEY_JOIN(57),
-    /**
-     * <code>SYSLOG_SUB_SCAN = 58;</code>
-     */
-    SYSLOG_SUB_SCAN(58),
-    /**
-     * <code>STATISTICS_AGGREGATE = 59;</code>
-     */
-    STATISTICS_AGGREGATE(59),
-    /**
-     * <code>UNPIVOT_MAPS = 60;</code>
-     */
-    UNPIVOT_MAPS(60),
-    /**
-     * <code>STATISTICS_MERGE = 61;</code>
-     */
-    STATISTICS_MERGE(61),
-    /**
-     * <code>LTSV_SUB_SCAN = 62;</code>
-     */
-    LTSV_SUB_SCAN(62),
-    /**
-     * <code>HDF5_SUB_SCAN = 63;</code>
-     */
-    HDF5_SUB_SCAN(63),
-    /**
-     * <code>EXCEL_SUB_SCAN = 64;</code>
-     */
-    EXCEL_SUB_SCAN(64),
-    /**
-     * <code>SHP_SUB_SCAN = 65;</code>
-     */
-    SHP_SUB_SCAN(65),
-    /**
-     * <code>METADATA_HANDLER = 66;</code>
-     */
-    METADATA_HANDLER(66),
-    /**
-     * <code>METADATA_CONTROLLER = 67;</code>
-     */
-    METADATA_CONTROLLER(67),
-    /**
-     * <code>DRUID_SUB_SCAN = 68;</code>
-     */
-    DRUID_SUB_SCAN(68),
-    /**
-     * <code>SPSS_SUB_SCAN = 69;</code>
-     */
-    SPSS_SUB_SCAN(69),
-    /**
-     * <code>HTTP_SUB_SCAN = 70;</code>
-     */
-    HTTP_SUB_SCAN(70),
-    ;
-
-    /**
-     * <code>SINGLE_SENDER = 0;</code>
-     */
-    public static final int SINGLE_SENDER_VALUE = 0;
-    /**
-     * <code>BROADCAST_SENDER = 1;</code>
-     */
-    public static final int BROADCAST_SENDER_VALUE = 1;
-    /**
-     * <code>FILTER = 2;</code>
-     */
-    public static final int FILTER_VALUE = 2;
-    /**
-     * <code>HASH_AGGREGATE = 3;</code>
-     */
-    public static final int HASH_AGGREGATE_VALUE = 3;
-    /**
-     * <code>HASH_JOIN = 4;</code>
-     */
-    public static final int HASH_JOIN_VALUE = 4;
-    /**
-     * <code>MERGE_JOIN = 5;</code>
-     */
-    public static final int MERGE_JOIN_VALUE = 5;
-    /**
-     * <code>HASH_PARTITION_SENDER = 6;</code>
-     */
-    public static final int HASH_PARTITION_SENDER_VALUE = 6;
-    /**
-     * <code>LIMIT = 7;</code>
-     */
-    public static final int LIMIT_VALUE = 7;
-    /**
-     * <code>MERGING_RECEIVER = 8;</code>
-     */
-    public static final int MERGING_RECEIVER_VALUE = 8;
-    /**
-     * <code>ORDERED_PARTITION_SENDER = 9;</code>
-     */
-    public static final int ORDERED_PARTITION_SENDER_VALUE = 9;
-    /**
-     * <code>PROJECT = 10;</code>
-     */
-    public static final int PROJECT_VALUE = 10;
-    /**
-     * <code>UNORDERED_RECEIVER = 11;</code>
-     */
-    public static final int UNORDERED_RECEIVER_VALUE = 11;
-    /**
-     * <code>RANGE_PARTITION_SENDER = 12;</code>
-     */
-    public static final int RANGE_PARTITION_SENDER_VALUE = 12;
-    /**
-     * <code>SCREEN = 13;</code>
-     */
-    public static final int SCREEN_VALUE = 13;
-    /**
-     * <code>SELECTION_VECTOR_REMOVER = 14;</code>
-     */
-    public static final int SELECTION_VECTOR_REMOVER_VALUE = 14;
-    /**
-     * <code>STREAMING_AGGREGATE = 15;</code>
-     */
-    public static final int STREAMING_AGGREGATE_VALUE = 15;
-    /**
-     * <code>TOP_N_SORT = 16;</code>
-     */
-    public static final int TOP_N_SORT_VALUE = 16;
-    /**
-     * <code>EXTERNAL_SORT = 17;</code>
-     */
-    public static final int EXTERNAL_SORT_VALUE = 17;
-    /**
-     * <code>TRACE = 18;</code>
-     */
-    public static final int TRACE_VALUE = 18;
-    /**
-     * <code>UNION = 19;</code>
-     */
-    public static final int UNION_VALUE = 19;
-    /**
-     * <code>OLD_SORT = 20;</code>
-     */
-    public static final int OLD_SORT_VALUE = 20;
-    /**
-     * <code>PARQUET_ROW_GROUP_SCAN = 21;</code>
-     */
-    public static final int PARQUET_ROW_GROUP_SCAN_VALUE = 21;
-    /**
-     * <code>HIVE_SUB_SCAN = 22;</code>
-     */
-    public static final int HIVE_SUB_SCAN_VALUE = 22;
-    /**
-     * <code>SYSTEM_TABLE_SCAN = 23;</code>
-     */
-    public static final int SYSTEM_TABLE_SCAN_VALUE = 23;
-    /**
-     * <code>MOCK_SUB_SCAN = 24;</code>
-     */
-    public static final int MOCK_SUB_SCAN_VALUE = 24;
-    /**
-     * <code>PARQUET_WRITER = 25;</code>
-     */
-    public static final int PARQUET_WRITER_VALUE = 25;
-    /**
-     * <code>DIRECT_SUB_SCAN = 26;</code>
-     */
-    public static final int DIRECT_SUB_SCAN_VALUE = 26;
-    /**
-     * <code>TEXT_WRITER = 27;</code>
-     */
-    public static final int TEXT_WRITER_VALUE = 27;
-    /**
-     * <code>TEXT_SUB_SCAN = 28;</code>
-     */
-    public static final int TEXT_SUB_SCAN_VALUE = 28;
-    /**
-     * <code>JSON_SUB_SCAN = 29;</code>
-     */
-    public static final int JSON_SUB_SCAN_VALUE = 29;
-    /**
-     * <code>INFO_SCHEMA_SUB_SCAN = 30;</code>
-     */
-    public static final int INFO_SCHEMA_SUB_SCAN_VALUE = 30;
-    /**
-     * <code>COMPLEX_TO_JSON = 31;</code>
-     */
-    public static final int COMPLEX_TO_JSON_VALUE = 31;
-    /**
-     * <code>PRODUCER_CONSUMER = 32;</code>
-     */
-    public static final int PRODUCER_CONSUMER_VALUE = 32;
-    /**
-     * <code>HBASE_SUB_SCAN = 33;</code>
-     */
-    public static final int HBASE_SUB_SCAN_VALUE = 33;
-    /**
-     * <code>WINDOW = 34;</code>
-     */
-    public static final int WINDOW_VALUE = 34;
-    /**
-     * <code>NESTED_LOOP_JOIN = 35;</code>
-     */
-    public static final int NESTED_LOOP_JOIN_VALUE = 35;
-    /**
-     * <code>AVRO_SUB_SCAN = 36;</code>
-     */
-    public static final int AVRO_SUB_SCAN_VALUE = 36;
-    /**
-     * <code>PCAP_SUB_SCAN = 37;</code>
-     */
-    public static final int PCAP_SUB_SCAN_VALUE = 37;
-    /**
-     * <code>KAFKA_SUB_SCAN = 38;</code>
-     */
-    public static final int KAFKA_SUB_SCAN_VALUE = 38;
-    /**
-     * <code>KUDU_SUB_SCAN = 39;</code>
-     */
-    public static final int KUDU_SUB_SCAN_VALUE = 39;
-    /**
-     * <code>FLATTEN = 40;</code>
-     */
-    public static final int FLATTEN_VALUE = 40;
-    /**
-     * <code>LATERAL_JOIN = 41;</code>
-     */
-    public static final int LATERAL_JOIN_VALUE = 41;
-    /**
-     * <code>UNNEST = 42;</code>
-     */
-    public static final int UNNEST_VALUE = 42;
-    /**
-     * <code>HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN = 43;</code>
-     */
-    public static final int HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN_VALUE = 43;
-    /**
-     * <code>JDBC_SCAN = 44;</code>
-     */
-    public static final int JDBC_SCAN_VALUE = 44;
-    /**
-     * <code>REGEX_SUB_SCAN = 45;</code>
-     */
-    public static final int REGEX_SUB_SCAN_VALUE = 45;
-    /**
-     * <code>MAPRDB_SUB_SCAN = 46;</code>
-     */
-    public static final int MAPRDB_SUB_SCAN_VALUE = 46;
-    /**
-     * <code>MONGO_SUB_SCAN = 47;</code>
-     */
-    public static final int MONGO_SUB_SCAN_VALUE = 47;
-    /**
-     * <code>KUDU_WRITER = 48;</code>
-     */
-    public static final int KUDU_WRITER_VALUE = 48;
-    /**
-     * <code>OPEN_TSDB_SUB_SCAN = 49;</code>
-     */
-    public static final int OPEN_TSDB_SUB_SCAN_VALUE = 49;
-    /**
-     * <code>JSON_WRITER = 50;</code>
-     */
-    public static final int JSON_WRITER_VALUE = 50;
-    /**
-     * <code>HTPPD_LOG_SUB_SCAN = 51;</code>
-     */
-    public static final int HTPPD_LOG_SUB_SCAN_VALUE = 51;
-    /**
-     * <code>IMAGE_SUB_SCAN = 52;</code>
-     */
-    public static final int IMAGE_SUB_SCAN_VALUE = 52;
-    /**
-     * <code>SEQUENCE_SUB_SCAN = 53;</code>
-     */
-    public static final int SEQUENCE_SUB_SCAN_VALUE = 53;
-    /**
-     * <code>PARTITION_LIMIT = 54;</code>
-     */
-    public static final int PARTITION_LIMIT_VALUE = 54;
-    /**
-     * <code>PCAPNG_SUB_SCAN = 55;</code>
-     */
-    public static final int PCAPNG_SUB_SCAN_VALUE = 55;
-    /**
-     * <code>RUNTIME_FILTER = 56;</code>
-     */
-    public static final int RUNTIME_FILTER_VALUE = 56;
-    /**
-     * <code>ROWKEY_JOIN = 57;</code>
-     */
-    public static final int ROWKEY_JOIN_VALUE = 57;
-    /**
-     * <code>SYSLOG_SUB_SCAN = 58;</code>
-     */
-    public static final int SYSLOG_SUB_SCAN_VALUE = 58;
-    /**
-     * <code>STATISTICS_AGGREGATE = 59;</code>
-     */
-    public static final int STATISTICS_AGGREGATE_VALUE = 59;
-    /**
-     * <code>UNPIVOT_MAPS = 60;</code>
-     */
-    public static final int UNPIVOT_MAPS_VALUE = 60;
-    /**
-     * <code>STATISTICS_MERGE = 61;</code>
-     */
-    public static final int STATISTICS_MERGE_VALUE = 61;
-    /**
-     * <code>LTSV_SUB_SCAN = 62;</code>
-     */
-    public static final int LTSV_SUB_SCAN_VALUE = 62;
-    /**
-     * <code>HDF5_SUB_SCAN = 63;</code>
-     */
-    public static final int HDF5_SUB_SCAN_VALUE = 63;
-    /**
-     * <code>EXCEL_SUB_SCAN = 64;</code>
-     */
-    public static final int EXCEL_SUB_SCAN_VALUE = 64;
-    /**
-     * <code>SHP_SUB_SCAN = 65;</code>
-     */
-    public static final int SHP_SUB_SCAN_VALUE = 65;
-    /**
-     * <code>METADATA_HANDLER = 66;</code>
-     */
-    public static final int METADATA_HANDLER_VALUE = 66;
-    /**
-     * <code>METADATA_CONTROLLER = 67;</code>
-     */
-    public static final int METADATA_CONTROLLER_VALUE = 67;
-    /**
-     * <code>DRUID_SUB_SCAN = 68;</code>
-     */
-    public static final int DRUID_SUB_SCAN_VALUE = 68;
-    /**
-     * <code>SPSS_SUB_SCAN = 69;</code>
-     */
-    public static final int SPSS_SUB_SCAN_VALUE = 69;
-    /**
-     * <code>HTTP_SUB_SCAN = 70;</code>
-     */
-    public static final int HTTP_SUB_SCAN_VALUE = 70;
-
-
-    public final int getNumber() {
-      return value;
-    }
-
-    /**
-     * @param value The numeric wire value of the corresponding enum entry.
-     * @return The enum associated with the given numeric wire value.
-     * @deprecated Use {@link #forNumber(int)} instead.
-     */
-    @java.lang.Deprecated
-    public static CoreOperatorType valueOf(int value) {
-      return forNumber(value);
-    }
-
-    /**
-     * @param value The numeric wire value of the corresponding enum entry.
-     * @return The enum associated with the given numeric wire value.
-     */
-    public static CoreOperatorType forNumber(int value) {
-      switch (value) {
-        case 0: return SINGLE_SENDER;
-        case 1: return BROADCAST_SENDER;
-        case 2: return FILTER;
-        case 3: return HASH_AGGREGATE;
-        case 4: return HASH_JOIN;
-        case 5: return MERGE_JOIN;
-        case 6: return HASH_PARTITION_SENDER;
-        case 7: return LIMIT;
-        case 8: return MERGING_RECEIVER;
-        case 9: return ORDERED_PARTITION_SENDER;
-        case 10: return PROJECT;
-        case 11: return UNORDERED_RECEIVER;
-        case 12: return RANGE_PARTITION_SENDER;
-        case 13: return SCREEN;
-        case 14: return SELECTION_VECTOR_REMOVER;
-        case 15: return STREAMING_AGGREGATE;
-        case 16: return TOP_N_SORT;
-        case 17: return EXTERNAL_SORT;
-        case 18: return TRACE;
-        case 19: return UNION;
-        case 20: return OLD_SORT;
-        case 21: return PARQUET_ROW_GROUP_SCAN;
-        case 22: return HIVE_SUB_SCAN;
-        case 23: return SYSTEM_TABLE_SCAN;
-        case 24: return MOCK_SUB_SCAN;
-        case 25: return PARQUET_WRITER;
-        case 26: return DIRECT_SUB_SCAN;
-        case 27: return TEXT_WRITER;
-        case 28: return TEXT_SUB_SCAN;
-        case 29: return JSON_SUB_SCAN;
-        case 30: return INFO_SCHEMA_SUB_SCAN;
-        case 31: return COMPLEX_TO_JSON;
-        case 32: return PRODUCER_CONSUMER;
-        case 33: return HBASE_SUB_SCAN;
-        case 34: return WINDOW;
-        case 35: return NESTED_LOOP_JOIN;
-        case 36: return AVRO_SUB_SCAN;
-        case 37: return PCAP_SUB_SCAN;
-        case 38: return KAFKA_SUB_SCAN;
-        case 39: return KUDU_SUB_SCAN;
-        case 40: return FLATTEN;
-        case 41: return LATERAL_JOIN;
-        case 42: return UNNEST;
-        case 43: return HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN;
-        case 44: return JDBC_SCAN;
-        case 45: return REGEX_SUB_SCAN;
-        case 46: return MAPRDB_SUB_SCAN;
-        case 47: return MONGO_SUB_SCAN;
-        case 48: return KUDU_WRITER;
-        case 49: return OPEN_TSDB_SUB_SCAN;
-        case 50: return JSON_WRITER;
-        case 51: return HTPPD_LOG_SUB_SCAN;
-        case 52: return IMAGE_SUB_SCAN;
-        case 53: return SEQUENCE_SUB_SCAN;
-        case 54: return PARTITION_LIMIT;
-        case 55: return PCAPNG_SUB_SCAN;
-        case 56: return RUNTIME_FILTER;
-        case 57: return ROWKEY_JOIN;
-        case 58: return SYSLOG_SUB_SCAN;
-        case 59: return STATISTICS_AGGREGATE;
-        case 60: return UNPIVOT_MAPS;
-        case 61: return STATISTICS_MERGE;
-        case 62: return LTSV_SUB_SCAN;
-        case 63: return HDF5_SUB_SCAN;
-        case 64: return EXCEL_SUB_SCAN;
-        case 65: return SHP_SUB_SCAN;
-        case 66: return METADATA_HANDLER;
-        case 67: return METADATA_CONTROLLER;
-        case 68: return DRUID_SUB_SCAN;
-        case 69: return SPSS_SUB_SCAN;
-        case 70: return HTTP_SUB_SCAN;
-        default: return null;
-      }
-    }
-
-    public static com.google.protobuf.Internal.EnumLiteMap<CoreOperatorType>
-        internalGetValueMap() {
-      return internalValueMap;
-    }
-    private static final com.google.protobuf.Internal.EnumLiteMap<
-        CoreOperatorType> internalValueMap =
-          new com.google.protobuf.Internal.EnumLiteMap<CoreOperatorType>() {
-            public CoreOperatorType findValueByNumber(int number) {
-              return CoreOperatorType.forNumber(number);
-            }
-          };
-
-    public final com.google.protobuf.Descriptors.EnumValueDescriptor
-        getValueDescriptor() {
-      return getDescriptor().getValues().get(ordinal());
-    }
-    public final com.google.protobuf.Descriptors.EnumDescriptor
-        getDescriptorForType() {
-      return getDescriptor();
-    }
-    public static final com.google.protobuf.Descriptors.EnumDescriptor
-        getDescriptor() {
-      return org.apache.drill.exec.proto.UserBitShared.getDescriptor().getEnumTypes().get(3);
-    }
-
-    private static final CoreOperatorType[] VALUES = values();
-
-    public static CoreOperatorType valueOf(
-        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
-      if (desc.getType() != getDescriptor()) {
-        throw new java.lang.IllegalArgumentException(
-          "EnumValueDescriptor is not for this type.");
-      }
-      return VALUES[desc.getIndex()];
-    }
-
-    private final int value;
-
-    private CoreOperatorType(int value) {
-      this.value = value;
-    }
-
-    // @@protoc_insertion_point(enum_scope:exec.shared.CoreOperatorType)
-  }
-
-  /**
    * Protobuf enum {@code exec.shared.SaslStatus}
    */
   public enum SaslStatus
@@ -1225,7 +508,7 @@
     }
     public static final com.google.protobuf.Descriptors.EnumDescriptor
         getDescriptor() {
-      return org.apache.drill.exec.proto.UserBitShared.getDescriptor().getEnumTypes().get(4);
+      return org.apache.drill.exec.proto.UserBitShared.getDescriptor().getEnumTypes().get(3);
     }
 
     private static final SaslStatus[] VALUES = values();
@@ -23269,15 +22552,15 @@
     int getOperatorId();
 
     /**
-     * <code>optional int32 operator_type = 4;</code>
+     * <code>optional int32 operator_type = 4 [deprecated = true];</code>
      * @return Whether the operatorType field is set.
      */
-    boolean hasOperatorType();
+    @java.lang.Deprecated boolean hasOperatorType();
     /**
-     * <code>optional int32 operator_type = 4;</code>
+     * <code>optional int32 operator_type = 4 [deprecated = true];</code>
      * @return The operatorType.
      */
-    int getOperatorType();
+    @java.lang.Deprecated int getOperatorType();
 
     /**
      * <code>optional int64 setup_nanos = 5;</code>
@@ -23346,6 +22629,23 @@
      * @return The waitNanos.
      */
     long getWaitNanos();
+
+    /**
+     * <code>optional string operator_type_name = 10;</code>
+     * @return Whether the operatorTypeName field is set.
+     */
+    boolean hasOperatorTypeName();
+    /**
+     * <code>optional string operator_type_name = 10;</code>
+     * @return The operatorTypeName.
+     */
+    java.lang.String getOperatorTypeName();
+    /**
+     * <code>optional string operator_type_name = 10;</code>
+     * @return The bytes for operatorTypeName.
+     */
+    com.google.protobuf.ByteString
+        getOperatorTypeNameBytes();
   }
   /**
    * Protobuf type {@code exec.shared.OperatorProfile}
@@ -23362,6 +22662,7 @@
     private OperatorProfile() {
       inputProfile_ = java.util.Collections.emptyList();
       metric_ = java.util.Collections.emptyList();
+      operatorTypeName_ = "";
     }
 
     @java.lang.Override
@@ -23443,6 +22744,12 @@
               waitNanos_ = input.readInt64();
               break;
             }
+            case 82: {
+              com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000040;
+              operatorTypeName_ = bs;
+              break;
+            }
             default: {
               if (!parseUnknownField(
                   input, unknownFields, extensionRegistry, tag)) {
@@ -23537,17 +22844,17 @@
     public static final int OPERATOR_TYPE_FIELD_NUMBER = 4;
     private int operatorType_;
     /**
-     * <code>optional int32 operator_type = 4;</code>
+     * <code>optional int32 operator_type = 4 [deprecated = true];</code>
      * @return Whether the operatorType field is set.
      */
-    public boolean hasOperatorType() {
+    @java.lang.Deprecated public boolean hasOperatorType() {
       return ((bitField0_ & 0x00000002) != 0);
     }
     /**
-     * <code>optional int32 operator_type = 4;</code>
+     * <code>optional int32 operator_type = 4 [deprecated = true];</code>
      * @return The operatorType.
      */
-    public int getOperatorType() {
+    @java.lang.Deprecated public int getOperatorType() {
       return operatorType_;
     }
 
@@ -23654,6 +22961,51 @@
       return waitNanos_;
     }
 
+    public static final int OPERATOR_TYPE_NAME_FIELD_NUMBER = 10;
+    private volatile java.lang.Object operatorTypeName_;
+    /**
+     * <code>optional string operator_type_name = 10;</code>
+     * @return Whether the operatorTypeName field is set.
+     */
+    public boolean hasOperatorTypeName() {
+      return ((bitField0_ & 0x00000040) != 0);
+    }
+    /**
+     * <code>optional string operator_type_name = 10;</code>
+     * @return The operatorTypeName.
+     */
+    public java.lang.String getOperatorTypeName() {
+      java.lang.Object ref = operatorTypeName_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          operatorTypeName_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string operator_type_name = 10;</code>
+     * @return The bytes for operatorTypeName.
+     */
+    public com.google.protobuf.ByteString
+        getOperatorTypeNameBytes() {
+      java.lang.Object ref = operatorTypeName_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        operatorTypeName_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
     private byte memoizedIsInitialized = -1;
     @java.lang.Override
     public final boolean isInitialized() {
@@ -23692,6 +23044,9 @@
       if (((bitField0_ & 0x00000020) != 0)) {
         output.writeInt64(9, waitNanos_);
       }
+      if (((bitField0_ & 0x00000040) != 0)) {
+        com.google.protobuf.GeneratedMessageV3.writeString(output, 10, operatorTypeName_);
+      }
       unknownFields.writeTo(output);
     }
 
@@ -23733,6 +23088,9 @@
         size += com.google.protobuf.CodedOutputStream
           .computeInt64Size(9, waitNanos_);
       }
+      if (((bitField0_ & 0x00000040) != 0)) {
+        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, operatorTypeName_);
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -23782,6 +23140,11 @@
         if (getWaitNanos()
             != other.getWaitNanos()) return false;
       }
+      if (hasOperatorTypeName() != other.hasOperatorTypeName()) return false;
+      if (hasOperatorTypeName()) {
+        if (!getOperatorTypeName()
+            .equals(other.getOperatorTypeName())) return false;
+      }
       if (!unknownFields.equals(other.unknownFields)) return false;
       return true;
     }
@@ -23829,6 +23192,10 @@
         hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
             getWaitNanos());
       }
+      if (hasOperatorTypeName()) {
+        hash = (37 * hash) + OPERATOR_TYPE_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getOperatorTypeName().hashCode();
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -23988,6 +23355,8 @@
         }
         waitNanos_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000080);
+        operatorTypeName_ = "";
+        bitField0_ = (bitField0_ & ~0x00000100);
         return this;
       }
 
@@ -24058,6 +23427,10 @@
           result.waitNanos_ = waitNanos_;
           to_bitField0_ |= 0x00000020;
         }
+        if (((from_bitField0_ & 0x00000100) != 0)) {
+          to_bitField0_ |= 0x00000040;
+        }
+        result.operatorTypeName_ = operatorTypeName_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -24177,6 +23550,11 @@
         if (other.hasWaitNanos()) {
           setWaitNanos(other.getWaitNanos());
         }
+        if (other.hasOperatorTypeName()) {
+          bitField0_ |= 0x00000100;
+          operatorTypeName_ = other.operatorTypeName_;
+          onChanged();
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -24486,35 +23864,35 @@
 
       private int operatorType_ ;
       /**
-       * <code>optional int32 operator_type = 4;</code>
+       * <code>optional int32 operator_type = 4 [deprecated = true];</code>
        * @return Whether the operatorType field is set.
        */
-      public boolean hasOperatorType() {
+      @java.lang.Deprecated public boolean hasOperatorType() {
         return ((bitField0_ & 0x00000004) != 0);
       }
       /**
-       * <code>optional int32 operator_type = 4;</code>
+       * <code>optional int32 operator_type = 4 [deprecated = true];</code>
        * @return The operatorType.
        */
-      public int getOperatorType() {
+      @java.lang.Deprecated public int getOperatorType() {
         return operatorType_;
       }
       /**
-       * <code>optional int32 operator_type = 4;</code>
+       * <code>optional int32 operator_type = 4 [deprecated = true];</code>
        * @param value The operatorType to set.
        * @return This builder for chaining.
        */
-      public Builder setOperatorType(int value) {
+      @java.lang.Deprecated public Builder setOperatorType(int value) {
         bitField0_ |= 0x00000004;
         operatorType_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional int32 operator_type = 4;</code>
+       * <code>optional int32 operator_type = 4 [deprecated = true];</code>
        * @return This builder for chaining.
        */
-      public Builder clearOperatorType() {
+      @java.lang.Deprecated public Builder clearOperatorType() {
         bitField0_ = (bitField0_ & ~0x00000004);
         operatorType_ = 0;
         onChanged();
@@ -24908,6 +24286,90 @@
         onChanged();
         return this;
       }
+
+      private java.lang.Object operatorTypeName_ = "";
+      /**
+       * <code>optional string operator_type_name = 10;</code>
+       * @return Whether the operatorTypeName field is set.
+       */
+      public boolean hasOperatorTypeName() {
+        return ((bitField0_ & 0x00000100) != 0);
+      }
+      /**
+       * <code>optional string operator_type_name = 10;</code>
+       * @return The operatorTypeName.
+       */
+      public java.lang.String getOperatorTypeName() {
+        java.lang.Object ref = operatorTypeName_;
+        if (!(ref instanceof java.lang.String)) {
+          com.google.protobuf.ByteString bs =
+              (com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            operatorTypeName_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string operator_type_name = 10;</code>
+       * @return The bytes for operatorTypeName.
+       */
+      public com.google.protobuf.ByteString
+          getOperatorTypeNameBytes() {
+        java.lang.Object ref = operatorTypeName_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          operatorTypeName_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string operator_type_name = 10;</code>
+       * @param value The operatorTypeName to set.
+       * @return This builder for chaining.
+       */
+      public Builder setOperatorTypeName(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000100;
+        operatorTypeName_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string operator_type_name = 10;</code>
+       * @return This builder for chaining.
+       */
+      public Builder clearOperatorTypeName() {
+        bitField0_ = (bitField0_ & ~0x00000100);
+        operatorTypeName_ = getDefaultInstance().getOperatorTypeName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string operator_type_name = 10;</code>
+       * @param value The bytes for operatorTypeName to set.
+       * @return This builder for chaining.
+       */
+      public Builder setOperatorTypeNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000100;
+        operatorTypeName_ = value;
+        onChanged();
+        return this;
+      }
       @java.lang.Override
       public final Builder setUnknownFields(
           final com.google.protobuf.UnknownFieldSet unknownFields) {
@@ -29034,68 +28496,33 @@
       "y_used\030\007 \001(\003\022\027\n\017max_memory_used\030\010 \001(\003\022(\n" +
       "\010endpoint\030\t \001(\0132\026.exec.DrillbitEndpoint\022" +
       "\023\n\013last_update\030\n \001(\003\022\025\n\rlast_progress\030\013 " +
-      "\001(\003\"\377\001\n\017OperatorProfile\0221\n\rinput_profile" +
+      "\001(\003\"\237\002\n\017OperatorProfile\0221\n\rinput_profile" +
       "\030\001 \003(\0132\032.exec.shared.StreamProfile\022\023\n\013op" +
-      "erator_id\030\003 \001(\005\022\025\n\roperator_type\030\004 \001(\005\022\023" +
-      "\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess_nanos\030\006 \001" +
-      "(\003\022#\n\033peak_local_memory_allocated\030\007 \001(\003\022" +
-      "(\n\006metric\030\010 \003(\0132\030.exec.shared.MetricValu" +
-      "e\022\022\n\nwait_nanos\030\t \001(\003\"B\n\rStreamProfile\022\017" +
-      "\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 \001(\003\022\017\n\007sche" +
-      "mas\030\003 \001(\003\"J\n\013MetricValue\022\021\n\tmetric_id\030\001 " +
-      "\001(\005\022\022\n\nlong_value\030\002 \001(\003\022\024\n\014double_value\030" +
-      "\003 \001(\001\")\n\010Registry\022\035\n\003jar\030\001 \003(\0132\020.exec.sh" +
-      "ared.Jar\"/\n\003Jar\022\014\n\004name\030\001 \001(\t\022\032\n\022functio" +
-      "n_signature\030\002 \003(\t\"W\n\013SaslMessage\022\021\n\tmech" +
-      "anism\030\001 \001(\t\022\014\n\004data\030\002 \001(\014\022\'\n\006status\030\003 \001(" +
-      "\0162\027.exec.shared.SaslStatus*5\n\nRpcChannel" +
-      "\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER\020" +
-      "\002*V\n\tQueryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010" +
-      "PHYSICAL\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPARED_ST" +
-      "ATEMENT\020\005*\207\001\n\rFragmentState\022\013\n\007SENDING\020\000" +
-      "\022\027\n\023AWAITING_ALLOCATION\020\001\022\013\n\007RUNNING\020\002\022\014" +
-      "\n\010FINISHED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILED\020\005\022" +
-      "\032\n\026CANCELLATION_REQUESTED\020\006*\236\013\n\020CoreOper" +
-      "atorType\022\021\n\rSINGLE_SENDER\020\000\022\024\n\020BROADCAST" +
-      "_SENDER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_AGGREGATE\020" +
-      "\003\022\r\n\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN\020\005\022\031\n\025HASH" +
-      "_PARTITION_SENDER\020\006\022\t\n\005LIMIT\020\007\022\024\n\020MERGIN" +
-      "G_RECEIVER\020\010\022\034\n\030ORDERED_PARTITION_SENDER" +
-      "\020\t\022\013\n\007PROJECT\020\n\022\026\n\022UNORDERED_RECEIVER\020\013\022" +
-      "\032\n\026RANGE_PARTITION_SENDER\020\014\022\n\n\006SCREEN\020\r\022" +
-      "\034\n\030SELECTION_VECTOR_REMOVER\020\016\022\027\n\023STREAMI" +
-      "NG_AGGREGATE\020\017\022\016\n\nTOP_N_SORT\020\020\022\021\n\rEXTERN" +
-      "AL_SORT\020\021\022\t\n\005TRACE\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_S" +
-      "ORT\020\024\022\032\n\026PARQUET_ROW_GROUP_SCAN\020\025\022\021\n\rHIV" +
-      "E_SUB_SCAN\020\026\022\025\n\021SYSTEM_TABLE_SCAN\020\027\022\021\n\rM" +
-      "OCK_SUB_SCAN\020\030\022\022\n\016PARQUET_WRITER\020\031\022\023\n\017DI" +
-      "RECT_SUB_SCAN\020\032\022\017\n\013TEXT_WRITER\020\033\022\021\n\rTEXT" +
-      "_SUB_SCAN\020\034\022\021\n\rJSON_SUB_SCAN\020\035\022\030\n\024INFO_S" +
-      "CHEMA_SUB_SCAN\020\036\022\023\n\017COMPLEX_TO_JSON\020\037\022\025\n" +
-      "\021PRODUCER_CONSUMER\020 \022\022\n\016HBASE_SUB_SCAN\020!" +
-      "\022\n\n\006WINDOW\020\"\022\024\n\020NESTED_LOOP_JOIN\020#\022\021\n\rAV" +
-      "RO_SUB_SCAN\020$\022\021\n\rPCAP_SUB_SCAN\020%\022\022\n\016KAFK" +
-      "A_SUB_SCAN\020&\022\021\n\rKUDU_SUB_SCAN\020\'\022\013\n\007FLATT" +
-      "EN\020(\022\020\n\014LATERAL_JOIN\020)\022\n\n\006UNNEST\020*\022,\n(HI" +
-      "VE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN\020+" +
-      "\022\r\n\tJDBC_SCAN\020,\022\022\n\016REGEX_SUB_SCAN\020-\022\023\n\017M" +
-      "APRDB_SUB_SCAN\020.\022\022\n\016MONGO_SUB_SCAN\020/\022\017\n\013" +
-      "KUDU_WRITER\0200\022\026\n\022OPEN_TSDB_SUB_SCAN\0201\022\017\n" +
-      "\013JSON_WRITER\0202\022\026\n\022HTPPD_LOG_SUB_SCAN\0203\022\022" +
-      "\n\016IMAGE_SUB_SCAN\0204\022\025\n\021SEQUENCE_SUB_SCAN\020" +
-      "5\022\023\n\017PARTITION_LIMIT\0206\022\023\n\017PCAPNG_SUB_SCA" +
-      "N\0207\022\022\n\016RUNTIME_FILTER\0208\022\017\n\013ROWKEY_JOIN\0209" +
-      "\022\023\n\017SYSLOG_SUB_SCAN\020:\022\030\n\024STATISTICS_AGGR" +
-      "EGATE\020;\022\020\n\014UNPIVOT_MAPS\020<\022\024\n\020STATISTICS_" +
-      "MERGE\020=\022\021\n\rLTSV_SUB_SCAN\020>\022\021\n\rHDF5_SUB_S" +
-      "CAN\020?\022\022\n\016EXCEL_SUB_SCAN\020@\022\020\n\014SHP_SUB_SCA" +
-      "N\020A\022\024\n\020METADATA_HANDLER\020B\022\027\n\023METADATA_CO" +
-      "NTROLLER\020C\022\022\n\016DRUID_SUB_SCAN\020D\022\021\n\rSPSS_S" +
-      "UB_SCAN\020E\022\021\n\rHTTP_SUB_SCAN\020F*g\n\nSaslStat" +
-      "us\022\020\n\014SASL_UNKNOWN\020\000\022\016\n\nSASL_START\020\001\022\024\n\020" +
-      "SASL_IN_PROGRESS\020\002\022\020\n\014SASL_SUCCESS\020\003\022\017\n\013" +
-      "SASL_FAILED\020\004B.\n\033org.apache.drill.exec.p" +
-      "rotoB\rUserBitSharedH\001"
+      "erator_id\030\003 \001(\005\022\031\n\roperator_type\030\004 \001(\005B\002" +
+      "\030\001\022\023\n\013setup_nanos\030\005 \001(\003\022\025\n\rprocess_nanos" +
+      "\030\006 \001(\003\022#\n\033peak_local_memory_allocated\030\007 " +
+      "\001(\003\022(\n\006metric\030\010 \003(\0132\030.exec.shared.Metric" +
+      "Value\022\022\n\nwait_nanos\030\t \001(\003\022\032\n\022operator_ty" +
+      "pe_name\030\n \001(\t\"B\n\rStreamProfile\022\017\n\007record" +
+      "s\030\001 \001(\003\022\017\n\007batches\030\002 \001(\003\022\017\n\007schemas\030\003 \001(" +
+      "\003\"J\n\013MetricValue\022\021\n\tmetric_id\030\001 \001(\005\022\022\n\nl" +
+      "ong_value\030\002 \001(\003\022\024\n\014double_value\030\003 \001(\001\")\n" +
+      "\010Registry\022\035\n\003jar\030\001 \003(\0132\020.exec.shared.Jar" +
+      "\"/\n\003Jar\022\014\n\004name\030\001 \001(\t\022\032\n\022function_signat" +
+      "ure\030\002 \003(\t\"W\n\013SaslMessage\022\021\n\tmechanism\030\001 " +
+      "\001(\t\022\014\n\004data\030\002 \001(\014\022\'\n\006status\030\003 \001(\0162\027.exec" +
+      ".shared.SaslStatus*5\n\nRpcChannel\022\017\n\013BIT_" +
+      "CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER\020\002*V\n\tQue" +
+      "ryType\022\007\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010PHYSICAL" +
+      "\020\003\022\r\n\tEXECUTION\020\004\022\026\n\022PREPARED_STATEMENT\020" +
+      "\005*\207\001\n\rFragmentState\022\013\n\007SENDING\020\000\022\027\n\023AWAI" +
+      "TING_ALLOCATION\020\001\022\013\n\007RUNNING\020\002\022\014\n\010FINISH" +
+      "ED\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILED\020\005\022\032\n\026CANCE" +
+      "LLATION_REQUESTED\020\006*g\n\nSaslStatus\022\020\n\014SAS" +
+      "L_UNKNOWN\020\000\022\016\n\nSASL_START\020\001\022\024\n\020SASL_IN_P" +
+      "ROGRESS\020\002\022\020\n\014SASL_SUCCESS\020\003\022\017\n\013SASL_FAIL" +
+      "ED\020\004B.\n\033org.apache.drill.exec.protoB\rUse" +
+      "rBitSharedH\001"
     };
     descriptor = com.google.protobuf.Descriptors.FileDescriptor
       .internalBuildGeneratedFileFrom(descriptorData,
@@ -29205,7 +28632,7 @@
     internal_static_exec_shared_OperatorProfile_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_exec_shared_OperatorProfile_descriptor,
-        new java.lang.String[] { "InputProfile", "OperatorId", "OperatorType", "SetupNanos", "ProcessNanos", "PeakLocalMemoryAllocated", "Metric", "WaitNanos", });
+        new java.lang.String[] { "InputProfile", "OperatorId", "OperatorType", "SetupNanos", "ProcessNanos", "PeakLocalMemoryAllocated", "Metric", "WaitNanos", "OperatorTypeName", });
     internal_static_exec_shared_StreamProfile_descriptor =
       getDescriptor().getMessageTypes().get(17);
     internal_static_exec_shared_StreamProfile_fieldAccessorTable = new
diff --git a/protocol/src/main/protobuf/UserBitShared.proto b/protocol/src/main/protobuf/UserBitShared.proto
index f7b7b02..45bdd84 100644
--- a/protocol/src/main/protobuf/UserBitShared.proto
+++ b/protocol/src/main/protobuf/UserBitShared.proto
@@ -280,12 +280,13 @@
 message OperatorProfile {
   repeated StreamProfile input_profile = 1;
   optional int32 operator_id = 3;
-  optional int32 operator_type = 4;
+  optional int32 operator_type = 4 [deprecated = true];
   optional int64 setup_nanos = 5;
   optional int64 process_nanos = 6;
   optional int64 peak_local_memory_allocated = 7;
   repeated MetricValue metric = 8;
   optional int64 wait_nanos = 9;
+  optional string operator_type_name = 10;
 }
 
 message StreamProfile {
@@ -310,80 +311,6 @@
   CANCELLATION_REQUESTED = 6;
 }
 
-enum CoreOperatorType {
-  SINGLE_SENDER = 0;
-  BROADCAST_SENDER = 1;
-  FILTER = 2;
-  HASH_AGGREGATE = 3;
-  HASH_JOIN = 4;
-  MERGE_JOIN = 5;
-  HASH_PARTITION_SENDER = 6;
-  LIMIT = 7;
-  MERGING_RECEIVER = 8;
-  ORDERED_PARTITION_SENDER = 9;
-  PROJECT = 10;
-  UNORDERED_RECEIVER = 11;
-  RANGE_PARTITION_SENDER = 12;
-  SCREEN = 13;
-  SELECTION_VECTOR_REMOVER = 14;
-  STREAMING_AGGREGATE = 15;
-  TOP_N_SORT = 16;
-  EXTERNAL_SORT = 17;
-  TRACE = 18;
-  UNION = 19;
-  OLD_SORT = 20;
-  PARQUET_ROW_GROUP_SCAN = 21;
-  HIVE_SUB_SCAN = 22;
-  SYSTEM_TABLE_SCAN = 23;
-  MOCK_SUB_SCAN = 24;
-  PARQUET_WRITER = 25;
-  DIRECT_SUB_SCAN = 26;
-  TEXT_WRITER = 27;
-  TEXT_SUB_SCAN = 28;
-  JSON_SUB_SCAN = 29;
-  INFO_SCHEMA_SUB_SCAN = 30;
-  COMPLEX_TO_JSON = 31;
-  PRODUCER_CONSUMER = 32;
-  HBASE_SUB_SCAN = 33;
-  WINDOW = 34;
-  NESTED_LOOP_JOIN = 35;
-  AVRO_SUB_SCAN = 36;
-  PCAP_SUB_SCAN = 37;
-  KAFKA_SUB_SCAN = 38;
-  KUDU_SUB_SCAN = 39;
-  FLATTEN = 40;
-  LATERAL_JOIN = 41;
-  UNNEST = 42;
-  HIVE_DRILL_NATIVE_PARQUET_ROW_GROUP_SCAN = 43;
-  JDBC_SCAN = 44;
-  REGEX_SUB_SCAN = 45;
-  MAPRDB_SUB_SCAN = 46;
-  MONGO_SUB_SCAN = 47;
-  KUDU_WRITER = 48;
-  OPEN_TSDB_SUB_SCAN = 49;
-  JSON_WRITER = 50;
-  HTPPD_LOG_SUB_SCAN = 51;
-  IMAGE_SUB_SCAN = 52;
-  SEQUENCE_SUB_SCAN = 53;
-  PARTITION_LIMIT = 54;
-  PCAPNG_SUB_SCAN = 55;
-  RUNTIME_FILTER = 56;
-  ROWKEY_JOIN = 57;
-  SYSLOG_SUB_SCAN = 58;
-  STATISTICS_AGGREGATE = 59;
-  UNPIVOT_MAPS = 60;
-  STATISTICS_MERGE = 61;
-  LTSV_SUB_SCAN = 62;
-  HDF5_SUB_SCAN = 63;
-  EXCEL_SUB_SCAN = 64;
-  SHP_SUB_SCAN = 65;
-  METADATA_HANDLER = 66;
-  METADATA_CONTROLLER = 67;
-  DRUID_SUB_SCAN = 68;
-  SPSS_SUB_SCAN = 69;
-  HTTP_SUB_SCAN = 70;
-}
-
 /* Registry that contains list of jars, each jar contains its name and list of function signatures.
 Structure example:
 REGISTRY    -> Jar1.jar   -> upper(VARCHAR-REQUIRED)
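With CoreOperatorType removed, a profile identifies its operator by name. A minimal sketch using the generated builder shown above (field values are illustrative):

    // Hypothetical illustration only; the operator name string is supplied by the
    // operator implementation instead of a fixed protobuf enum value.
    UserBitShared.OperatorProfile profile = UserBitShared.OperatorProfile.newBuilder()
        .setOperatorId(3)
        .setOperatorTypeName("PARQUET_ROW_GROUP_SCAN")  // replaces the deprecated operator_type int
        .setProcessNanos(1_200_000L)
        .build();
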
diff --git a/start-build-env.sh b/start-build-env.sh
new file mode 100755
index 0000000..0740149
--- /dev/null
+++ b/start-build-env.sh
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -e               # exit on error
+
+cd "$(dirname "$0")" # connect to root
+
+DOCKER_DIR=dev-support/docker
+DOCKER_FILE="${DOCKER_DIR}/Dockerfile"
+
+CONTAINER_NAME=drill-dev-${USER}-$$
+
+#CPU_ARCH=$(echo "$MACHTYPE" | cut -d- -f1)
+#if [ "$CPU_ARCH" = "aarch64" ]; then
+#  DOCKER_FILE="${DOCKER_DIR}/Dockerfile_aarch64"
+#fi
+
+docker build -t drill-build -f $DOCKER_FILE $DOCKER_DIR
+
+USER_NAME=${SUDO_USER:=$USER}
+USER_ID=$(id -u "${USER_NAME}")
+
+if [ "$(uname -s)" = "Darwin" ]; then
+  GROUP_ID=100
+fi
+
+if [ "$(uname -s)" = "Linux" ]; then
+  GROUP_ID=$(id -g "${USER_NAME}")
+  # man docker-run
+  # When using SELinux, mounted directories may not be accessible
+  # to the container. To work around this, with Docker prior to 1.7
+  # one needs to run the "chcon -Rt svirt_sandbox_file_t" command on
+  # the directories. With Docker 1.7 and later the z mount option
+  # does this automatically.
+  if command -v selinuxenabled >/dev/null && selinuxenabled; then
+    DCKR_VER=$(docker -v|
+    awk '$1 == "Docker" && $2 == "version" {split($3,ver,".");print ver[1]"."ver[2]}')
+    DCKR_MAJ=${DCKR_VER%.*}
+    DCKR_MIN=${DCKR_VER#*.}
+    if [ "${DCKR_MAJ}" -eq 1 ] && [ "${DCKR_MIN}" -ge 7 ] ||
+        [ "${DCKR_MAJ}" -gt 1 ]; then
+      V_OPTS=:z
+    else
+      for d in "${PWD}" "${HOME}/.m2"; do
+        ctx=$(stat --printf='%C' "$d"|cut -d':' -f3)
+        if [ "$ctx" != svirt_sandbox_file_t ] && [ "$ctx" != container_file_t ]; then
+          printf 'INFO: SELinux is enabled.\n'
+          printf '\tMounted %s may not be accessible to the container.\n' "$d"
+          printf 'INFO: If so, on the host, run the following command:\n'
+          printf '\t# chcon -Rt svirt_sandbox_file_t %s\n' "$d"
+        fi
+      done
+    fi
+  fi
+fi
+
+# Set the home directory in the Docker container.
+DOCKER_HOME_DIR=${DOCKER_HOME_DIR:-/home/${USER_NAME}}
+
+DOCKER_GROUP_ID=$(getent group docker | cut -d':' -f3)
+
+docker build -t "drill-build-${USER_ID}" - <<UserSpecificDocker
+FROM drill-build
+RUN rm -f /var/log/faillog /var/log/lastlog
+RUN groupadd --non-unique -g ${GROUP_ID} ${USER_NAME}
+RUN groupmod -g ${DOCKER_GROUP_ID} docker
+RUN useradd -g ${GROUP_ID} -G docker -u ${USER_ID} -k /root -m ${USER_NAME} -d "${DOCKER_HOME_DIR}"
+RUN echo "${USER_NAME} ALL=NOPASSWD: ALL" > "/etc/sudoers.d/drill-build-${USER_ID}"
+ENV HOME "${DOCKER_HOME_DIR}"
+
+UserSpecificDocker
+
+echo ""
+echo "Docker image build completed."
+echo "=============================================================================================="
+echo ""
+
+# If this env variable is empty, docker will be started
+# in non-interactive mode
+DOCKER_INTERACTIVE_RUN=${DOCKER_INTERACTIVE_RUN-"-i -t"}
+
+DOCKER_SOCKET_MOUNT=""
+if [ -S /var/run/docker.sock ];
+then
+  DOCKER_SOCKET_MOUNT="-v /var/run/docker.sock:/var/run/docker.sock${V_OPTS:-}"
+  echo "Enabling Docker support with the docker build environment."
+else
+  echo "There is NO Docker support with the docker build environment."
+fi
+
+COMMAND=( "$@" )
+if [ $# -eq 0 ];
+then
+  COMMAND=( "bash" )
+fi
+
+[ -d "${HOME}/.gradle_drill_build_env" ] || mkdir -p "${HOME}/.gradle_drill_build_env"
+
+# By mapping the .m2 directory you can run mvn install from
+# within the container and use the result on your normal
+# system. This also speeds up subsequent builds significantly,
+# because the dependencies are downloaded only once.
+docker run --rm=true ${DOCKER_INTERACTIVE_RUN}                         \
+           --name "${CONTAINER_NAME}"                                  \
+           -v "${HOME}/.m2:${DOCKER_HOME_DIR}/.m2${V_OPTS:-}"          \
+           -v "${HOME}/.gnupg:${DOCKER_HOME_DIR}/.gnupg${V_OPTS:-}"    \
+           -v "${HOME}/.gradle_drill_build_env:${DOCKER_HOME_DIR}/.gradle${V_OPTS:-}"  \
+           -v "${PWD}:${DOCKER_HOME_DIR}/drill${V_OPTS:-}"              \
+           -w "${DOCKER_HOME_DIR}/drill"                                \
+           ${DOCKER_SOCKET_MOUNT}                                      \
+           -u "${USER_ID}"                                             \
+           "drill-build-${USER_ID}" "${COMMAND[@]}"
diff --git a/tools/fmpp/pom.xml b/tools/fmpp/pom.xml
index a85aa9b..af35f1a 100644
--- a/tools/fmpp/pom.xml
+++ b/tools/fmpp/pom.xml
@@ -29,7 +29,7 @@
 
   <artifactId>drill-fmpp-maven-plugin</artifactId>
   <packaging>maven-plugin</packaging>
-  <name>tools/freemarker codegen tooling</name>
+  <name>Drill : Tools : Freemarker codegen</name>
 
   <dependencies>
     <dependency>
diff --git a/tools/pom.xml b/tools/pom.xml
index 09b0aba..ede3e4e 100644
--- a/tools/pom.xml
+++ b/tools/pom.xml
@@ -29,7 +29,7 @@
   <groupId>org.apache.drill.tools</groupId>
   <artifactId>tools-parent</artifactId>
   <packaging>pom</packaging>
-  <name>tools/Parent Pom</name>
+  <name>Drill : Tools : </name>
 
   <modules>
     <module>fmpp</module>