DRILL-7406: Update Calcite to 1.21.0

1. DRILL-7386 - added tests to TestHiveStructs.
2. DRILL-4527 - the DrillAvgVarianceConvertlet can't be removed without test failures.
3. DRILL-6215 - switched to a prepared statement in JdbcRecordReader.
4. DRILL-6905 - added a test to TestExampleQueries.
5. DRILL-7415 - Fixed JDBC SHOW TABLES when two tables with the same name are present in different schemas.
6. DRILL-7340 - Fixed JDBC filter pushdown when several JDBC data sources are enabled.
7. Split SqlConverter into multiple source files.
8. Minor refactorings in JDBC and other places.

closes #1940
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
index 373e76d..f170b91 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
@@ -21,12 +21,13 @@
 import org.apache.calcite.sql.SqlFunctionCategory;
 import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.SqlReturnTypeInference;
 
 public class HiveUDFOperator extends SqlFunction {
   public HiveUDFOperator(String name, SqlReturnTypeInference sqlReturnTypeInference) {
     super(new SqlIdentifier(name, SqlParserPos.ZERO), sqlReturnTypeInference, null,
-        VarArgOperandTypeChecker.INSTANCE, null, SqlFunctionCategory.USER_DEFINED_FUNCTION);
+        OperandTypes.VARIADIC, null, SqlFunctionCategory.USER_DEFINED_FUNCTION);
   }
 
   // Consider Hive functions to be non-deterministic so they are not folded at
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/complex_types/TestHiveStructs.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/complex_types/TestHiveStructs.java
index 4b0750d..3729e3a 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/complex_types/TestHiveStructs.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/complex_types/TestHiveStructs.java
@@ -476,4 +476,25 @@
         .baselineValues(2, mapOf("n", 5, "u", "Text"))
         .go();
   }
+
+  @Test // DRILL-7386
+  public void countStructColumn() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT COUNT(str_n0) cnt FROM hive.struct_tbl")
+        .unOrdered()
+        .baselineColumns("cnt")
+        .baselineValues(3L)
+        .go();
+  }
+
+  @Test // DRILL-7386
+  public void typeOfFunctions() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT sqlTypeOf(%1$s) sto, typeOf(%1$s) to, modeOf(%1$s) mo, drillTypeOf(%1$s) dto " +
+            "FROM hive.struct_tbl LIMIT 1", "str_n0")
+        .unOrdered()
+        .baselineColumns("sto", "to", "mo", "dto")
+        .baselineValues("STRUCT", "MAP", "NOT NULL", "MAP")
+        .go();
+  }
 }
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/CapitalizingJdbcSchema.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/CapitalizingJdbcSchema.java
new file mode 100644
index 0000000..c4e500f
--- /dev/null
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/CapitalizingJdbcSchema.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.jdbc;
+
+import javax.sql.DataSource;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.calcite.adapter.jdbc.JdbcConvention;
+import org.apache.calcite.adapter.jdbc.JdbcSchema;
+import org.apache.calcite.schema.Function;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.schema.Table;
+import org.apache.calcite.sql.SqlDialect;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class CapitalizingJdbcSchema extends AbstractSchema {
+
+  private static final Logger logger = LoggerFactory.getLogger(CapitalizingJdbcSchema.class);
+
+  private final Map<String, CapitalizingJdbcSchema> schemaMap;
+  private final JdbcSchema inner;
+  private final boolean caseSensitive;
+
+  CapitalizingJdbcSchema(List<String> parentSchemaPath, String name, DataSource dataSource,
+                         SqlDialect dialect, JdbcConvention convention, String catalog, String schema, boolean caseSensitive) {
+    super(parentSchemaPath, name);
+    this.schemaMap = new HashMap<>();
+    this.inner = new JdbcSchema(dataSource, dialect, convention, catalog, schema);
+    this.caseSensitive = caseSensitive;
+  }
+
+  @Override
+  public String getTypeName() {
+    return JdbcStorageConfig.NAME;
+  }
+
+  @Override
+  public Collection<Function> getFunctions(String name) {
+    return inner.getFunctions(name);
+  }
+
+  @Override
+  public Set<String> getFunctionNames() {
+    return inner.getFunctionNames();
+  }
+
+  @Override
+  public CapitalizingJdbcSchema getSubSchema(String name) {
+    return schemaMap.get(name);
+  }
+
+  void setHolder(SchemaPlus plusOfThis) {
+    for (String s : getSubSchemaNames()) {
+      CapitalizingJdbcSchema inner = getSubSchema(s);
+      SchemaPlus holder = plusOfThis.add(s, inner);
+      inner.setHolder(holder);
+    }
+  }
+
+  @Override
+  public Set<String> getSubSchemaNames() {
+    return schemaMap.keySet();
+  }
+
+  @Override
+  public Set<String> getTableNames() {
+    if (isCatalogSchema()) {
+      return Collections.emptySet();
+    }
+    return inner.getTableNames();
+  }
+
+  @Override
+  public Table getTable(String name) {
+    if (isCatalogSchema()) {
+      logger.warn("Failed attempt to find table '{}' in catalog schema '{}'", name, getName());
+      return null;
+    }
+    Table table = inner.getTable(name);
+    if (table == null && !areTableNamesCaseSensitive()) {
+      // Oracle and H2 change unquoted identifiers to uppercase.
+      table = inner.getTable(name.toUpperCase());
+      if (table == null) {
+        // Postgres changes unquoted identifiers to lowercase.
+        table = inner.getTable(name.toLowerCase());
+      }
+    }
+    return table;
+  }
+
+  @Override
+  public boolean areTableNamesCaseSensitive() {
+    return caseSensitive;
+  }
+
+  @Override
+  public CapitalizingJdbcSchema getDefaultSchema() {
+    return isCatalogSchema()
+        ? schemaMap.values().iterator().next().getDefaultSchema()
+        : this;
+  }
+
+  private boolean isCatalogSchema() {
+    return !schemaMap.isEmpty();
+  }
+
+  void addSubSchema(CapitalizingJdbcSchema subSchema) {
+    schemaMap.put(subSchema.getName(), subSchema);
+  }
+}
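
Note on the getTable() fallback above: databases normalize unquoted identifiers differently, so the lookup tries the name as given, then uppercase, then lowercase. A minimal self-contained sketch of that order follows; the HashMap stands in for the backing JdbcSchema, and the table names are hypothetical.

    import java.util.HashMap;
    import java.util.Map;

    public class CaseFallbackSketch {

      public static void main(String[] args) {
        Map<String, String> tables = new HashMap<>();
        tables.put("PERSON", "h2/oracle style");  // stored uppercase
        tables.put("address", "postgres style");  // stored lowercase

        System.out.println(lookup(tables, "PeRsOn", false));  // h2/oracle style
        System.out.println(lookup(tables, "Address", false)); // postgres style
      }

      // Mirrors CapitalizingJdbcSchema.getTable(): exact match first, then
      // uppercase, then lowercase when table names are case insensitive.
      static String lookup(Map<String, String> tables, String name, boolean caseSensitive) {
        String table = tables.get(name);
        if (table == null && !caseSensitive) {
          table = tables.get(name.toUpperCase());
          if (table == null) {
            table = tables.get(name.toLowerCase());
          }
        }
        return table;
      }
    }
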
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcConvention.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcConvention.java
new file mode 100644
index 0000000..d8ea6bf
--- /dev/null
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcConvention.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.jdbc;
+
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.calcite.adapter.jdbc.JdbcConvention;
+import org.apache.calcite.adapter.jdbc.JdbcRules;
+import org.apache.calcite.adapter.jdbc.JdbcRules.JdbcFilterRule;
+import org.apache.calcite.adapter.jdbc.JdbcRules.JdbcProjectRule;
+import org.apache.calcite.adapter.jdbc.JdbcToEnumerableConverterRule;
+import org.apache.calcite.linq4j.tree.ConstantUntypedNull;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.sql.SqlDialect;
+import org.apache.drill.exec.planner.RuleInstance;
+import org.apache.drill.exec.planner.logical.DrillRelFactories;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableSet;
+
+/**
+ * Convention with the set of rules to register for a JDBC plugin
+ */
+class DrillJdbcConvention extends JdbcConvention {
+
+  /**
+   * Unwanted rules from Calcite's JdbcRules are filtered out using this set
+   */
+  private static final Set<Class<? extends RelOptRule>> EXCLUDED_CALCITE_RULES = ImmutableSet.of(
+      JdbcToEnumerableConverterRule.class, JdbcFilterRule.class, JdbcProjectRule.class);
+
+  private final ImmutableSet<RelOptRule> rules;
+  private final JdbcStoragePlugin plugin;
+
+  DrillJdbcConvention(SqlDialect dialect, String name, JdbcStoragePlugin plugin) {
+    super(dialect, ConstantUntypedNull.INSTANCE, name);
+    this.plugin = plugin;
+    List<RelOptRule> calciteJdbcRules = JdbcRules.rules(this, DrillRelFactories.LOGICAL_BUILDER).stream()
+        .filter(rule -> !EXCLUDED_CALCITE_RULES.contains(rule.getClass()))
+        .collect(Collectors.toList());
+    this.rules = ImmutableSet.<RelOptRule>builder()
+        .addAll(calciteJdbcRules)
+        .add(JdbcIntermediatePrelConverterRule.INSTANCE)
+        .add(new JdbcDrelConverterRule(this))
+        .add(new DrillJdbcRuleBase.DrillJdbcProjectRule(this))
+        .add(new DrillJdbcRuleBase.DrillJdbcFilterRule(this))
+        .add(RuleInstance.FILTER_SET_OP_TRANSPOSE_RULE)
+        .add(RuleInstance.PROJECT_REMOVE_RULE)
+        .build();
+  }
+
+  @Override
+  public void register(RelOptPlanner planner) {
+    rules.forEach(planner::addRule);
+  }
+
+  Set<RelOptRule> getRules() {
+    return rules;
+  }
+
+  JdbcStoragePlugin getPlugin() {
+    return plugin;
+  }
+}
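
The per-plugin rule set above is what lets several enabled JDBC plugins coexist (DRILL-7340): each plugin gets its own convention, and each converter rule's description embeds the convention name. Calcite planners require rule descriptions to be unique, so colliding names would make the second plugin's registration fail. A toy sketch of that constraint follows; the map-backed registry is a stand-in for the planner, not Calcite API, and the descriptions are illustrative.

    import java.util.HashMap;
    import java.util.Map;

    public class RuleRegistrySketch {

      // Stand-in for a planner registry keyed by rule description.
      private final Map<String, Object> rulesByDescription = new HashMap<>();

      void addRule(String description, Object rule) {
        Object existing = rulesByDescription.putIfAbsent(description, rule);
        if (existing != null && !existing.equals(rule)) {
          throw new IllegalStateException("Rule description is not unique: " + description);
        }
      }

      public static void main(String[] args) {
        RuleRegistrySketch planner = new RuleRegistrySketch();
        // Distinct per-convention descriptions let both plugins register.
        planner.addRule("JDBC_DREL_Converter.h2", new Object());
        planner.addRule("JDBC_DREL_Converter.h2o", new Object());
      }
    }
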
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java
index 0238938..ac95867 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/DrillJdbcRuleBase.java
@@ -59,7 +59,7 @@
   static class DrillJdbcProjectRule extends DrillJdbcRuleBase {
 
     public DrillJdbcProjectRule(JdbcConvention out) {
-      super(LogicalProject.class, Convention.NONE, out, "JdbcProjectRule");
+      super(LogicalProject.class, Convention.NONE, out, "DrillJdbcProjectRule");
     }
 
     public RelNode convert(RelNode rel) {
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcCatalogSchema.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcCatalogSchema.java
new file mode 100644
index 0000000..823191c
--- /dev/null
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcCatalogSchema.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.jdbc;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.calcite.schema.Schema;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.schema.Table;
+import org.apache.calcite.sql.SqlDialect;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.apache.drill.exec.store.SchemaFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class JdbcCatalogSchema extends AbstractSchema {
+
+  private static final Logger logger = LoggerFactory.getLogger(JdbcCatalogSchema.class);
+
+  /**
+   * Maps a lowercase name to its catalog or schema instance
+   */
+  private final Map<String, CapitalizingJdbcSchema> schemaMap;
+  private final CapitalizingJdbcSchema defaultSchema;
+
+  JdbcCatalogSchema(String name, DataSource source, SqlDialect dialect, DrillJdbcConvention convention, boolean caseSensitive) {
+    super(Collections.emptyList(), name);
+    this.schemaMap = new HashMap<>();
+    String connectionSchemaName = null;
+    try (Connection con = source.getConnection();
+         ResultSet set = con.getMetaData().getCatalogs()) {
+      connectionSchemaName = con.getSchema();
+      while (set.next()) {
+        final String catalogName = set.getString(1);
+        CapitalizingJdbcSchema schema = new CapitalizingJdbcSchema(
+            getSchemaPath(), catalogName, source, dialect, convention, catalogName, null, caseSensitive);
+        schemaMap.put(schema.getName(), schema);
+      }
+    } catch (SQLException e) {
+      logger.warn("Failure while attempting to load JDBC schema.", e);
+    }
+
+    // unable to read catalog list.
+    if (schemaMap.isEmpty()) {
+
+      // try to add a list of schemas to the schema map.
+      boolean schemasAdded = addSchemas(source, dialect, convention, caseSensitive);
+
+      if (!schemasAdded) {
+        // there were no schemas, just create a default one (the jdbc system doesn't support catalogs/schemas).
+        schemaMap.put(SchemaFactory.DEFAULT_WS_NAME, new CapitalizingJdbcSchema(Collections.emptyList(), name, source, dialect, convention, null, null, caseSensitive));
+      }
+    } else {
+      // We already have catalogs. Add schemas within the context of their catalogs.
+      addSchemas(source, dialect, convention, caseSensitive);
+    }
+
+    defaultSchema = determineDefaultSchema(connectionSchemaName);
+  }
+
+  private CapitalizingJdbcSchema determineDefaultSchema(String connectionSchemaName) {
+    CapitalizingJdbcSchema connSchema;
+    if (connectionSchemaName == null ||
+        (connSchema = schemaMap.get(connectionSchemaName.toLowerCase())) == null) {
+      connSchema = schemaMap.values().iterator().next();
+    }
+    return connSchema.getDefaultSchema();
+  }
+
+  void setHolder(SchemaPlus plusOfThis) {
+    for (String s : getSubSchemaNames()) {
+      CapitalizingJdbcSchema inner = getSubSchema(s);
+      SchemaPlus holder = plusOfThis.add(s, inner);
+      inner.setHolder(holder);
+    }
+  }
+
+  private boolean addSchemas(DataSource source, SqlDialect dialect, DrillJdbcConvention convention, boolean caseSensitive) {
+    boolean added = false;
+    try (Connection con = source.getConnection();
+         ResultSet set = con.getMetaData().getSchemas()) {
+      while (set.next()) {
+        final String schemaName = set.getString(1);
+        final String catalogName = set.getString(2);
+
+        String parentKey = StringUtils.lowerCase(catalogName);
+        CapitalizingJdbcSchema parentSchema = schemaMap.get(parentKey);
+        if (parentSchema == null) {
+          CapitalizingJdbcSchema schema = new CapitalizingJdbcSchema(getSchemaPath(), schemaName, source, dialect,
+              convention, catalogName, schemaName, caseSensitive);
+
+          // if a catalog schema doesn't exist, we'll add this at the top level.
+          schemaMap.put(schema.getName(), schema);
+        } else {
+          CapitalizingJdbcSchema schema = new CapitalizingJdbcSchema(parentSchema.getSchemaPath(), schemaName,
+              source, dialect,
+              convention, catalogName, schemaName, caseSensitive);
+          parentSchema.addSubSchema(schema);
+        }
+        added = true;
+      }
+    } catch (SQLException e) {
+      logger.warn("Failure while attempting to load JDBC schema.", e);
+    }
+
+    return added;
+  }
+
+
+  @Override
+  public String getTypeName() {
+    return JdbcStorageConfig.NAME;
+  }
+
+  @Override
+  public Schema getDefaultSchema() {
+    return defaultSchema;
+  }
+
+  @Override
+  public CapitalizingJdbcSchema getSubSchema(String name) {
+    return schemaMap.get(name);
+  }
+
+  @Override
+  public Set<String> getSubSchemaNames() {
+    return schemaMap.keySet();
+  }
+
+  @Override
+  public Table getTable(String name) {
+    if (defaultSchema != null) {
+      try {
+        return defaultSchema.getTable(name);
+      } catch (RuntimeException e) {
+        logger.warn("Failure while attempting to read table '{}' from JDBC source.", name, e);
+      }
+    }
+
+    // no table was found.
+    return null;
+  }
+
+  @Override
+  public Set<String> getTableNames() {
+    return defaultSchema.getTableNames();
+  }
+
+  @Override
+  public boolean areTableNamesCaseSensitive() {
+    return defaultSchema.areTableNamesCaseSensitive();
+  }
+}
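
Because catalogs now sit between the plugin and its schemas, table paths gain a catalog segment: the H2 tests below address tables as h2.tmp.drill_h2_test.person (plugin.catalog.schema.table) instead of h2.drill_h2_test.person. The default schema is resolved from the connection's current schema when it is present in the map, falling back to the first registered entry. A minimal sketch of that resolution, with a LinkedHashMap standing in for schemaMap and hypothetical names:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class DefaultSchemaSketch {

      public static void main(String[] args) {
        // Keys are lowercased, as in JdbcCatalogSchema.schemaMap.
        Map<String, String> schemaMap = new LinkedHashMap<>();
        schemaMap.put("tmp", "catalog tmp");
        schemaMap.put("other", "catalog other");

        System.out.println(resolve(schemaMap, "TMP")); // catalog tmp
        System.out.println(resolve(schemaMap, null));  // first entry: catalog tmp
      }

      // Mirrors determineDefaultSchema(): prefer the connection's schema,
      // matched case-insensitively, else take the first known entry.
      static String resolve(Map<String, String> schemaMap, String connectionSchemaName) {
        String schema = connectionSchemaName == null
            ? null
            : schemaMap.get(connectionSchemaName.toLowerCase());
        return schema != null ? schema : schemaMap.values().iterator().next();
      }
    }
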
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java
new file mode 100644
index 0000000..7107e9f
--- /dev/null
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcDrelConverterRule.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.jdbc;
+
+import java.util.function.Predicate;
+
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.convert.ConverterRule;
+import org.apache.drill.exec.planner.logical.DrillRel;
+import org.apache.drill.exec.planner.logical.DrillRelFactories;
+
+class JdbcDrelConverterRule extends ConverterRule {
+
+  JdbcDrelConverterRule(DrillJdbcConvention in) {
+    super(RelNode.class, (Predicate<RelNode>) input -> true, in, DrillRel.DRILL_LOGICAL,
+        DrillRelFactories.LOGICAL_BUILDER, "JDBC_DREL_Converter" + in.getName());
+  }
+
+  @Override
+  public RelNode convert(RelNode in) {
+    return new JdbcDrel(in.getCluster(), in.getTraitSet().replace(DrillRel.DRILL_LOGICAL),
+        convert(in, in.getTraitSet().replace(this.getInTrait()).simplify()));
+  }
+}
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcIntermediatePrelConverterRule.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcIntermediatePrelConverterRule.java
new file mode 100644
index 0000000..1fc58b4
--- /dev/null
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcIntermediatePrelConverterRule.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.jdbc;
+
+import java.util.function.Predicate;
+
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.convert.ConverterRule;
+import org.apache.drill.exec.planner.logical.DrillRel;
+import org.apache.drill.exec.planner.logical.DrillRelFactories;
+import org.apache.drill.exec.planner.physical.Prel;
+
+final class JdbcIntermediatePrelConverterRule extends ConverterRule {
+
+  static final JdbcIntermediatePrelConverterRule INSTANCE = new JdbcIntermediatePrelConverterRule();
+
+  private JdbcIntermediatePrelConverterRule() {
+    super(JdbcDrel.class, (Predicate<RelNode>) input -> true, DrillRel.DRILL_LOGICAL,
+        Prel.DRILL_PHYSICAL, DrillRelFactories.LOGICAL_BUILDER, "JDBC_PREL_Converter");
+  }
+
+  @Override
+  public RelNode convert(RelNode in) {
+    return new JdbcIntermediatePrel(
+        in.getCluster(),
+        in.getTraitSet().replace(getOutTrait()),
+        in.getInput(0));
+  }
+}
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java
index 85f88a8..72120e6 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcPrel.java
@@ -40,7 +40,6 @@
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.planner.physical.visitor.PrelVisitor;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
-import org.apache.drill.exec.store.jdbc.JdbcStoragePlugin.DrillJdbcConvention;
 
 /**
  * Represents a JDBC Plan once the child nodes have been rewritten into SQL.
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordReader.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordReader.java
index 5c6def2..370ae1f 100755
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordReader.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcRecordReader.java
@@ -21,10 +21,10 @@
 import java.math.BigDecimal;
 import java.sql.Connection;
 import java.sql.Date;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
-import java.sql.Statement;
 import java.sql.Time;
 import java.sql.Timestamp;
 import java.util.Calendar;
@@ -72,7 +72,7 @@
   private ResultSet resultSet;
   private final String storagePluginName;
   private Connection connection;
-  private Statement statement;
+  private PreparedStatement statement;
   private final String sql;
   private ImmutableList<ValueVector> vectors;
   private ImmutableList<Copier<?>> copiers;
@@ -186,8 +186,8 @@
   public void setup(OperatorContext operatorContext, OutputMutator output) {
     try {
       connection = source.getConnection();
-      statement = connection.createStatement();
-      resultSet = statement.executeQuery(sql);
+      statement = connection.prepareStatement(sql);
+      resultSet = statement.executeQuery();
 
       ResultSetMetaData meta = resultSet.getMetaData();
       int columnsCount = meta.getColumnCount();
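
A standalone sketch of the DRILL-6215 change above, assuming an in-memory H2 database (the URL and table are hypothetical): preparing the statement lets the driver parse and validate the SQL at prepare time, and setup() then executes it and reads the result metadata exactly as before.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;

    public class PreparedStatementSketch {

      public static void main(String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection("jdbc:h2:mem:sketch")) {
          try (PreparedStatement ddl = connection.prepareStatement(
              "create table person (person_id int, first_name varchar(20))")) {
            ddl.execute();
          }
          // Mirrors JdbcRecordReader.setup(): prepare, execute, read metadata.
          try (PreparedStatement statement = connection.prepareStatement("select * from person");
               ResultSet resultSet = statement.executeQuery()) {
            ResultSetMetaData meta = resultSet.getMetaData();
            for (int i = 1; i <= meta.getColumnCount(); i++) {
              System.out.println(meta.getColumnName(i) + " : " + meta.getColumnTypeName(i));
            }
          }
        }
      }
    }
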
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStorageConfig.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStorageConfig.java
index 1c607d6..571490c 100755
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStorageConfig.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStorageConfig.java
@@ -17,6 +17,8 @@
  */
 package org.apache.drill.exec.store.jdbc;
 
+import java.util.Objects;
+
 import com.fasterxml.jackson.annotation.JsonFilter;
 import org.apache.drill.common.logical.StoragePluginConfig;
 
@@ -88,45 +90,14 @@
   public boolean equals(Object obj) {
     if (this == obj) {
       return true;
-    }
-    if (obj == null) {
+    } else if (obj == null || getClass() != obj.getClass()) {
       return false;
     }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    JdbcStorageConfig other = (JdbcStorageConfig) obj;
-    if (caseInsensitiveTableNames != other.caseInsensitiveTableNames) {
-      return false;
-    }
-    if (driver == null) {
-      if (other.driver != null) {
-        return false;
-      }
-    } else if (!driver.equals(other.driver)) {
-      return false;
-    }
-    if (password == null) {
-      if (other.password != null) {
-        return false;
-      }
-    } else if (!password.equals(other.password)) {
-      return false;
-    }
-    if (url == null) {
-      if (other.url != null) {
-        return false;
-      }
-    } else if (!url.equals(other.url)) {
-      return false;
-    }
-    if (username == null) {
-      if (other.username != null) {
-        return false;
-      }
-    } else if (!username.equals(other.username)) {
-      return false;
-    }
-    return true;
+    JdbcStorageConfig conf = (JdbcStorageConfig) obj;
+    return caseInsensitiveTableNames == conf.caseInsensitiveTableNames
+        && Objects.equals(driver, conf.driver)
+        && Objects.equals(password, conf.password)
+        && Objects.equals(url, conf.url)
+        && Objects.equals(username, conf.username);
   }
 }
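
The rewritten equals() uses java.util.Objects for null-safe comparisons. The matching null-safe hashCode() over the same five fields, not shown in this hunk, would be the one-liner below (a sketch assuming no other fields participate in equality):

    @Override
    public int hashCode() {
      // Composite null-safe hash over the fields compared in equals().
      return Objects.hash(driver, url, username, password, caseInsensitiveTableNames);
    }
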
diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java
index ebff371..74a0507 100755
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java
@@ -17,72 +17,27 @@
  */
 package org.apache.drill.exec.store.jdbc;
 
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Predicate;
-
 import javax.sql.DataSource;
+import java.util.Set;
 
-import org.apache.calcite.adapter.jdbc.JdbcConvention;
-import org.apache.calcite.adapter.jdbc.JdbcRules;
 import org.apache.calcite.adapter.jdbc.JdbcSchema;
-import org.apache.calcite.linq4j.tree.ConstantUntypedNull;
-import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.convert.ConverterRule;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.schema.Function;
-import org.apache.calcite.schema.Schema;
 import org.apache.calcite.schema.SchemaPlus;
-import org.apache.calcite.schema.Table;
 import org.apache.calcite.sql.SqlDialect;
 import org.apache.calcite.sql.SqlDialectFactoryImpl;
 import org.apache.commons.dbcp2.BasicDataSource;
-import org.apache.drill.common.JSONOptions;
-import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.exec.ops.OptimizerRulesContext;
-import org.apache.drill.exec.physical.base.AbstractGroupScan;
-import org.apache.drill.exec.planner.RuleInstance;
-import org.apache.drill.exec.planner.logical.DrillRel;
-import org.apache.drill.exec.planner.logical.DrillRelFactories;
-import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.AbstractStoragePlugin;
 import org.apache.drill.exec.store.SchemaConfig;
-import org.apache.drill.exec.store.SchemaFactory;
-import org.apache.drill.exec.store.jdbc.DrillJdbcRuleBase.DrillJdbcFilterRule;
-import org.apache.drill.exec.store.jdbc.DrillJdbcRuleBase.DrillJdbcProjectRule;
-
-import org.apache.drill.shaded.guava.com.google.common.base.Joiner;
-import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableSet;
 
 public class JdbcStoragePlugin extends AbstractStoragePlugin {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(JdbcStoragePlugin.class);
-
-  // Rules from Calcite's JdbcRules class that we want to avoid using.
-  private static String[] RULES_TO_AVOID = {
-      "JdbcToEnumerableConverterRule", "JdbcFilterRule", "JdbcProjectRule"
-  };
-
 
   private final JdbcStorageConfig config;
   private final DataSource source;
   private final SqlDialect dialect;
   private final DrillJdbcConvention convention;
 
-
   public JdbcStoragePlugin(JdbcStorageConfig config, DrillbitContext context, String name) {
     super(context, name);
     this.config = config;
@@ -100,352 +55,18 @@
 
     this.source = source;
     this.dialect = JdbcSchema.createDialect(SqlDialectFactoryImpl.INSTANCE, source);
-    this.convention = new DrillJdbcConvention(dialect, name);
+    this.convention = new DrillJdbcConvention(dialect, name, this);
   }
 
 
-  class DrillJdbcConvention extends JdbcConvention {
-
-    private final ImmutableSet<RelOptRule> rules;
-
-    public DrillJdbcConvention(SqlDialect dialect, String name) {
-      super(dialect, ConstantUntypedNull.INSTANCE, name);
-
-
-      // build rules for this convention.
-      ImmutableSet.Builder<RelOptRule> builder = ImmutableSet.builder();
-
-      builder.add(JDBC_PRULE_INSTANCE);
-      builder.add(new JdbcDrelConverterRule(this));
-      builder.add(new DrillJdbcProjectRule(this));
-      builder.add(new DrillJdbcFilterRule(this));
-
-      outside: for (RelOptRule rule : JdbcRules.rules(this)) {
-        final String description = rule.toString();
-
-        // we want to black list some rules but the parent Calcite package is all or none.
-        // Therefore, we remove rules with names we don't like.
-        for(String black : RULES_TO_AVOID){
-          if(description.equals(black)){
-            continue outside;
-          }
-
-        }
-
-        builder.add(rule);
-      }
-
-      builder.add(RuleInstance.FILTER_SET_OP_TRANSPOSE_RULE);
-      builder.add(RuleInstance.PROJECT_REMOVE_RULE);
-
-      rules = builder.build();
-    }
-
-    @Override
-    public void register(RelOptPlanner planner) {
-      for (RelOptRule rule : rules) {
-        planner.addRule(rule);
-      }
-    }
-
-    public Set<RelOptRule> getRules() {
-      return rules;
-    }
-
-    public JdbcStoragePlugin getPlugin() {
-      return JdbcStoragePlugin.this;
-    }
-  }
-
-  /**
-   * Returns whether a condition is supported by {@link org.apache.calcite.adapter.jdbc.JdbcRules.JdbcJoin}.
-   *
-   * <p>Corresponds to the capabilities of
-   * {@link org.apache.calcite.rel.rel2sql.SqlImplementor#convertConditionToSqlNode}.
-   *
-   * @param node Condition
-   * @return Whether condition is supported
-   */
-  private static boolean canJoinOnCondition(RexNode node) {
-    final List<RexNode> operands;
-    switch (node.getKind()) {
-    case AND:
-    case OR:
-      operands = ((RexCall) node).getOperands();
-      for (RexNode operand : operands) {
-        if (!canJoinOnCondition(operand)) {
-          return false;
-        }
-      }
-      return true;
-
-    case EQUALS:
-    case IS_NOT_DISTINCT_FROM:
-    case NOT_EQUALS:
-    case GREATER_THAN:
-    case GREATER_THAN_OR_EQUAL:
-    case LESS_THAN:
-    case LESS_THAN_OR_EQUAL:
-      operands = ((RexCall) node).getOperands();
-      if ((operands.get(0) instanceof RexInputRef)
-          && (operands.get(1) instanceof RexInputRef)) {
-        return true;
-      }
-      // fall through
-
-    default:
-      return false;
-    }
-  }
-
-  private static final JdbcPrule JDBC_PRULE_INSTANCE = new JdbcPrule();
-
-  private static class JdbcPrule extends ConverterRule {
-
-    private JdbcPrule() {
-      super(JdbcDrel.class, (Predicate<RelNode>) input -> true, DrillRel.DRILL_LOGICAL,
-          Prel.DRILL_PHYSICAL, DrillRelFactories.LOGICAL_BUILDER, "JDBC_PREL_Converter");
-    }
-
-    @Override
-    public RelNode convert(RelNode in) {
-
-      return new JdbcIntermediatePrel(
-          in.getCluster(),
-          in.getTraitSet().replace(getOutTrait()),
-          in.getInput(0));
-    }
-
-  }
-
-  private class JdbcDrelConverterRule extends ConverterRule {
-
-    public JdbcDrelConverterRule(DrillJdbcConvention in) {
-      super(RelNode.class, (Predicate<RelNode>) input -> true, in, DrillRel.DRILL_LOGICAL,
-          DrillRelFactories.LOGICAL_BUILDER, "JDBC_DREL_Converter" + in.getName());
-    }
-
-    @Override
-    public RelNode convert(RelNode in) {
-      return new JdbcDrel(in.getCluster(), in.getTraitSet().replace(DrillRel.DRILL_LOGICAL),
-          convert(in, in.getTraitSet().replace(this.getInTrait()).simplify()));
-    }
-
-  }
-
-  private class CapitalizingJdbcSchema extends AbstractSchema {
-
-    private final Map<String, CapitalizingJdbcSchema> schemaMap = new HashMap<>();
-    private final JdbcSchema inner;
-
-    public CapitalizingJdbcSchema(List<String> parentSchemaPath, String name, DataSource dataSource,
-        SqlDialect dialect, JdbcConvention convention, String catalog, String schema) {
-      super(parentSchemaPath, name);
-      inner = new JdbcSchema(dataSource, dialect, convention, catalog, schema);
-    }
-
-    @Override
-    public String getTypeName() {
-      return JdbcStorageConfig.NAME;
-    }
-
-    @Override
-    public Collection<Function> getFunctions(String name) {
-      return inner.getFunctions(name);
-    }
-
-    @Override
-    public Set<String> getFunctionNames() {
-      return inner.getFunctionNames();
-    }
-
-    @Override
-    public CapitalizingJdbcSchema getSubSchema(String name) {
-      return schemaMap.get(name);
-    }
-
-    void setHolder(SchemaPlus plusOfThis) {
-      for (String s : getSubSchemaNames()) {
-        CapitalizingJdbcSchema inner = getSubSchema(s);
-        SchemaPlus holder = plusOfThis.add(s, inner);
-        inner.setHolder(holder);
-      }
-    }
-
-    @Override
-    public Set<String> getSubSchemaNames() {
-      return schemaMap.keySet();
-    }
-
-    @Override
-    public Set<String> getTableNames() {
-      return inner.getTableNames();
-    }
-
-    public String toString() {
-      return Joiner.on(".").join(getSchemaPath());
-    }
-
-    @Override
-    public Table getTable(String name) {
-      Table table = inner.getTable(name);
-      if (table != null) {
-        return table;
-      }
-      if (!areTableNamesCaseSensitive()) {
-        // Oracle and H2 changes unquoted identifiers to uppercase.
-        table = inner.getTable(name.toUpperCase());
-        if (table != null) {
-          return table;
-        }
-        // Postgres changes unquoted identifiers to lowercase.
-        return inner.getTable(name.toLowerCase());
-      }
-
-      // no table was found.
-      return null;
-    }
-
-    @Override
-    public boolean areTableNamesCaseSensitive() {
-      return !config.areTableNamesCaseInsensitive();
-    }
-  }
-
-  private class JdbcCatalogSchema extends AbstractSchema {
-
-    private final Map<String, CapitalizingJdbcSchema> schemaMap = new HashMap<>();
-    private final CapitalizingJdbcSchema defaultSchema;
-
-    public JdbcCatalogSchema(String name) {
-      super(Collections.emptyList(), name);
-
-      try (Connection con = source.getConnection();
-           ResultSet set = con.getMetaData().getCatalogs()) {
-        while (set.next()) {
-          final String catalogName = set.getString(1);
-          CapitalizingJdbcSchema schema = new CapitalizingJdbcSchema(
-              getSchemaPath(), catalogName, source, dialect, convention, catalogName, null);
-          schemaMap.put(schema.getName(), schema);
-        }
-      } catch (SQLException e) {
-        logger.warn("Failure while attempting to load JDBC schema.", e);
-      }
-
-      // unable to read catalog list.
-      if (schemaMap.isEmpty()) {
-
-        // try to add a list of schemas to the schema map.
-        boolean schemasAdded = addSchemas();
-
-        if (!schemasAdded) {
-          // there were no schemas, just create a default one (the jdbc system doesn't support catalogs/schemas).
-          schemaMap.put(SchemaFactory.DEFAULT_WS_NAME, new CapitalizingJdbcSchema(Collections.emptyList(), name, source, dialect,
-              convention, null, null));
-        }
-      } else {
-        // We already have catalogs. Add schemas in this context of their catalogs.
-        addSchemas();
-      }
-
-      defaultSchema = schemaMap.values().iterator().next();
-    }
-
-    void setHolder(SchemaPlus plusOfThis) {
-      for (String s : getSubSchemaNames()) {
-        CapitalizingJdbcSchema inner = getSubSchema(s);
-        SchemaPlus holder = plusOfThis.add(s, inner);
-        inner.setHolder(holder);
-      }
-    }
-
-    private boolean addSchemas() {
-      boolean added = false;
-      try (Connection con = source.getConnection();
-           ResultSet set = con.getMetaData().getSchemas()) {
-        while (set.next()) {
-          final String schemaName = set.getString(1);
-          final String catalogName = set.getString(2);
-
-          CapitalizingJdbcSchema parentSchema = schemaMap.get(catalogName);
-          if (parentSchema == null) {
-            CapitalizingJdbcSchema schema = new CapitalizingJdbcSchema(getSchemaPath(), schemaName, source, dialect,
-                convention, catalogName, schemaName);
-
-            // if a catalog schema doesn't exist, we'll add this at the top level.
-            schemaMap.put(schema.getName(), schema);
-          } else {
-            CapitalizingJdbcSchema schema = new CapitalizingJdbcSchema(parentSchema.getSchemaPath(), schemaName,
-                source, dialect,
-                convention, catalogName, schemaName);
-            parentSchema.schemaMap.put(schemaName, schema);
-
-          }
-          added = true;
-        }
-      } catch (SQLException e) {
-        logger.warn("Failure while attempting to load JDBC schema.", e);
-      }
-
-      return added;
-    }
-
-
-    @Override
-    public String getTypeName() {
-      return JdbcStorageConfig.NAME;
-    }
-
-    @Override
-    public Schema getDefaultSchema() {
-      return defaultSchema;
-    }
-
-    @Override
-    public CapitalizingJdbcSchema getSubSchema(String name) {
-      return schemaMap.get(name);
-    }
-
-    @Override
-    public Set<String> getSubSchemaNames() {
-      return schemaMap.keySet();
-    }
-
-    @Override
-    public Table getTable(String name) {
-      Schema schema = getDefaultSchema();
-
-      if (schema != null) {
-        try {
-          return schema.getTable(name);
-        } catch (RuntimeException e) {
-          logger.warn("Failure while attempting to read table '{}' from JDBC source.", name, e);
-        }
-      }
-
-      // no table was found.
-      return null;
-    }
-
-    @Override
-    public Set<String> getTableNames() {
-      return defaultSchema.getTableNames();
-    }
-
-    @Override
-    public boolean areTableNamesCaseSensitive() {
-      return defaultSchema.areTableNamesCaseSensitive();
-    }
-  }
-
   @Override
   public void registerSchemas(SchemaConfig config, SchemaPlus parent) {
-    JdbcCatalogSchema schema = new JdbcCatalogSchema(getName());
+    JdbcCatalogSchema schema = new JdbcCatalogSchema(getName(), source, dialect, convention,
+        !this.config.areTableNamesCaseInsensitive());
     SchemaPlus holder = parent.add(getName(), schema);
     schema.setHolder(holder);
   }
 
-
   @Override
   public JdbcStorageConfig getConfig() {
     return config;
@@ -465,12 +86,6 @@
   }
 
   @Override
-  public AbstractGroupScan getPhysicalScan(String userName, JSONOptions selection, List<SchemaPath> columns)
-      throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
   public Set<RelOptRule> getPhysicalOptimizerRules(OptimizerRulesContext context) {
     return convention.getRules();
   }
diff --git a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java
index dacf028..d1bbc09 100644
--- a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java
+++ b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithH2IT.java
@@ -21,13 +21,10 @@
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.expr.fn.impl.DateUtility;
 
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.StoragePluginRegistryImpl;
 import org.apache.drill.exec.util.StoragePluginTestUtils;
 import org.apache.drill.test.ClusterFixture;
 import org.apache.drill.test.ClusterTest;
 import org.h2.tools.RunScript;
-import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -42,6 +39,7 @@
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.core.IsNot.not;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 
 /**
@@ -54,48 +52,28 @@
   private static final String TABLE_NAME = String.format("%s.`%s`", StoragePluginTestUtils.DFS_PLUGIN_NAME, TABLE_PATH);
 
   @BeforeClass
-  public static void initH2() throws Exception {
-    Class.forName("org.h2.Driver");
-    String connString = String.format(
-        "jdbc:h2:%s", dirTestWatcher.getTmpDir().getCanonicalPath());
-
-    try (Connection connection = DriverManager.getConnection(connString, "root", "root")) {
-      URL scriptFile = TestJdbcPluginWithH2IT.class.getClassLoader().getResource("h2-test-data.sql");
-      Assert.assertNotNull("Script for test tables generation 'h2-test-data.sql' " +
-          "cannot be found in test resources", scriptFile);
-      try (FileReader fileReader = new FileReader(scriptFile.getFile())) {
-        RunScript.execute(connection, fileReader);
-      }
-    }
-
+  public static void init() throws Exception {
     startCluster(ClusterFixture.builder(dirTestWatcher));
-
-    JdbcStorageConfig jdbcStorageConfig = new JdbcStorageConfig(
-        "org.h2.Driver",
-        connString,
-        "root",
-        "root",
-        true);
-    jdbcStorageConfig.setEnabled(true);
-
-    String pluginName = "h2";
-    DrillbitContext context = cluster.drillbit().getContext();
-    JdbcStoragePlugin jdbcStoragePlugin = new JdbcStoragePlugin(jdbcStorageConfig,
-        context, pluginName);
-    StoragePluginRegistryImpl pluginRegistry = (StoragePluginRegistryImpl) context.getStorage();
-    pluginRegistry.addPluginToPersistentStoreIfAbsent(pluginName, jdbcStorageConfig, jdbcStoragePlugin);
-  }
-
-  @BeforeClass
-  public static void copyData() {
     dirTestWatcher.copyResourceToRoot(Paths.get(TABLE_PATH));
+    Class.forName("org.h2.Driver");
+    String connString = "jdbc:h2:" + dirTestWatcher.getTmpDir().getCanonicalPath();
+    URL scriptFile = TestJdbcPluginWithH2IT.class.getClassLoader().getResource("h2-test-data.sql");
+    assertNotNull("Script for test tables generation 'h2-test-data.sql' cannot be found in test resources", scriptFile);
+    try (Connection connection = DriverManager.getConnection(connString, "root", "root");
+         FileReader fileReader = new FileReader(scriptFile.getFile())) {
+      RunScript.execute(connection, fileReader);
+    }
+    JdbcStorageConfig jdbcStorageConfig = new JdbcStorageConfig("org.h2.Driver", connString, "root", "root", true);
+    jdbcStorageConfig.setEnabled(true);
+    cluster.defineStoragePlugin(ctx -> new JdbcStoragePlugin(jdbcStorageConfig, ctx, "h2"));
+    cluster.defineStoragePlugin(ctx -> new JdbcStoragePlugin(jdbcStorageConfig, ctx, "h2o"));
   }
 
   @Test
   public void testCrossSourceMultiFragmentJoin() throws Exception {
     try {
       client.alterSession(ExecConstants.SLICE_TARGET, 1);
-      run("select x.person_id, y.salary from h2.drill_h2_test.person x "
+      run("select x.person_id, y.salary from h2.tmp.drill_h2_test.person x "
               + "join %s y on x.person_id = y.person_id ", TABLE_NAME);
     } finally {
       client.resetSession(ExecConstants.SLICE_TARGET);
@@ -109,7 +87,7 @@
         .sqlQuery(
             "select person_id, first_name, last_name, address, city, state, zip, json, bigint_field, smallint_field, " +
                 "numeric_field, boolean_field, double_field, float_field, real_field, time_field, timestamp_field, " +
-                "date_field, clob_field from h2.`drill_h2_test`.person")
+                "date_field, clob_field from h2.tmp.`drill_h2_test`.person")
         .ordered()
         .baselineColumns("person_id", "first_name", "last_name", "address", "city", "state", "zip", "json",
             "bigint_field", "smallint_field", "numeric_field", "boolean_field", "double_field", "float_field",
@@ -135,9 +113,8 @@
 
   @Test
   public void pushdownJoin() throws Exception {
-    run("use h2");
-    String query = "select x.person_id from (select person_id from h2.drill_h2_test.person) x "
-            + "join (select person_id from h2.drill_h2_test.person) y on x.person_id = y.person_id ";
+    String query = "select x.person_id from (select person_id from h2.tmp.drill_h2_test.person) x "
+            + "join (select person_id from h2.tmp.drill_h2_test.person) y on x.person_id = y.person_id ";
 
     String plan = queryBuilder().sql(query).explainText();
 
@@ -148,9 +125,9 @@
   @Test
   public void pushdownJoinAndFilterPushDown() throws Exception {
     String query = "select * from \n" +
-        "h2.drill_h2_test.person e\n" +
+        "h2.tmp.drill_h2_test.person e\n" +
         "INNER JOIN \n" +
-        "h2.drill_h2_test.person s\n" +
+        "h2.tmp.drill_h2_test.person s\n" +
         "ON e.FIRST_NAME = s.FIRST_NAME\n" +
         "WHERE e.LAST_NAME > 'hello'";
 
@@ -164,7 +141,7 @@
 
   @Test
   public void pushdownAggregation() throws Exception {
-    String query = "select count(*) from h2.drill_h2_test.person";
+    String query = "select count(*) from h2.tmp.drill_h2_test.person";
     String plan = queryBuilder().sql(query).explainText();
 
     assertThat("Query plan shouldn't contain Aggregate operator",
@@ -174,12 +151,12 @@
   @Test
   public void pushdownDoubleJoinAndFilter() throws Exception {
     String query = "select * from \n" +
-        "h2.drill_h2_test.person e\n" +
+        "h2.tmp.drill_h2_test.person e\n" +
         "INNER JOIN \n" +
-        "h2.drill_h2_test.person s\n" +
+        "h2.tmp.drill_h2_test.person s\n" +
         "ON e.person_ID = s.person_ID\n" +
         "INNER JOIN \n" +
-        "h2.drill_h2_test.person ed\n" +
+        "h2.tmp.drill_h2_test.person ed\n" +
         "ON e.person_ID = ed.person_ID\n" +
         "WHERE s.first_name > 'abc' and ed.first_name > 'efg'";
 
@@ -191,9 +168,23 @@
         plan, not(containsString("Filter")));
   }
 
+  @Test // DRILL-7340
+  public void twoPluginsPredicatesPushdown() throws Exception {
+    String query = "SELECT * " +
+        "FROM h2.tmp.drill_h2_test.person l " +
+        "INNER JOIN h2o.tmp.drill_h2_test.person r " +
+        "ON l.person_id = r.person_id " +
+        "WHERE l.first_name = 'first_name_1' AND r.last_name = 'last_name_1'";
+    queryBuilder()
+        .sql(query)
+        .planMatcher()
+        .exclude("Filter")
+        .match();
+  }
+
   @Test
   public void showTablesDefaultSchema() throws Exception {
-    run("use h2.drill_h2_test");
+    run("use h2.tmp.drill_h2_test");
     assertEquals(1, queryBuilder().sql("show tables like 'PERSON'").run().recordCount());
 
     // check table names case insensitivity
@@ -202,7 +193,7 @@
 
   @Test
   public void describe() throws Exception {
-    run("use h2.drill_h2_test");
+    run("use h2.tmp.drill_h2_test");
     assertEquals(19, queryBuilder().sql("describe PERSON").run().recordCount());
 
     // check table names case insensitivity
@@ -213,12 +204,12 @@
   public void ensureDrillFunctionsAreNotPushedDown() throws Exception {
     // This should verify that we're not trying to push CONVERT_FROM into the JDBC storage plugin. If we were pushing
     // this function down, the SQL query would fail.
-    run("select CONVERT_FROM(JSON, 'JSON') from h2.drill_h2_test.person where person_ID = 4");
+    run("select CONVERT_FROM(JSON, 'JSON') from h2.tmp.drill_h2_test.person where person_ID = 4");
   }
 
   @Test
   public void pushdownFilter() throws Exception {
-    String query = "select * from h2.drill_h2_test.person where person_ID = 1";
+    String query = "select * from h2.tmp.drill_h2_test.person where person_ID = 1";
     String plan = queryBuilder().sql(query).explainText();
 
     assertThat("Query plan shouldn't contain Filter operator",
@@ -227,19 +218,45 @@
 
   @Test
   public void testCaseInsensitiveTableNames() throws Exception {
-    assertEquals(5, queryBuilder().sql("select * from h2.drill_h2_test.PeRsOn").run().recordCount());
-    assertEquals(5, queryBuilder().sql("select * from h2.drill_h2_test.PERSON").run().recordCount());
-    assertEquals(5, queryBuilder().sql("select * from h2.drill_h2_test.person").run().recordCount());
+    assertEquals(5, queryBuilder().sql("select * from h2.tmp.drill_h2_test.PeRsOn").run().recordCount());
+    assertEquals(5, queryBuilder().sql("select * from h2.tmp.drill_h2_test.PERSON").run().recordCount());
+    assertEquals(5, queryBuilder().sql("select * from h2.tmp.drill_h2_test.person").run().recordCount());
   }
 
   @Test
   public void testJdbcStoragePluginSerDe() throws Exception {
-    String query = "select * from h2.drill_h2_test.PeRsOn";
+    String query = "select * from h2.tmp.drill_h2_test.PeRsOn";
 
     String plan = queryBuilder().sql(query).explainJson();
     assertEquals(5, queryBuilder().physical(plan).run().recordCount());
   }
 
+  @Test // DRILL-7415
+  public void showTablesForPluginDefaultSchema() throws Exception {
+    run("USE h2");
+    String sql = "SHOW TABLES";
+    testBuilder()
+        .sqlQuery(sql)
+        .unOrdered()
+        .baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
+        .baselineValues("h2.tmp.drill_h2_test_1", "PERSON")
+        .go();
+  }
+
+  @Test // DRILL-7415
+  public void showTablesForInformationSchema() throws Exception {
+    run("USE h2.tmp.`INFORMATION_SCHEMA`");
+    String sql = "SHOW TABLES";
+    queryBuilder().sql(sql).printCsv();
+    testBuilder()
+        .sqlQuery(sql)
+        .unOrdered()
+        .expectsNumRecords(33)
+        .csvBaselineFile("h2_information_schema_tables.csv")
+        .baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
+        .go();
+  }
+
   @Test
   public void testJdbcTableTypes() throws Exception {
     String query = "select distinct table_type from information_schema.`tables`";
diff --git a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java
index 6253d11..03ba794 100644
--- a/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java
+++ b/contrib/storage-jdbc/src/test/java/org/apache/drill/exec/store/jdbc/TestJdbcPluginWithMySQLIT.java
@@ -24,7 +24,6 @@
 import com.wix.mysql.distribution.Version;
 import org.apache.drill.categories.JdbcStorageTest;
 import org.apache.drill.exec.expr.fn.impl.DateUtility;
-import org.apache.drill.exec.store.StoragePluginRegistryImpl;
 import org.apache.drill.test.ClusterFixture;
 import org.apache.drill.test.ClusterTest;
 import org.apache.drill.test.QueryTestUtil;
@@ -50,7 +49,7 @@
 
   @BeforeClass
   public static void initMysql() throws Exception {
-    String mysqlPluginName = "mysql";
+    startCluster(ClusterFixture.builder(dirTestWatcher));
     String mysqlDBName = "drill_mysql_test";
     int mysqlPort = QueryTestUtil.getFreePortNumber(2215, 300);
 
@@ -68,40 +67,22 @@
       schemaConfig.withScripts(ScriptResolver.classPathScript("mysql-test-data-linux.sql"));
     }
 
-    mysqld = EmbeddedMysql.anEmbeddedMysql(config)
-        .addSchema(schemaConfig.build())
-        .start();
+    mysqld = EmbeddedMysql.anEmbeddedMysql(config).addSchema(schemaConfig.build()).start();
 
-    startCluster(ClusterFixture.builder(dirTestWatcher));
-
-    StoragePluginRegistryImpl pluginRegistry = (StoragePluginRegistryImpl) cluster.drillbit().getContext().getStorage();
-
-    JdbcStorageConfig jdbcStorageConfig = new JdbcStorageConfig(
-        "com.mysql.cj.jdbc.Driver",
+    JdbcStorageConfig jdbcStorageConfig = new JdbcStorageConfig("com.mysql.cj.jdbc.Driver",
         String.format("jdbc:mysql://localhost:%s/%s?useJDBCCompliantTimezoneShift=true", mysqlPort, mysqlDBName),
-        "mysqlUser",
-        "mysqlPass",
-        false);
+        "mysqlUser", "mysqlPass", false);
     jdbcStorageConfig.setEnabled(true);
 
-    JdbcStoragePlugin jdbcStoragePlugin = new JdbcStoragePlugin(jdbcStorageConfig,
-        cluster.drillbit().getContext(), mysqlPluginName);
-    pluginRegistry.addPluginToPersistentStoreIfAbsent(mysqlPluginName, jdbcStorageConfig, jdbcStoragePlugin);
+    cluster.defineStoragePlugin(ctx -> new JdbcStoragePlugin(jdbcStorageConfig, ctx, "mysql"));
 
     if (osName.startsWith("linux")) {
       // adds storage plugin with case insensitive table names
-      String mysqlCaseSensitivePluginName = "mysqlCaseInsensitive";
-      JdbcStorageConfig jdbcCaseSensitiveStorageConfig = new JdbcStorageConfig(
-          "com.mysql.cj.jdbc.Driver",
+      JdbcStorageConfig jdbcCaseSensitiveStorageConfig = new JdbcStorageConfig("com.mysql.cj.jdbc.Driver",
           String.format("jdbc:mysql://localhost:%s/%s?useJDBCCompliantTimezoneShift=true", mysqlPort, mysqlDBName),
-          "mysqlUser",
-          "mysqlPass",
-          true);
+          "mysqlUser", "mysqlPass", true);
       jdbcCaseSensitiveStorageConfig.setEnabled(true);
-
-      JdbcStoragePlugin jdbcCaseSensitiveStoragePlugin = new JdbcStoragePlugin(jdbcCaseSensitiveStorageConfig,
-          cluster.drillbit().getContext(), mysqlCaseSensitivePluginName);
-      pluginRegistry.addPluginToPersistentStoreIfAbsent(mysqlCaseSensitivePluginName, jdbcCaseSensitiveStorageConfig, jdbcCaseSensitiveStoragePlugin);
+      cluster.defineStoragePlugin(ctx -> new JdbcStoragePlugin(jdbcCaseSensitiveStorageConfig, ctx, "mysqlCaseInsensitive"));
     }
   }
 
diff --git a/contrib/storage-jdbc/src/test/resources/h2-test-data.sql b/contrib/storage-jdbc/src/test/resources/h2-test-data.sql
index a2643fb..6f3ecca 100644
--- a/contrib/storage-jdbc/src/test/resources/h2-test-data.sql
+++ b/contrib/storage-jdbc/src/test/resources/h2-test-data.sql
@@ -54,4 +54,9 @@
           '{ z : { a : 1, b : 2, c : 3 } }', -67, 4, 40.04, false, 4.0, 4.1, 444.00, '16:00:01',
           '2015-06-01 16:00:01', '2015-06-01', 'xxx');
 
-insert into person (person_id) values (5);
\ No newline at end of file
+insert into person (person_id) values (5);
+
+create SCHEMA drill_h2_test_1;
+set schema drill_h2_test_1;
+create table person(person_id INT NOT NULL PRIMARY KEY);
+set schema drill_h2_test;
\ No newline at end of file
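The extra schema above creates a second person table so that DRILL-7415 (SHOW TABLES when identically named tables exist in different schemas) has data to regress against. A hypothetical test sketch over this data (H2 upper-cases unquoted identifiers; the actual test name and baseline live in the JDBC test suite):

    @Test // DRILL-7415, illustrative sketch only
    public void showTablesWithDuplicateNames() throws Exception {
      testBuilder()
          .sqlQuery("SHOW TABLES IN h2.drill_h2_test_1")
          .unOrdered()
          .baselineColumns("TABLE_SCHEMA", "TABLE_NAME")
          .baselineValues("h2.drill_h2_test_1", "PERSON")
          .go();
    }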
diff --git a/contrib/storage-jdbc/src/test/resources/h2_information_schema_tables.csv b/contrib/storage-jdbc/src/test/resources/h2_information_schema_tables.csv
new file mode 100644
index 0000000..971ee0f
--- /dev/null
+++ b/contrib/storage-jdbc/src/test/resources/h2_information_schema_tables.csv
@@ -0,0 +1,33 @@
+h2.tmp.information_schema,CATALOGS
+h2.tmp.information_schema,COLLATIONS
+h2.tmp.information_schema,COLUMNS
+h2.tmp.information_schema,COLUMN_PRIVILEGES
+h2.tmp.information_schema,CONSTANTS
+h2.tmp.information_schema,CONSTRAINTS
+h2.tmp.information_schema,CROSS_REFERENCES
+h2.tmp.information_schema,DOMAINS
+h2.tmp.information_schema,FUNCTION_ALIASES
+h2.tmp.information_schema,FUNCTION_COLUMNS
+h2.tmp.information_schema,HELP
+h2.tmp.information_schema,INDEXES
+h2.tmp.information_schema,IN_DOUBT
+h2.tmp.information_schema,KEY_COLUMN_USAGE
+h2.tmp.information_schema,LOCKS
+h2.tmp.information_schema,QUERY_STATISTICS
+h2.tmp.information_schema,REFERENTIAL_CONSTRAINTS
+h2.tmp.information_schema,RIGHTS
+h2.tmp.information_schema,ROLES
+h2.tmp.information_schema,SCHEMATA
+h2.tmp.information_schema,SEQUENCES
+h2.tmp.information_schema,SESSIONS
+h2.tmp.information_schema,SESSION_STATE
+h2.tmp.information_schema,SETTINGS
+h2.tmp.information_schema,SYNONYMS
+h2.tmp.information_schema,TABLES
+h2.tmp.information_schema,TABLE_CONSTRAINTS
+h2.tmp.information_schema,TABLE_PRIVILEGES
+h2.tmp.information_schema,TABLE_TYPES
+h2.tmp.information_schema,TRIGGERS
+h2.tmp.information_schema,TYPE_INFO
+h2.tmp.information_schema,USERS
+h2.tmp.information_schema,VIEWS
\ No newline at end of file
diff --git a/exec/java-exec/src/main/codegen/data/Parser.tdd b/exec/java-exec/src/main/codegen/data/Parser.tdd
index 3480754..3a4412c 100644
--- a/exec/java-exec/src/main/codegen/data/Parser.tdd
+++ b/exec/java-exec/src/main/codegen/data/Parser.tdd
@@ -75,6 +75,14 @@
   dataTypeParserMethods: [
   ]
 
+  # Binary operator tokens
+  binaryOperatorsTokens: [
+  ]
+
+  # Binary operator initialization
+  extraBinaryExpressions: [
+  ]
+
   # List of files in @includes directory that have parser method
   # implementations for custom SQL statements, literals or types
   # given as part of "statementParserMethods", "literalParserMethods" or
@@ -105,6 +113,7 @@
   # For details please see comment under CALCITE-2405.
    nonReservedKeywords: [
         "A"
+        "ABSENT"
         "ABSOLUTE"
         "ACTION"
         "ADA"
@@ -175,13 +184,16 @@
         "DOY"
         "DYNAMIC_FUNCTION"
         "DYNAMIC_FUNCTION_CODE"
+        "ENCODING"
         "EPOCH"
+        "ERROR"
         "EXCEPTION"
         "EXCLUDE"
         "EXCLUDING"
         "FINAL"
         "FIRST"
         "FOLLOWING"
+        "FORMAT"
         "FORTRAN"
         "FOUND"
         "FRAC_SECOND"
@@ -229,6 +241,7 @@
         "MILLISECOND"
         "MILLENNIUM"
         "MINVALUE"
+        "MONTHS"
         "MORE_"
         "MUMPS"
         "NAME"
@@ -257,6 +270,7 @@
         "PARAMETER_SPECIFIC_SCHEMA"
         "PARTIAL"
         "PASCAL"
+        "PASSING"
         "PASSTHROUGH"
         "PAST"
         "PATH"
@@ -583,7 +597,7 @@
 #       "HAVING",
         "HOLD",
         "HOUR",
-#       # "HOURS", # not a keyword in Calcite
+        "HOURS",
         "IDENTITY",
 #       # "IF", # not a keyword in Calcite
         "IMMEDIATE",
@@ -865,6 +879,16 @@
         "REMOVE"
       ]
 
+  # List of non-reserved keywords to add;
+  # items in this list become non-reserved
+  nonReservedKeywordsToAdd: [
+  ]
+
+  # List of non-reserved keywords to remove;
+  # items in this list become reserved
+  nonReservedKeywordsToRemove: [
+  ]
+
   # List of additional join types. Each is a method with no arguments.
   # Example: LeftSemiJoin()
   joinTypes: [
@@ -876,6 +900,7 @@
   builtinFunctionCallMethods: [
   ]
 
+  includePosixOperators: false,
   includeCompoundIdentifier: false,
   includeBraces: true,
   includeAdditionalDeclarations: false,
diff --git a/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicSchema.java b/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicSchema.java
index 01c38c2..7dffe9a 100644
--- a/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicSchema.java
+++ b/exec/java-exec/src/main/java/org/apache/calcite/jdbc/DynamicSchema.java
@@ -25,7 +25,7 @@
 
 /**
  * Unlike SimpleCalciteSchema, DynamicSchema could have an empty or partial schemaMap, but it could maintain a map of
- * name->SchemaFactory, and only register schema when the corresponsdent name is requested.
+ * name->SchemaFactory, and only registers a schema when the corresponding name is requested.
  */
 public class DynamicSchema extends SimpleCalciteSchema {
 
@@ -44,14 +44,8 @@
     return ret;
   }
 
-  @Override
-  public SchemaPlus plus() {
-    return super.plus();
-  }
-
   public static SchemaPlus createRootSchema(StoragePluginRegistry storages, SchemaConfig schemaConfig) {
     DynamicRootSchema rootSchema = new DynamicRootSchema(storages, schemaConfig);
     return rootSchema.plus();
   }
-
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DrillRelBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DrillRelBuilder.java
index 22597dc..454c458 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DrillRelBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/DrillRelBuilder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.drill.exec.planner;
 
+import java.util.Collections;
+
 import org.apache.calcite.plan.Context;
 import org.apache.calcite.plan.Contexts;
 import org.apache.calcite.plan.RelOptCluster;
@@ -47,7 +49,8 @@
     RelNode relNode = build();
 
     // creates filter with false in the predicate
-    final RelNode filter = filterFactory.createFilter(relNode, cluster.getRexBuilder().makeLiteral(false));
+    final RelNode filter = filterFactory.createFilter(relNode,
+        cluster.getRexBuilder().makeLiteral(false), Collections.emptySet());
     push(filter);
 
     return this;
@@ -56,11 +59,7 @@
   /** Creates a {@link RelBuilderFactory}, a partially-created DrillRelBuilder.
    * Just add a {@link RelOptCluster} and a {@link RelOptSchema} */
   public static RelBuilderFactory proto(final Context context) {
-    return new RelBuilderFactory() {
-      public RelBuilder create(RelOptCluster cluster, RelOptSchema schema) {
-        return new DrillRelBuilder(context, cluster, schema);
-      }
-    };
+    return (cluster, schema) -> new DrillRelBuilder(context, cluster, schema);
   }
 
   /** Creates a {@link RelBuilderFactory} that uses a given set of factories. */
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRelFactories.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRelFactories.java
index bca08a0..bb0b798 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRelFactories.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillRelFactories.java
@@ -111,7 +111,7 @@
    */
   private static class DrillFilterFactoryImpl implements RelFactories.FilterFactory {
     @Override
-    public RelNode createFilter(RelNode child, RexNode condition) {
+    public RelNode createFilter(RelNode child, RexNode condition, Set<CorrelationId> variablesSet) {
       return DrillFilterRel.create(child, condition);
     }
   }
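Calcite 1.21 widens RelFactories.FilterFactory.createFilter with a set of correlation variables, which is why both DrillRelBuilder and the factory above gained the third argument. Drill ignores it, since DrillFilterRel does not model correlated filters. A sketch of the new contract (assuming the three-arg method is the single abstract one, so a lambda suffices):

    import java.util.Set;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.rel.core.CorrelationId;
    import org.apache.calcite.rel.core.RelFactories;
    import org.apache.calcite.rex.RexNode;
    import org.apache.drill.exec.planner.logical.DrillFilterRel;

    // variablesSet carries the correlation ids referenced by the condition;
    // Drill drops it because its logical filter has no notion of correlation.
    RelFactories.FilterFactory drillFilterFactory =
        (RelNode child, RexNode condition, Set<CorrelationId> variablesSet) ->
            DrillFilterRel.create(child, condition);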
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/JoinPrel.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/JoinPrel.java
index d1c3731..3e1d4c6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/JoinPrel.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/JoinPrel.java
@@ -21,8 +21,6 @@
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexChecker;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.sql.validate.SqlValidatorUtil;
@@ -142,7 +140,7 @@
       List<String> rightFields,
       List<Integer> leftKeys,
       List<Integer> rightKeys) {
-    List<RexNode> conjuncts = getConjuncts();
+    List<RexNode> conjuncts = RelOptUtil.conjunctions(this.getCondition());
 
     short i = 0;
     for (Pair<Integer, Integer> pair : Pair.zip(leftKeys, rightKeys)) {
@@ -160,20 +158,6 @@
     }
   }
 
-  // todo: remove this method after CALCITE-3174 is resolved
-  private List<RexNode> getConjuncts() {
-    List<RexNode> conjunctions = RelOptUtil.conjunctions(getCondition());
-    RexBuilder rexBuilder = getCluster().getRexBuilder();
-    for (int i = 0; i < conjunctions.size(); i++) {
-      RexNode node = conjunctions.get(i);
-      if (node instanceof RexCall) {
-        conjunctions.set(i,
-            RelOptUtil.collapseExpandedIsNotDistinctFromExpr((RexCall) node, rexBuilder));
-      }
-    }
-    return conjunctions;
-  }
-
   public boolean isSemiJoin() {
     return isSemiJoin;
   }
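With CALCITE-3174 fixed upstream, IS NOT DISTINCT FROM expressions arrive already collapsed, so the join condition can be decomposed directly and the workaround removed above is obsolete. For illustration:

    // For a condition like  $0 = $3 AND $1 IS NOT DISTINCT FROM $4,
    // RelOptUtil.conjunctions returns the two AND operands as-is; no manual
    // collapseExpandedIsNotDistinctFromExpr pass is needed anymore.
    List<RexNode> conjuncts = RelOptUtil.conjunctions(this.getCondition());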
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/Checker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/Checker.java
index 51320b3..12fbe38 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/Checker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/Checker.java
@@ -51,7 +51,7 @@
   }
 
   private Checker(int size) {
-    range = new FixedRange(size);
+    range = SqlOperandCountRanges.of(size);
   }
 
   private Checker(int min, int max) {
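The hand-rolled FixedRange class (deleted further below) duplicated what Calcite already ships. A quick sketch of the built-in ranges:

    import org.apache.calcite.sql.SqlOperandCountRange;
    import org.apache.calcite.sql.type.SqlOperandCountRanges;

    SqlOperandCountRange exactlyTwo = SqlOperandCountRanges.of(2);
    exactlyTwo.isValidCount(2);   // true
    exactlyTwo.isValidCount(3);   // false
    SqlOperandCountRange oneToThree = SqlOperandCountRanges.between(1, 3);
    oneToThree.isValidCount(3);   // true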
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillAvgVarianceConvertlet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillAvgVarianceConvertlet.java
deleted file mode 100644
index f25ceee..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillAvgVarianceConvertlet.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.planner.sql;
-
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.sql.SqlCall;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlLiteral;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlNumericLiteral;
-import org.apache.calcite.sql.SqlOperatorBinding;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.sql.type.SqlReturnTypeInference;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.sql2rel.SqlRexContext;
-import org.apache.calcite.sql2rel.SqlRexConvertlet;
-import org.apache.calcite.util.Util;
-
-/*
- * This class is adapted from calcite's AvgVarianceConvertlet. The difference being
- * we add a cast to double before we perform the division. The reason we have a separate implementation
- * from calcite's code is because while injecting a similar cast, calcite will look
- * at the output type of the aggregate function which will be 'ANY' at that point and will
- * inject a cast to 'ANY' which does not solve the problem.
- */
-public class DrillAvgVarianceConvertlet implements SqlRexConvertlet {
-
-  private final SqlKind subtype;
-  private static final DrillSqlOperator CastHighOp = new DrillSqlOperator("CastHigh", 1, false,
-      new SqlReturnTypeInference() {
-        @Override
-        public RelDataType inferReturnType(SqlOperatorBinding opBinding) {
-          return TypeInferenceUtils.createCalciteTypeWithNullability(
-              opBinding.getTypeFactory(),
-              SqlTypeName.ANY,
-              opBinding.getOperandType(0).isNullable());
-        }
-      }, false);
-
-  public DrillAvgVarianceConvertlet(SqlKind subtype) {
-    this.subtype = subtype;
-  }
-
-  public RexNode convertCall(SqlRexContext cx, SqlCall call) {
-    assert call.operandCount() == 1;
-    final SqlNode arg = call.operand(0);
-    final SqlNode expr;
-    switch (subtype) {
-      case AVG:
-        expr = expandAvg(arg);
-        break;
-      case STDDEV_POP:
-        expr = expandVariance(arg, true, true);
-        break;
-      case STDDEV_SAMP:
-        expr = expandVariance(arg, false, true);
-        break;
-      case VAR_POP:
-        expr = expandVariance(arg, true, false);
-        break;
-      case VAR_SAMP:
-        expr = expandVariance(arg, false, false);
-        break;
-      default:
-        throw Util.unexpected(subtype);
-    }
-    return cx.convertExpression(expr);
-  }
-
-  private SqlNode expandAvg(
-      final SqlNode arg) {
-    final SqlParserPos pos = SqlParserPos.ZERO;
-    final SqlNode sum =
-        DrillCalciteSqlAggFunctionWrapper.SUM.createCall(pos, arg);
-    final SqlNode count =
-        SqlStdOperatorTable.COUNT.createCall(pos, arg);
-    final SqlNode sumAsDouble =
-        CastHighOp.createCall(pos, sum);
-    return SqlStdOperatorTable.DIVIDE.createCall(
-        pos, sumAsDouble, count);
-  }
-
-  private SqlNode expandVariance(
-      final SqlNode arg,
-      boolean biased,
-      boolean sqrt) {
-    /* stddev_pop(x) ==>
-     *   power(
-     *    (sum(x * x) - sum(x) * sum(x) / count(x))
-     *    / count(x),
-     *    .5)
-
-     * stddev_samp(x) ==>
-     *  power(
-     *    (sum(x * x) - sum(x) * sum(x) / count(x))
-     *    / (count(x) - 1),
-     *    .5)
-
-     * var_pop(x) ==>
-     *    (sum(x * x) - sum(x) * sum(x) / count(x))
-     *    / count(x)
-
-     * var_samp(x) ==>
-     *    (sum(x * x) - sum(x) * sum(x) / count(x))
-     *    / (count(x) - 1)
-     */
-    final SqlParserPos pos = SqlParserPos.ZERO;
-
-    // cast the argument to double
-    final SqlNode castHighArg = CastHighOp.createCall(pos, arg);
-    final SqlNode argSquared =
-        SqlStdOperatorTable.MULTIPLY.createCall(pos, castHighArg, castHighArg);
-    final SqlNode sumArgSquared =
-        DrillCalciteSqlAggFunctionWrapper.SUM.createCall(pos, argSquared);
-    final SqlNode sum =
-        DrillCalciteSqlAggFunctionWrapper.SUM.createCall(pos, castHighArg);
-    final SqlNode sumSquared =
-        SqlStdOperatorTable.MULTIPLY.createCall(pos, sum, sum);
-    final SqlNode count =
-        SqlStdOperatorTable.COUNT.createCall(pos, castHighArg);
-    final SqlNode avgSumSquared =
-        SqlStdOperatorTable.DIVIDE.createCall(
-            pos, sumSquared, count);
-    final SqlNode diff =
-        SqlStdOperatorTable.MINUS.createCall(
-            pos, sumArgSquared, avgSumSquared);
-    final SqlNode denominator;
-    if (biased) {
-      denominator = count;
-    } else {
-      final SqlNumericLiteral one =
-          SqlLiteral.createExactNumeric("1", pos);
-      denominator =
-          SqlStdOperatorTable.MINUS.createCall(
-              pos, count, one);
-    }
-    final SqlNode diffAsDouble =
-        CastHighOp.createCall(pos, diff);
-    final SqlNode div =
-        SqlStdOperatorTable.DIVIDE.createCall(
-            pos, diffAsDouble, denominator);
-    SqlNode result = div;
-    if (sqrt) {
-      final SqlNumericLiteral half =
-          SqlLiteral.createExactNumeric("0.5", pos);
-      result =
-          SqlStdOperatorTable.POWER.createCall(pos, div, half);
-    }
-    return result;
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java
index a9ff49f..f7c525b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillConvertletTable.java
@@ -19,8 +19,10 @@
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 
 import org.apache.calcite.avatica.util.TimeUnit;
@@ -30,106 +32,70 @@
 import org.apache.calcite.sql.SqlBasicCall;
 import org.apache.calcite.sql.SqlCall;
 import org.apache.calcite.sql.SqlIntervalQualifier;
-import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlLiteral;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlNumericLiteral;
 import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlOperatorBinding;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlReturnTypeInference;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.sql2rel.SqlRexConvertlet;
 import org.apache.calcite.sql2rel.SqlRexConvertletTable;
 import org.apache.calcite.sql2rel.StandardConvertletTable;
 import org.apache.drill.exec.planner.sql.parser.DrillCalciteWrapperUtility;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
 
+/**
+ * Convertlet table that allows plugging in custom rex conversions for calls to
+ * Calcite's standard operators.
+ */
 public class DrillConvertletTable implements SqlRexConvertletTable {
 
-  public static HashMap<SqlOperator, SqlRexConvertlet> map = new HashMap<>();
+  public static final SqlRexConvertletTable INSTANCE = new DrillConvertletTable();
+  private static final DrillSqlOperator CastHighOp = new DrillSqlOperator("CastHigh", 1, false,
+      new SqlReturnTypeInference() {
+        @Override
+        public RelDataType inferReturnType(SqlOperatorBinding opBinding) {
+          return TypeInferenceUtils.createCalciteTypeWithNullability(
+              opBinding.getTypeFactory(),
+              SqlTypeName.ANY,
+              opBinding.getOperandType(0).isNullable());
+        }
+      }, false);
 
-  public static SqlRexConvertletTable INSTANCE = new DrillConvertletTable();
+  private final Map<SqlOperator, SqlRexConvertlet> operatorToConvertletMap;
 
-  private static final SqlRexConvertlet SQRT_CONVERTLET = (cx, call) -> {
-    RexNode operand = cx.convertExpression(call.operand(0));
-    return cx.getRexBuilder().makeCall(SqlStdOperatorTable.SQRT, operand);
-  };
-
-  // Rewrites COALESCE function into CASE WHEN IS NOT NULL operand1 THEN operand1...
-  private static final SqlRexConvertlet COALESCE_CONVERTLET = (cx, call) -> {
-    int operandsCount = call.operandCount();
-    if (operandsCount == 1) {
-      return cx.convertExpression(call.operand(0));
-    } else {
-      List<RexNode> caseOperands = new ArrayList<>();
-      for (int i = 0; i < operandsCount - 1; i++) {
-        RexNode caseOperand = cx.convertExpression(call.operand(i));
-        caseOperands.add(cx.getRexBuilder().makeCall(
-            SqlStdOperatorTable.IS_NOT_NULL, caseOperand));
-        caseOperands.add(caseOperand);
-      }
-      caseOperands.add(cx.convertExpression(call.operand(operandsCount - 1)));
-      return cx.getRexBuilder().makeCall(SqlStdOperatorTable.CASE, caseOperands);
-    }
-  };
-
-  // Custom convertlet to avoid rewriting TIMESTAMP_DIFF by Calcite,
-  // since Drill does not support Reinterpret function and does not handle
-  // all Calcite interval representations correctly.
-  private static final SqlRexConvertlet TIMESTAMP_DIFF_CONVERTLET = (cx, call) -> {
-    SqlLiteral unitLiteral = call.operand(0);
-    SqlIntervalQualifier qualifier =
-        new SqlIntervalQualifier(unitLiteral.symbolValue(TimeUnit.class), null, SqlParserPos.ZERO);
-
-    List<RexNode> operands = Arrays.asList(
-        cx.convertExpression(qualifier),
-        cx.convertExpression(call.operand(1)),
-        cx.convertExpression(call.operand(2)));
-
-    RelDataTypeFactory typeFactory = cx.getTypeFactory();
-
-    RelDataType returnType = typeFactory.createTypeWithNullability(
-        typeFactory.createSqlType(SqlTypeName.BIGINT),
-        cx.getValidator().getValidatedNodeType(call.operand(1)).isNullable()
-            || cx.getValidator().getValidatedNodeType(call.operand(2)).isNullable());
-
-    return cx.getRexBuilder().makeCall(returnType,
-        SqlStdOperatorTable.TIMESTAMP_DIFF, operands);
-  };
-
-  private static final SqlRexConvertlet ROW_CONVERTLET = (cx, call) -> {
-    List<RexNode> args = call.getOperandList().stream()
-        .map(cx::convertExpression)
-        .collect(Collectors.toList());
-    return cx.getRexBuilder().makeCall(SqlStdOperatorTable.ROW, args);
-  };
-
-  static {
-    // Use custom convertlet for EXTRACT function
-    map.put(SqlStdOperatorTable.EXTRACT, DrillExtractConvertlet.INSTANCE);
-    // SQRT needs it's own convertlet because calcite overrides it to POWER(x, 0.5)
-    // which is not suitable for Infinity value case
-    map.put(SqlStdOperatorTable.SQRT, SQRT_CONVERTLET);
-    map.put(SqlStdOperatorTable.COALESCE, COALESCE_CONVERTLET);
-    map.put(SqlStdOperatorTable.TIMESTAMP_DIFF, TIMESTAMP_DIFF_CONVERTLET);
-    map.put(SqlStdOperatorTable.AVG, new DrillAvgVarianceConvertlet(SqlKind.AVG));
-    map.put(SqlStdOperatorTable.STDDEV_POP, new DrillAvgVarianceConvertlet(SqlKind.STDDEV_POP));
-    map.put(SqlStdOperatorTable.STDDEV_SAMP, new DrillAvgVarianceConvertlet(SqlKind.STDDEV_SAMP));
-    map.put(SqlStdOperatorTable.STDDEV, new DrillAvgVarianceConvertlet(SqlKind.STDDEV_SAMP));
-    map.put(SqlStdOperatorTable.VAR_POP, new DrillAvgVarianceConvertlet(SqlKind.VAR_POP));
-    map.put(SqlStdOperatorTable.VAR_SAMP, new DrillAvgVarianceConvertlet(SqlKind.VAR_SAMP));
-    map.put(SqlStdOperatorTable.VARIANCE, new DrillAvgVarianceConvertlet(SqlKind.VAR_SAMP));
-    map.put(SqlStdOperatorTable.ROW, ROW_CONVERTLET);
+  private DrillConvertletTable() {
+    operatorToConvertletMap = ImmutableMap.<SqlOperator, SqlRexConvertlet>builder()
+        .put(SqlStdOperatorTable.EXTRACT, extractConvertlet())
+        .put(SqlStdOperatorTable.SQRT, sqrtConvertlet())
+        .put(SqlStdOperatorTable.COALESCE, coalesceConvertlet())
+        .put(SqlStdOperatorTable.TIMESTAMP_DIFF, timestampDiffConvertlet())
+        .put(SqlStdOperatorTable.ROW, rowConvertlet())
+        .put(SqlStdOperatorTable.AVG, avgVarianceConvertlet(DrillConvertletTable::expandAvg))
+        .put(SqlStdOperatorTable.STDDEV_POP, avgVarianceConvertlet(arg -> expandVariance(arg, true, true)))
+        .put(SqlStdOperatorTable.STDDEV_SAMP, avgVarianceConvertlet(arg -> expandVariance(arg, false, true)))
+        .put(SqlStdOperatorTable.STDDEV, avgVarianceConvertlet(arg -> expandVariance(arg, false, true)))
+        .put(SqlStdOperatorTable.VAR_POP, avgVarianceConvertlet(arg -> expandVariance(arg, true, false)))
+        .put(SqlStdOperatorTable.VAR_SAMP, avgVarianceConvertlet(arg -> expandVariance(arg, false, false)))
+        .put(SqlStdOperatorTable.VARIANCE, avgVarianceConvertlet(arg -> expandVariance(arg, false, false)))
+        .build();
   }
 
-  /*
+  /**
    * Lookup the hash table to see if we have a custom convertlet for a given
    * operator, if we don't use StandardConvertletTable.
    */
   @Override
   public SqlRexConvertlet get(SqlCall call) {
+
     SqlRexConvertlet convertlet;
-    if(call.getOperator() instanceof DrillCalciteSqlWrapper) {
+    if (call.getOperator() instanceof DrillCalciteSqlWrapper) {
       final SqlOperator wrapper = call.getOperator();
       final SqlOperator wrapped = DrillCalciteWrapperUtility.extractSqlOperatorFromWrapper(call.getOperator());
-      if ((convertlet = map.get(wrapped)) != null) {
+      if ((convertlet = operatorToConvertletMap.get(wrapped)) != null) {
         return convertlet;
       }
 
@@ -138,14 +104,195 @@
       ((SqlBasicCall) call).setOperator(wrapper);
       return sqlRexConvertlet;
     }
-
-    if ((convertlet = map.get(call.getOperator())) != null) {
+    if ((convertlet = operatorToConvertletMap.get(call.getOperator())) != null) {
       return convertlet;
     }
-
     return StandardConvertletTable.INSTANCE.get(call);
   }
 
-  private DrillConvertletTable() {
+  /**
+   * Custom convertlet to handle extract functions. Calcite rewrites
+   * extract functions as divide and modulo functions, based on the
+   * data type. Drill cannot do that because the data type is not known
+   * until scanning starts, so extract is left unrewritten and treated as
+   * a regular function.
+   */
+  private static SqlRexConvertlet extractConvertlet() {
+    return (cx, call) -> {
+      List<SqlNode> operands = call.getOperandList();
+      List<RexNode> exprs = new LinkedList<>();
+      RelDataTypeFactory typeFactory = cx.getTypeFactory();
+
+      for (SqlNode node : operands) {
+        exprs.add(cx.convertExpression(node));
+      }
+
+      RelDataType returnType;
+      if (call.getOperator() == SqlStdOperatorTable.EXTRACT) {
+        // Legacy behavior: the return type is technically wrong, but
+        // SqlTypeName.BIGINT is kept to avoid conflicting with Calcite's
+        // inference mechanism, which already chose BIGINT in the validation phase.
+        returnType = typeFactory.createSqlType(SqlTypeName.BIGINT);
+      } else {
+        String timeUnit = ((SqlIntervalQualifier) operands.get(0)).timeUnitRange.toString();
+        returnType = typeFactory.createSqlType(TypeInferenceUtils.getSqlTypeNameForTimeUnit(timeUnit));
+      }
+      // Determine nullability using 2nd argument.
+      returnType = typeFactory.createTypeWithNullability(returnType, exprs.get(1).getType().isNullable());
+      return cx.getRexBuilder().makeCall(returnType, call.getOperator(), exprs);
+    };
+  }
+
+  /**
+   * SQRT needs its own convertlet because Calcite rewrites it to POWER(x, 0.5),
+   * which does not handle the Infinity case correctly.
+   */
+  private static SqlRexConvertlet sqrtConvertlet() {
+    return (cx, call) -> {
+      RexNode operand = cx.convertExpression(call.operand(0));
+      return cx.getRexBuilder().makeCall(SqlStdOperatorTable.SQRT, operand);
+    };
+  }
+
+  /**
+   * Rewrites the COALESCE function into CASE WHEN operand1 IS NOT NULL THEN operand1 ...
+   */
+  private static SqlRexConvertlet coalesceConvertlet() {
+    return (cx, call) -> {
+      int operandsCount = call.operandCount();
+      if (operandsCount == 1) {
+        return cx.convertExpression(call.operand(0));
+      } else {
+        List<RexNode> caseOperands = new ArrayList<>();
+        for (int i = 0; i < operandsCount - 1; i++) {
+          RexNode caseOperand = cx.convertExpression(call.operand(i));
+          caseOperands.add(cx.getRexBuilder().makeCall(
+              SqlStdOperatorTable.IS_NOT_NULL, caseOperand));
+          caseOperands.add(caseOperand);
+        }
+        caseOperands.add(cx.convertExpression(call.operand(operandsCount - 1)));
+        return cx.getRexBuilder().makeCall(SqlStdOperatorTable.CASE, caseOperands);
+      }
+    };
+  }
+
+  /**
+   * Custom convertlet to avoid rewriting TIMESTAMP_DIFF by Calcite,
+   * since Drill does not support the Reinterpret function and does not handle
+   * all Calcite interval representations correctly.
+   */
+  private static SqlRexConvertlet timestampDiffConvertlet() {
+    return (cx, call) -> {
+      SqlLiteral unitLiteral = call.operand(0);
+      SqlIntervalQualifier qualifier =
+          new SqlIntervalQualifier(unitLiteral.symbolValue(TimeUnit.class), null, SqlParserPos.ZERO);
+
+      List<RexNode> operands = Arrays.asList(
+          cx.convertExpression(qualifier),
+          cx.convertExpression(call.operand(1)),
+          cx.convertExpression(call.operand(2)));
+
+      RelDataTypeFactory typeFactory = cx.getTypeFactory();
+
+      RelDataType returnType = typeFactory.createTypeWithNullability(
+          typeFactory.createSqlType(SqlTypeName.BIGINT),
+          cx.getValidator().getValidatedNodeType(call.operand(1)).isNullable()
+              || cx.getValidator().getValidatedNodeType(call.operand(2)).isNullable());
+
+      return cx.getRexBuilder().makeCall(returnType,
+          SqlStdOperatorTable.TIMESTAMP_DIFF, operands);
+    };
+  }
+
+  private static SqlRexConvertlet rowConvertlet() {
+    return (cx, call) -> {
+      List<RexNode> args = call.getOperandList().stream()
+          .map(cx::convertExpression)
+          .collect(Collectors.toList());
+      return cx.getRexBuilder().makeCall(SqlStdOperatorTable.ROW, args);
+    };
+  }
+
+  private static SqlRexConvertlet avgVarianceConvertlet(Function<SqlNode, SqlNode> expandFunc) {
+    return (cx, call) -> cx.convertExpression(expandFunc.apply(call.operand(0)));
+  }
+
+  private static SqlNode expandAvg(final SqlNode arg) {
+    SqlNode sum = DrillCalciteSqlAggFunctionWrapper.SUM.createCall(SqlParserPos.ZERO, arg);
+    SqlNode count = SqlStdOperatorTable.COUNT.createCall(SqlParserPos.ZERO, arg);
+    SqlNode sumAsDouble = CastHighOp.createCall(SqlParserPos.ZERO, sum);
+    return SqlStdOperatorTable.DIVIDE.createCall(SqlParserPos.ZERO, sumAsDouble, count);
+  }
+
+  /**
+   * This code is adapted from calcite's AvgVarianceConvertlet. The difference being
+   * we add a cast to double before we perform the division. The reason we have a separate implementation
+   * from calcite's code is because while injecting a similar cast, calcite will look
+   * at the output type of the aggregate function which will be 'ANY' at that point and will
+   * inject a cast to 'ANY' which does not solve the problem.
+   */
+  private static SqlNode expandVariance(SqlNode arg, boolean biased, boolean sqrt) {
+    /* stddev_pop(x) ==>
+     *   power(
+     *    (sum(x * x) - sum(x) * sum(x) / count(x))
+     *    / count(x),
+     *    .5)
+
+     * stddev_samp(x) ==>
+     *  power(
+     *    (sum(x * x) - sum(x) * sum(x) / count(x))
+     *    / (count(x) - 1),
+     *    .5)
+
+     * var_pop(x) ==>
+     *    (sum(x * x) - sum(x) * sum(x) / count(x))
+     *    / count(x)
+
+     * var_samp(x) ==>
+     *    (sum(x * x) - sum(x) * sum(x) / count(x))
+     *    / (count(x) - 1)
+     */
+    final SqlParserPos pos = SqlParserPos.ZERO;
+
+    // cast the argument to double
+    final SqlNode castHighArg = CastHighOp.createCall(pos, arg);
+    final SqlNode argSquared =
+        SqlStdOperatorTable.MULTIPLY.createCall(pos, castHighArg, castHighArg);
+    final SqlNode sumArgSquared =
+        DrillCalciteSqlAggFunctionWrapper.SUM.createCall(pos, argSquared);
+    final SqlNode sum =
+        DrillCalciteSqlAggFunctionWrapper.SUM.createCall(pos, castHighArg);
+    final SqlNode sumSquared =
+        SqlStdOperatorTable.MULTIPLY.createCall(pos, sum, sum);
+    final SqlNode count =
+        SqlStdOperatorTable.COUNT.createCall(pos, castHighArg);
+    final SqlNode avgSumSquared =
+        SqlStdOperatorTable.DIVIDE.createCall(
+            pos, sumSquared, count);
+    final SqlNode diff =
+        SqlStdOperatorTable.MINUS.createCall(
+            pos, sumArgSquared, avgSumSquared);
+    final SqlNode denominator;
+    if (biased) {
+      denominator = count;
+    } else {
+      final SqlNumericLiteral one =
+          SqlLiteral.createExactNumeric("1", pos);
+      denominator =
+          SqlStdOperatorTable.MINUS.createCall(
+              pos, count, one);
+    }
+    final SqlNode diffAsDouble =
+        CastHighOp.createCall(pos, diff);
+    final SqlNode div =
+        SqlStdOperatorTable.DIVIDE.createCall(
+            pos, diffAsDouble, denominator);
+    SqlNode result = div;
+    if (sqrt) {
+      final SqlNumericLiteral half =
+          SqlLiteral.createExactNumeric("0.5", pos);
+      result =
+          SqlStdOperatorTable.POWER.createCall(pos, div, half);
+    }
+    return result;
   }
 }
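Reading off the consolidated convertlets above, the aggregate rewrites performed before sql-to-rel conversion are:

    AVG(x)         ==>  CastHigh(SUM(x)) / COUNT(x)
    VAR_POP(x)     ==>  (SUM(x*x) - SUM(x)*SUM(x)/COUNT(x)) / COUNT(x)
    VAR_SAMP(x)    ==>  (SUM(x*x) - SUM(x)*SUM(x)/COUNT(x)) / (COUNT(x) - 1)
    STDDEV_POP(x)  ==>  POWER(VAR_POP(x), 0.5)
    STDDEV_SAMP(x) ==>  POWER(VAR_SAMP(x), 0.5)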
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillExtractConvertlet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillExtractConvertlet.java
deleted file mode 100644
index 6285756..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillExtractConvertlet.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.planner.sql;
-
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.sql.SqlCall;
-import org.apache.calcite.sql.SqlIntervalQualifier;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.sql2rel.SqlRexContext;
-import org.apache.calcite.sql2rel.SqlRexConvertlet;
-
-public class DrillExtractConvertlet implements SqlRexConvertlet {
-
-  public final static DrillExtractConvertlet INSTANCE = new DrillExtractConvertlet();
-
-  private DrillExtractConvertlet() {
-  }
-
-  /*
-   * Custom convertlet to handle extract functions. Optiq rewrites
-   * extract functions as divide and modulo functions, based on the
-   * data type. We cannot do that in Drill since we don't know the data type
-   * till we start scanning. So we don't rewrite extract and treat it as
-   * a regular function.
-   */
-  @Override
-  public RexNode convertCall(SqlRexContext cx, SqlCall call) {
-    final RexBuilder rexBuilder = cx.getRexBuilder();
-    final List<SqlNode> operands = call.getOperandList();
-    final List<RexNode> exprs = new LinkedList<>();
-
-    String timeUnit = ((SqlIntervalQualifier) operands.get(0)).timeUnitRange.toString();
-
-    RelDataTypeFactory typeFactory = cx.getTypeFactory();
-
-    //RelDataType nullableReturnType =
-
-    for (SqlNode node: operands) {
-       exprs.add(cx.convertExpression(node));
-    }
-
-    final RelDataType returnType;
-    if(call.getOperator() == SqlStdOperatorTable.EXTRACT) {
-      // Legacy code:
-      // The return type is wrong!
-      // Legacy code choose SqlTypeName.BIGINT simply to avoid conflicting against Calcite's inference mechanism
-      // (, which chose BIGINT in validation phase already)
-      // Determine NULL-able using 2nd argument's Null-able.
-      returnType = typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.BIGINT), exprs.get(1).getType().isNullable());
-    } else {
-      // Determine NULL-able using 2nd argument's Null-able.
-      returnType = typeFactory.createTypeWithNullability(
-          typeFactory.createSqlType(
-              TypeInferenceUtils.getSqlTypeNameForTimeUnit(timeUnit)),
-          exprs.get(1).getType().isNullable());
-    }
-
-    return rexBuilder.makeCall(returnType, call.getOperator(), exprs);
-  }
-}
-
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
index a957e84..d402257 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
@@ -187,7 +187,7 @@
         if (calciteOperator == SqlStdOperatorTable.UNARY_MINUS || calciteOperator == SqlStdOperatorTable.UNARY_PLUS) {
           drillOpName = calciteOperator.getName();
         } else {
-          drillOpName = FunctionCallFactory.replaceOpWithFuncName(calciteOperator.getName());
+          drillOpName = FunctionCallFactory.convertToDrillFunctionName(calciteOperator.getName());
         }
 
         final List<DrillFuncHolder> drillFuncHolders = getFunctionListWithInference(drillOpName);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java
index 8102d92..a698669 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.planner.sql;
 
+import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.SqlOperandTypeChecker;
 import org.apache.calcite.util.Optionality;
 import org.apache.calcite.sql.SqlAggFunction;
@@ -89,7 +90,7 @@
       return new DrillSqlAggOperator(
           name,
           functions,
-          isVarArg ? VarArgOperandTypeChecker.INSTANCE : Checker.getChecker(argCountMin, argCountMax),
+          isVarArg ? OperandTypes.VARIADIC : Checker.getChecker(argCountMin, argCountMax),
           TypeInferenceUtils.getDrillSqlReturnTypeInference(
               name,
               functions));
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperatorWithoutInference.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperatorWithoutInference.java
index 7e09b9b..304bef8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperatorWithoutInference.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperatorWithoutInference.java
@@ -20,6 +20,7 @@
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.sql.validate.SqlValidator;
 import org.apache.calcite.sql.validate.SqlValidatorScope;
@@ -30,7 +31,7 @@
   public DrillSqlAggOperatorWithoutInference(String name, int argCount, boolean isVarArg) {
     super(name,
         new ArrayList<>(),
-        isVarArg ? VarArgOperandTypeChecker.INSTANCE : Checker.getChecker(argCount, argCount),
+        isVarArg ? OperandTypes.VARIADIC : Checker.getChecker(argCount, argCount),
         DynamicReturnType.INSTANCE);
   }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java
index bc12bf1..e75c68c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java
@@ -21,6 +21,7 @@
 import java.util.Collection;
 import java.util.List;
 
+import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.SqlOperandTypeChecker;
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 import org.apache.calcite.rel.type.RelDataType;
@@ -190,7 +191,7 @@
       return new DrillSqlOperator(
           name,
           functions,
-          isVarArg ? VarArgOperandTypeChecker.INSTANCE : Checker.getChecker(argCountMin, argCountMax),
+          isVarArg ? OperandTypes.VARIADIC : Checker.getChecker(argCountMin, argCountMax),
           isDeterministic,
           TypeInferenceUtils.getDrillSqlReturnTypeInference(
               name,
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperatorWithoutInference.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperatorWithoutInference.java
index ebacffa..3a82e04 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperatorWithoutInference.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperatorWithoutInference.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.planner.sql;
 
+import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
@@ -36,7 +37,7 @@
   public DrillSqlOperatorWithoutInference(String name, int argCount, TypeProtos.MajorType returnType, boolean isDeterminisitic, boolean isNiladic, boolean isVarArg) {
     super(name,
         new ArrayList<>(),
-        isVarArg ? VarArgOperandTypeChecker.INSTANCE : Checker.getChecker(argCount, argCount),
+        isVarArg ? OperandTypes.VARIADIC : Checker.getChecker(argCount, argCount),
         isDeterminisitic,
         DynamicReturnType.INSTANCE,
         isNiladic);
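The same swap recurs across all the operator classes in this patch: Drill's VarArgOperandTypeChecker is replaced by Calcite's built-in variadic checker, which accepts any operand count. Illustration:

    import org.apache.calcite.sql.type.OperandTypes;
    import org.apache.calcite.sql.type.SqlOperandTypeChecker;

    SqlOperandTypeChecker checker = OperandTypes.VARIADIC;
    checker.getOperandCountRange().isValidCount(0);   // true
    checker.getOperandCountRange().isValidCount(42);  // true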
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
index c26e39f..e71ff89 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
@@ -52,6 +52,7 @@
 import org.apache.drill.exec.planner.sql.parser.DrillSqlResetOption;
 import org.apache.drill.exec.planner.sql.parser.DrillSqlSetOption;
 import org.apache.drill.exec.planner.sql.parser.SqlSchema;
+import org.apache.drill.exec.planner.sql.conversion.SqlConverter;
 import org.apache.drill.exec.testing.ControlsInjector;
 import org.apache.drill.exec.testing.ControlsInjectorFactory;
 import org.apache.drill.exec.util.Pointer;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/FixedRange.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/FixedRange.java
deleted file mode 100644
index edc4409..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/FixedRange.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.planner.sql;
-
-import org.apache.calcite.sql.SqlOperandCountRange;
-
-class FixedRange implements SqlOperandCountRange{
-
-  private final int size;
-
-  public FixedRange(int size) {
-    super();
-    this.size = size;
-  }
-
-  @Override
-  public boolean isValidCount(int count) {
-    return count == size;
-  }
-
-  @Override
-  public int getMin() {
-    return size;
-  }
-
-  @Override
-  public int getMax() {
-    return size;
-  }
-
-}
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
index fb7a683..c442a89 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
@@ -41,29 +41,18 @@
    *
    * @param defaultSchema Reference to the default schema in complete schema tree.
    * @param schemaPath Schema path to search.
-   * @return SchemaPlus object.
+   * @return SchemaPlus object from default or root schema, or null if not found.
    */
   public static SchemaPlus findSchema(final SchemaPlus defaultSchema, final List<String> schemaPath) {
     if (schemaPath.size() == 0) {
       return defaultSchema;
     }
-
-    SchemaPlus schema;
-    if ((schema = searchSchemaTree(defaultSchema, schemaPath)) != null) {
-      return schema;
+    SchemaPlus schema = searchSchemaTree(defaultSchema, schemaPath);
+    SchemaPlus rootSchema;
+    if (schema == null && (rootSchema = rootSchema(defaultSchema)) != defaultSchema) {
+      schema = searchSchemaTree(rootSchema, schemaPath);
     }
-
-    SchemaPlus rootSchema = defaultSchema;
-    while(rootSchema.getParentSchema() != null) {
-      rootSchema = rootSchema.getParentSchema();
-    }
-
-    if (rootSchema != defaultSchema &&
-        (schema = searchSchemaTree(rootSchema, schemaPath)) != null) {
-      return schema;
-    }
-
-    return null;
+    return schema;
   }
 
   /**
@@ -327,4 +316,16 @@
     }
   }
 
+  /**
+   * Finds the root of the given schema.
+   * @param schema current schema
+   * @return root schema
+   */
+  public static SchemaPlus rootSchema(SchemaPlus schema) {
+    while (!isRootSchema(schema)) {
+      schema = schema.getParentSchema();
+    }
+    return schema;
+  }
+
 }
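The new rootSchema helper centralizes the parent walk that findSchema previously inlined, so callers can resolve against the root directly (the schema path here is illustrative):

    SchemaPlus root = SchemaUtilites.rootSchema(defaultSchema);
    SchemaPlus schema = SchemaUtilites.findSchema(defaultSchema, Arrays.asList("dfs", "tmp"));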
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
deleted file mode 100644
index 3f398a4..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
+++ /dev/null
@@ -1,810 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.planner.sql;
-
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.apache.calcite.jdbc.CalciteSchema;
-import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
-import org.apache.calcite.util.Static;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
-import org.apache.drill.exec.planner.sql.parser.impl.DrillSqlParseException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.metastore.MetadataProviderManager;
-import org.apache.drill.exec.planner.types.DrillRelDataTypeSystem;
-import org.apache.drill.metastore.metadata.TableMetadataProvider;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.logical.LogicalProject;
-import org.apache.drill.exec.util.Utilities;
-import org.apache.drill.shaded.guava.com.google.common.cache.CacheBuilder;
-import org.apache.drill.shaded.guava.com.google.common.cache.CacheLoader;
-import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;
-import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
-import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
-import org.apache.calcite.adapter.java.JavaTypeFactory;
-import org.apache.calcite.config.CalciteConnectionConfigImpl;
-import org.apache.calcite.config.CalciteConnectionProperty;
-import org.apache.calcite.jdbc.DynamicSchema;
-import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
-import org.apache.calcite.plan.ConventionTraitDef;
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptCostFactory;
-import org.apache.calcite.plan.RelOptTable;
-import org.apache.calcite.plan.volcano.VolcanoPlanner;
-import org.apache.calcite.prepare.CalciteCatalogReader;
-import org.apache.calcite.prepare.Prepare;
-import org.apache.calcite.rel.RelCollationTraitDef;
-import org.apache.calcite.rel.RelRoot;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexLiteral;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.runtime.Hook;
-import org.apache.calcite.schema.SchemaPlus;
-import org.apache.calcite.sql.SqlCall;
-import org.apache.calcite.sql.SqlIdentifier;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlOperatorTable;
-import org.apache.calcite.sql.parser.SqlParseException;
-import org.apache.calcite.sql.parser.SqlParser;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.sql.util.ChainedSqlOperatorTable;
-import org.apache.calcite.sql.validate.SqlConformance;
-import org.apache.calcite.sql.validate.SqlValidatorCatalogReader;
-import org.apache.calcite.sql.validate.SqlValidatorImpl;
-import org.apache.calcite.sql.validate.SqlValidatorScope;
-import org.apache.calcite.sql.validate.SqlValidatorUtil;
-import org.apache.calcite.sql2rel.SqlToRelConverter;
-import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.calcite.util.Util;
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
-import org.apache.drill.exec.ops.QueryContext;
-import org.apache.drill.exec.ops.UdfUtilities;
-import org.apache.drill.exec.planner.cost.DrillCostBase;
-import org.apache.drill.exec.planner.logical.DrillConstExecutor;
-import org.apache.drill.exec.planner.logical.DrillRelFactories;
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.planner.physical.DrillDistributionTraitDef;
-import org.apache.drill.exec.planner.physical.PlannerSettings;
-import org.apache.drill.exec.rpc.user.UserSession;
-import org.apache.drill.exec.store.dfs.FileSelection;
-
-import org.apache.drill.shaded.guava.com.google.common.base.Joiner;
-import org.apache.drill.exec.store.ColumnExplorer;
-import org.apache.drill.exec.util.DecimalUtility;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class responsible for managing parsing, validation and toRel conversion for sql statements.
- */
-public class SqlConverter {
-  private static final Logger logger = LoggerFactory.getLogger(SqlConverter.class);
-
-  private final JavaTypeFactory typeFactory;
-  private final SqlParser.Config parserConfig;
-
-  // Allow the default config to be modified using immutable configs
-  private SqlToRelConverter.Config sqlToRelConverterConfig;
-  private final DrillCalciteCatalogReader catalog;
-  private final PlannerSettings settings;
-  private final SchemaPlus rootSchema;
-  private final SchemaPlus defaultSchema;
-  private final SqlOperatorTable opTab;
-  private final RelOptCostFactory costFactory;
-  private final DrillValidator validator;
-  private final boolean isInnerQuery;
-  private final boolean isExpandedView;
-  private final UdfUtilities util;
-  private final FunctionImplementationRegistry functions;
-  private final String temporarySchema;
-  private final UserSession session;
-  private final DrillConfig drillConfig;
-  private RelOptCluster cluster;
-
-  private VolcanoPlanner planner;
-  private boolean useRootSchema = false;
-
-  static {
-    /*
-     * Sets value to false to avoid simplifying project expressions
-     * during creating new projects since it may cause changing data mode
-     * which causes to assertion errors during type validation
-     */
-    Hook.REL_BUILDER_SIMPLIFY.add(Hook.propertyJ(false));
-  }
-
-  public SqlConverter(QueryContext context) {
-    this.settings = context.getPlannerSettings();
-    this.util = context;
-    this.functions = context.getFunctionRegistry();
-    this.parserConfig = new DrillParserConfig(settings);
-    this.sqlToRelConverterConfig = new SqlToRelConverterConfig();
-    this.isInnerQuery = false;
-    this.isExpandedView = false;
-    this.typeFactory = new JavaTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM);
-    this.defaultSchema = context.getNewDefaultSchema();
-    this.rootSchema = rootSchema(defaultSchema);
-    this.temporarySchema = context.getConfig().getString(ExecConstants.DEFAULT_TEMPORARY_WORKSPACE);
-    this.session = context.getSession();
-    this.drillConfig = context.getConfig();
-    this.catalog = new DrillCalciteCatalogReader(
-        rootSchema,
-        parserConfig.caseSensitive(),
-        DynamicSchema.from(defaultSchema).path(null),
-        typeFactory,
-        drillConfig,
-        session);
-    this.opTab = new ChainedSqlOperatorTable(Arrays.asList(context.getDrillOperatorTable(), catalog));
-    this.costFactory = (settings.useDefaultCosting()) ? null : new DrillCostBase.DrillCostFactory();
-    this.validator = new DrillValidator(opTab, catalog, typeFactory, parserConfig.conformance());
-    validator.setIdentifierExpansion(true);
-    cluster = null;
-  }
-
-  private SqlConverter(SqlConverter parent, SchemaPlus defaultSchema, SchemaPlus rootSchema,
-      DrillCalciteCatalogReader catalog) {
-    this.parserConfig = parent.parserConfig;
-    this.sqlToRelConverterConfig = parent.sqlToRelConverterConfig;
-    this.defaultSchema = defaultSchema;
-    this.functions = parent.functions;
-    this.util = parent.util;
-    this.isInnerQuery = true;
-    this.isExpandedView = true;
-    this.typeFactory = parent.typeFactory;
-    this.costFactory = parent.costFactory;
-    this.settings = parent.settings;
-    this.rootSchema = rootSchema;
-    this.catalog = catalog;
-    this.opTab = parent.opTab;
-    this.planner = parent.planner;
-    this.validator = new DrillValidator(opTab, catalog, typeFactory, parserConfig.conformance());
-    this.temporarySchema = parent.temporarySchema;
-    this.session = parent.session;
-    this.drillConfig = parent.drillConfig;
-    validator.setIdentifierExpansion(true);
-    this.cluster = parent.cluster;
-  }
-
-
-  public SqlNode parse(String sql) {
-    try {
-      SqlParser parser = SqlParser.create(sql, parserConfig);
-      return parser.parseStmt();
-    } catch (SqlParseException e) {
-      DrillSqlParseException dex = new DrillSqlParseException(e);
-      UserException.Builder builder = UserException
-          .parseError(dex)
-          .addContext(formatSQLParsingError(sql, dex));
-      if (isInnerQuery) {
-        builder.message("Failure parsing a view your query is dependent upon.");
-      }
-      throw builder.build(logger);
-    }
-  }
-
-  public SqlNode validate(final SqlNode parsedNode) {
-    try {
-      return validator.validate(parsedNode);
-    } catch (RuntimeException e) {
-      UserException.Builder builder = UserException
-          .validationError(e);
-      if (isInnerQuery) {
-        builder.message("Failure validating a view your query is dependent upon.");
-      }
-      throw builder.build(logger);
-    }
-  }
-
-  public RelDataType getOutputType(SqlNode validatedNode) {
-    return validator.getValidatedNodeType(validatedNode);
-  }
-
-  public JavaTypeFactory getTypeFactory() {
-    return typeFactory;
-  }
-
-  public SqlOperatorTable getOpTab() {
-    return opTab;
-  }
-
-  public RelOptCostFactory getCostFactory() {
-    return costFactory;
-  }
-
-  public SchemaPlus getRootSchema() {
-    return rootSchema;
-  }
-
-  public SchemaPlus getDefaultSchema() {
-    return defaultSchema;
-  }
-
-  /** Disallow temporary tables presence in sql statement (ex: in view definitions) */
-  public void disallowTemporaryTables() {
-    catalog.disallowTemporaryTables();
-  }
-
-  /**
-   * Is root schema path should be used as default schema path.
-   *
-   * @param useRoot flag
-   */
-  public void useRootSchemaAsDefault(boolean useRoot) {
-    useRootSchema = useRoot;
-  }
-
-  private class DrillValidator extends SqlValidatorImpl {
-
-    DrillValidator(SqlOperatorTable opTab, SqlValidatorCatalogReader catalogReader,
-        RelDataTypeFactory typeFactory, SqlConformance conformance) {
-      super(opTab, catalogReader, typeFactory, conformance);
-    }
-
-    @Override
-    protected void validateFrom(
-        SqlNode node,
-        RelDataType targetRowType,
-        SqlValidatorScope scope) {
-      if (node.getKind() == SqlKind.AS) {
-        SqlCall sqlCall = (SqlCall) node;
-        SqlNode sqlNode = sqlCall.operand(0);
-        switch (sqlNode.getKind()) {
-          case IDENTIFIER:
-            SqlIdentifier tempNode = (SqlIdentifier) sqlNode;
-            DrillCalciteCatalogReader catalogReader = (DrillCalciteCatalogReader) getCatalogReader();
-
-            changeNamesIfTableIsTemporary(tempNode);
-
-            // Check the schema and throw a valid SchemaNotFound exception instead of TableNotFound exception.
-            catalogReader.isValidSchema(tempNode.names);
-            break;
-          case UNNEST:
-            if (sqlCall.operandCount() < 3) {
-              throw Static.RESOURCE.validationError("Alias table and column name are required for UNNEST").ex();
-            }
-        }
-      }
-      super.validateFrom(node, targetRowType, scope);
-    }
-
-    @Override
-    public String deriveAlias(
-        SqlNode node,
-        int ordinal) {
-      if (node instanceof SqlIdentifier) {
-        SqlIdentifier tempNode = ((SqlIdentifier) node);
-        changeNamesIfTableIsTemporary(tempNode);
-      }
-      return SqlValidatorUtil.getAlias(node, ordinal);
-    }
-
-    /**
-     * Checks that specified expression is not implicit column and
-     * adds it to a select list, ensuring that its alias does not
-     * clash with any existing expressions on the list.
-     * <p>
-     * This method may be used when {@link RelDataType#isDynamicStruct}
-     * method returns false. Each column from table row type except
-     * the implicit is added into specified list, aliases and fieldList.
-     * In the opposite case when {@link RelDataType#isDynamicStruct}
-     * returns true, only dynamic star is added into specified
-     * list, aliases and fieldList.
-     */
-    @Override
-    protected void addToSelectList(
-        List<SqlNode> list,
-        Set<String> aliases,
-        List<Map.Entry<String, RelDataType>> fieldList,
-        SqlNode exp,
-        SqlValidatorScope scope,
-        final boolean includeSystemVars) {
-      if (!ColumnExplorer.initImplicitFileColumns(session.getOptions())
-          .containsKey(SqlValidatorUtil.getAlias(exp, -1))) {
-        super.addToSelectList(list, aliases, fieldList, exp, scope, includeSystemVars);
-      }
-    }
-
-    @Override
-    protected void inferUnknownTypes(
-        RelDataType inferredType,
-        SqlValidatorScope scope,
-        SqlNode node) {
-      // calls validateQuery() for SqlSelect to be sure that temporary table name will be changed
-      // for the case when it is used in sub-select
-      if (node.getKind() == SqlKind.SELECT) {
-        validateQuery(node, scope, inferredType);
-      }
-      super.inferUnknownTypes(inferredType, scope, node);
-    }
-
-    private void changeNamesIfTableIsTemporary(SqlIdentifier tempNode) {
-      List<String> temporaryTableNames = ((SqlConverter.DrillCalciteCatalogReader) getCatalogReader()).getTemporaryNames(tempNode.names);
-      if (temporaryTableNames != null) {
-        SqlParserPos pos = tempNode.getComponentParserPosition(0);
-        List<SqlParserPos> poses = Lists.newArrayList();
-        for (int i = 0; i < temporaryTableNames.size(); i++) {
-          poses.add(i, pos);
-        }
-        tempNode.setNames(temporaryTableNames, poses);
-      }
-    }
-  }
-
-  public RelRoot toRel(final SqlNode validatedNode) {
-    if (planner == null) {
-      planner = new VolcanoPlanner(costFactory, settings);
-      planner.setExecutor(new DrillConstExecutor(functions, util, settings));
-      planner.clearRelTraitDefs();
-      planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
-      planner.addRelTraitDef(DrillDistributionTraitDef.INSTANCE);
-      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
-    }
-
-    if (cluster == null) {
-      initCluster();
-    }
-    final SqlToRelConverter sqlToRelConverter =
-        new SqlToRelConverter(new Expander(), validator, catalog, cluster, DrillConvertletTable.INSTANCE,
-            sqlToRelConverterConfig);
-
-    RelRoot rel = sqlToRelConverter.convertQuery(validatedNode, false, !isInnerQuery || isExpandedView);
-
-    // If extra expressions used in ORDER BY were added to the project list,
-    // add another project to remove them.
-    if ((!isInnerQuery || isExpandedView) && rel.rel.getRowType().getFieldCount() - rel.fields.size() > 0) {
-      RexBuilder builder = rel.rel.getCluster().getRexBuilder();
-
-      RelNode relNode = rel.rel;
-      List<RexNode> expressions = rel.fields.stream()
-          .map(f -> builder.makeInputRef(relNode, f.left))
-          .collect(Collectors.toList());
-
-      RelNode project = LogicalProject.create(rel.rel, expressions, rel.validatedRowType);
-      rel = RelRoot.of(project, rel.validatedRowType, rel.kind);
-    }
-    return rel.withRel(sqlToRelConverter.flattenTypes(rel.rel, true));
-  }
-
-  private class Expander implements RelOptTable.ViewExpander {
-
-    @Override
-    public RelRoot expandView(RelDataType rowType, String queryString, List<String> schemaPath, List<String> viewPath) {
-      final DrillCalciteCatalogReader catalogReader = new DrillCalciteCatalogReader(
-          rootSchema,
-          parserConfig.caseSensitive(),
-          schemaPath,
-          typeFactory,
-          drillConfig,
-          session);
-      final SqlConverter parser = new SqlConverter(SqlConverter.this, defaultSchema, rootSchema, catalogReader);
-      return expandView(queryString, parser);
-    }
-
-    @Override
-    public RelRoot expandView(RelDataType rowType, String queryString, SchemaPlus rootSchema, List<String> schemaPath) {
-      final DrillCalciteCatalogReader catalogReader = new DrillCalciteCatalogReader(
-          rootSchema,
-          parserConfig.caseSensitive(),
-          schemaPath,
-          typeFactory,
-          drillConfig,
-          session);
-      SchemaPlus schema = rootSchema;
-      for (String s : schemaPath) {
-        SchemaPlus newSchema = schema.getSubSchema(s);
-
-        if (newSchema == null) {
-          throw UserException
-              .validationError()
-              .message(
-              "Failure while attempting to expand view. Requested schema %s not available in schema %s.", s,
-                  schema.getName())
-              .addContext("View Context", Joiner.on(", ").join(schemaPath))
-              .addContext("View SQL", queryString)
-              .build(logger);
-        }
-
-        schema = newSchema;
-      }
-      SqlConverter parser = new SqlConverter(SqlConverter.this, schema, rootSchema, catalogReader);
-      return expandView(queryString, parser);
-    }
-
-    private RelRoot expandView(String queryString, SqlConverter converter) {
-      converter.disallowTemporaryTables();
-      final SqlNode parsedNode = converter.parse(queryString);
-      final SqlNode validatedNode = converter.validate(parsedNode);
-      return converter.toRel(validatedNode);
-    }
-
-  }
-
-  private class SqlToRelConverterConfig implements SqlToRelConverter.Config {
-
-    final int inSubqueryThreshold = (int)settings.getInSubqueryThreshold();
-
-    @Override
-    public boolean isConvertTableAccess() {
-      return false;
-    }
-
-    @Override
-    public boolean isDecorrelationEnabled() {
-      return SqlToRelConverterConfig.DEFAULT.isDecorrelationEnabled();
-    }
-
-    @Override
-    public boolean isTrimUnusedFields() {
-      return false;
-    }
-
-    @Override
-    public boolean isCreateValuesRel() {
-      return SqlToRelConverterConfig.DEFAULT.isCreateValuesRel();
-    }
-
-    @Override
-    public boolean isExplain() {
-      return SqlToRelConverterConfig.DEFAULT.isExplain();
-    }
-
-    @Override
-    public boolean isExpand() {
-      return false;
-    }
-
-    @Override
-    public int getInSubQueryThreshold() {
-      return inSubqueryThreshold;
-    }
-
-    @Override
-    public RelBuilderFactory getRelBuilderFactory() {
-      return DrillRelFactories.LOGICAL_BUILDER;
-    }
-  }
-
-  /**
-   * Formats sql exception with context name included and with
-   * graphical representation for the {@link DrillSqlParseException}
-   *
-   * @param sql     the SQL sent to the server
-   * @param ex      exception object
-   * @return The sql with a ^ character under the error
-   */
-  static String formatSQLParsingError(String sql, DrillSqlParseException ex) {
-    final String sqlErrorMessageHeader = "SQL Query: ";
-    final SqlParserPos pos = ex.getPos();
-
-    if (pos != null) {
-      int issueLineNumber = pos.getLineNum() - 1;  // recalculates to base 0
-      int issueColumnNumber = pos.getColumnNum() - 1;  // recalculates to base 0
-      int messageHeaderLength = sqlErrorMessageHeader.length();
-
-      // If the issue happens on the first line, header width should be calculated alongside with the sql query
-      int shiftLength = (issueLineNumber == 0) ? issueColumnNumber + messageHeaderLength : issueColumnNumber;
-
-      StringBuilder sb = new StringBuilder();
-      String[] lines = sql.split(DrillParserUtil.EOL);
-
-      for (int i = 0; i < lines.length; i++) {
-        sb.append(lines[i]);
-
-        if (i == issueLineNumber) {
-          sb
-              .append(DrillParserUtil.EOL)
-              .append(StringUtils.repeat(' ', shiftLength))
-              .append("^");
-        }
-        if (i < lines.length - 1) {
-          sb.append(DrillParserUtil.EOL);
-        }
-      }
-      sql = sb.toString();
-    }
-    return sqlErrorMessageHeader + sql;
-  }
-
-  private static SchemaPlus rootSchema(SchemaPlus schema) {
-    while (true) {
-      if (schema.getParentSchema() == null) {
-        return schema;
-      }
-      schema = schema.getParentSchema();
-    }
-  }
-
-  private void initCluster() {
-    cluster = RelOptCluster.create(planner, new DrillRexBuilder(typeFactory));
-    JaninoRelMetadataProvider relMetadataProvider = Utilities.registerJaninoRelMetadataProvider();
-
-    cluster.setMetadataProvider(relMetadataProvider);
-  }
-
-  private static class DrillRexBuilder extends RexBuilder {
-    private DrillRexBuilder(RelDataTypeFactory typeFactory) {
-      super(typeFactory);
-    }
-
-    /**
-     * Since Drill has different mechanism and rules for implicit casting,
-     * ensureType() is overridden to avoid conflicting cast functions being added to the expressions.
-     */
-    @Override
-    public RexNode ensureType(
-        RelDataType type,
-        RexNode node,
-        boolean matchNullability) {
-      return node;
-    }
-
-    /**
-     * Creates a call to the CAST operator, expanding if possible, and optionally
-     * also preserving nullability.
-     *
-     * <p>Tries to expand the cast, and therefore the result may be something
-     * other than a {@link org.apache.calcite.rex.RexCall} to the CAST operator, such as a
-     * {@link RexLiteral} if {@code matchNullability} is false.
-     *
-     * @param type             Type to cast to
-     * @param exp              Expression being cast
-     * @param matchNullability Whether to ensure the result has the same
-     *                         nullability as {@code type}
-     * @return Call to CAST operator
-     */
-    @Override
-    public RexNode makeCast(RelDataType type, RexNode exp, boolean matchNullability) {
-      if (matchNullability) {
-        return makeAbstractCast(type, exp);
-      }
-      // for the case when BigDecimal literal has a scale or precision
-      // that differs from the value from specified RelDataType, cast cannot be removed
-      // TODO: remove this code when CALCITE-1468 is fixed
-      if (type.getSqlTypeName() == SqlTypeName.DECIMAL && exp instanceof RexLiteral) {
-        if (type.getPrecision() < 1) {
-          throw UserException.validationError()
-              .message("Expected precision greater than 0, but was %s.", type.getPrecision())
-              .build(logger);
-        }
-        if (type.getScale() > type.getPrecision()) {
-          throw UserException.validationError()
-              .message("Expected scale less than or equal to precision, " +
-                  "but was precision %s and scale %s.", type.getPrecision(), type.getScale())
-              .build(logger);
-        }
-        RexLiteral literal = (RexLiteral) exp;
-        Comparable value = literal.getValueAs(Comparable.class);
-        if (value instanceof BigDecimal) {
-          BigDecimal bigDecimal = (BigDecimal) value;
-          DecimalUtility.checkValueOverflow(bigDecimal, type.getPrecision(), type.getScale());
-          if (bigDecimal.scale() != type.getScale() || bigDecimal.precision() != type.getPrecision()) {
-            return makeAbstractCast(type, exp);
-          }
-        }
-      }
-      return super.makeCast(type, exp, false);
-    }
-  }
-
-  /**
-   * Key for storing / obtaining {@link TableMetadataProvider} instance from {@link LoadingCache}.
-   */
-  private static class DrillTableKey {
-    private final SchemaPath key;
-    private final DrillTable drillTable;
-
-    public DrillTableKey(SchemaPath key, DrillTable drillTable) {
-      this.key = key;
-      this.drillTable = drillTable;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (this == obj) {
-        return true;
-      }
-      if (obj == null || getClass() != obj.getClass()) {
-        return false;
-      }
-
-      DrillTableKey that = (DrillTableKey) obj;
-
-      return Objects.equals(key, that.key);
-    }
-
-    @Override
-    public int hashCode() {
-      return key != null ? key.hashCode() : 0;
-    }
-  }
-
-  /**
-   * Extension of {@link CalciteCatalogReader} to add ability to check for temporary tables first
-   * if schema is not indicated near table name during query parsing
-   * or indicated workspace is default temporary workspace.
-   */
-  private class DrillCalciteCatalogReader extends CalciteCatalogReader {
-
-    private final DrillConfig drillConfig;
-    private final UserSession session;
-    private boolean allowTemporaryTables;
-
-    private final LoadingCache<DrillTableKey, MetadataProviderManager> tableCache;
-
-    DrillCalciteCatalogReader(SchemaPlus rootSchema,
-                              boolean caseSensitive,
-                              List<String> defaultSchema,
-                              JavaTypeFactory typeFactory,
-                              DrillConfig drillConfig,
-                              UserSession session) {
-      super(DynamicSchema.from(rootSchema), defaultSchema,
-          typeFactory, getConnectionConfig(caseSensitive));
-      this.drillConfig = drillConfig;
-      this.session = session;
-      this.allowTemporaryTables = true;
-      this.tableCache =
-          CacheBuilder.newBuilder()
-            .build(new CacheLoader<DrillTableKey, MetadataProviderManager>() {
-              @Override
-              public MetadataProviderManager load(DrillTableKey key) {
-                return key.drillTable.getMetadataProviderManager();
-              }
-            });
-    }
-
-    /**
-     * Disallow temporary tables presence in sql statement (ex: in view definitions)
-     */
-    void disallowTemporaryTables() {
-      this.allowTemporaryTables = false;
-    }
-
-    private List<String> getTemporaryNames(List<String> names) {
-      if (mightBeTemporaryTable(names, session.getDefaultSchemaPath(), drillConfig)) {
-        String tableName = FileSelection.removeLeadingSlash(names.get(names.size() - 1));
-        String temporaryTableName = session.resolveTemporaryTableName(tableName);
-        if (temporaryTableName != null) {
-          List<String> temporaryNames = new ArrayList<>(SchemaUtilites.getSchemaPathAsList(temporarySchema));
-          temporaryNames.add(temporaryTableName);
-          return temporaryNames;
-        }
-      }
-      return null;
-    }
-
-    /**
-     * If schema is not indicated (only one element in the list) or schema is default temporary workspace,
-     * we need to check among session temporary tables in default temporary workspace first.
-     * If temporary table is found and temporary tables usage is allowed, its table instance will be returned,
-     * otherwise search will be conducted in original workspace.
-     *
-     * @param names list of schema and table names, table name is always the last element
-     * @return table instance, null otherwise
-     * @throws UserException if temporary tables usage is disallowed
-     */
-    @Override
-    public Prepare.PreparingTable getTable(List<String> names) {
-      String originalTableName = session.getOriginalTableNameFromTemporaryTable(names.get(names.size() - 1));
-      if (originalTableName != null) {
-        if (!allowTemporaryTables) {
-          throw UserException
-              .validationError()
-              .message("Temporary tables usage is disallowed. Used temporary table name: [%s].", originalTableName)
-              .build(logger);
-        }
-      }
-
-      Prepare.PreparingTable table = super.getTable(names);
-      DrillTable drillTable;
-      // add session options if found table is Drill table
-      if (table != null && (drillTable = table.unwrap(DrillTable.class)) != null) {
-        drillTable.setOptions(session.getOptions());
-
-        drillTable.setTableMetadataProviderManager(tableCache.getUnchecked(
-            new DrillTableKey(SchemaPath.getCompoundPath(names.toArray(new String[0])), drillTable)));
-      }
-      return table;
-    }
-
-    @Override
-    public List<List<String>> getSchemaPaths() {
-      if (useRootSchema) {
-        return ImmutableList.of(ImmutableList.of());
-      }
-      return super.getSchemaPaths();
-    }
-
-    /**
-     * Checks if the schema provided is a valid schema:
-     * <li>schema is not indicated (only one element in the names list)<li/>
-     *
-     * @param names list of schema and table names, table name is always the last element
-     * @throws UserException if the schema is not valid.
-     */
-    private void isValidSchema(List<String> names) throws UserException {
-      List<String> schemaPath = Util.skipLast(names);
-
-      for (List<String> currentSchema : getSchemaPaths()) {
-        List<String> fullSchemaPath = new ArrayList<>(currentSchema);
-        fullSchemaPath.addAll(schemaPath);
-        CalciteSchema schema = SqlValidatorUtil.getSchema(getRootSchema(),
-            fullSchemaPath, nameMatcher());
-
-        if (schema != null) {
-         return;
-        }
-      }
-      SchemaUtilites.throwSchemaNotFoundException(defaultSchema, schemaPath);
-    }
-
-    /**
-     * We should check if passed table is temporary or not if:
-     * <li>schema is not indicated (only one element in the names list)<li/>
-     * <li>current schema or indicated schema is default temporary workspace<li/>
-     *
-     * Examples (where dfs.tmp is default temporary workspace):
-     * <li>select * from t<li/>
-     * <li>select * from dfs.tmp.t<li/>
-     * <li>use dfs; select * from tmp.t<li/>
-     *
-     * @param names             list of schema and table names, table name is always the last element
-     * @param defaultSchemaPath current schema path set using USE command
-     * @param drillConfig       drill config
-     * @return true if check for temporary table should be done, false otherwise
-     */
-    private boolean mightBeTemporaryTable(List<String> names, String defaultSchemaPath, DrillConfig drillConfig) {
-      if (names.size() == 1) {
-        return true;
-      }
-
-      String schemaPath = SchemaUtilites.getSchemaPath(names.subList(0, names.size() - 1));
-      return SchemaUtilites.isTemporaryWorkspace(schemaPath, drillConfig) ||
-          SchemaUtilites.isTemporaryWorkspace(
-              SchemaUtilites.SCHEMA_PATH_JOINER.join(defaultSchemaPath, schemaPath), drillConfig);
-    }
-  }
-
-  /**
-   * Creates {@link CalciteConnectionConfigImpl} instance with specified caseSensitive property.
-   *
-   * @param caseSensitive is case sensitive.
-   * @return {@link CalciteConnectionConfigImpl} instance
-   */
-  private static CalciteConnectionConfigImpl getConnectionConfig(boolean caseSensitive) {
-    Properties properties = new Properties();
-    properties.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
-        String.valueOf(caseSensitive));
-    return new CalciteConnectionConfigImpl(properties);
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/TypeInferenceUtils.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/TypeInferenceUtils.java
index 6d16afc..8ffab70 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/TypeInferenceUtils.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/TypeInferenceUtils.java
@@ -1018,7 +1018,7 @@
       args.add(new MajorTypeInLogicalExpression(builder.build()));
     }
 
-    final String drillFuncName = FunctionCallFactory.replaceOpWithFuncName(opBinding.getOperator().getName());
+    final String drillFuncName = FunctionCallFactory.convertToDrillFunctionName(opBinding.getOperator().getName());
     return new FunctionCall(
         drillFuncName,
         args,
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/VarArgOperandTypeChecker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/VarArgOperandTypeChecker.java
deleted file mode 100644
index 2b5925d..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/VarArgOperandTypeChecker.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.planner.sql;
-
-import org.apache.calcite.sql.SqlCallBinding;
-import org.apache.calcite.sql.SqlOperandCountRange;
-import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.type.SqlOperandCountRanges;
-import org.apache.calcite.sql.type.SqlOperandTypeChecker;
-
-/**
- * Argument Checker for variable number of arguments
- */
-public class VarArgOperandTypeChecker implements SqlOperandTypeChecker {
-
-  public static final VarArgOperandTypeChecker INSTANCE = new VarArgOperandTypeChecker();
-
-  private static final SqlOperandCountRange ANY_RANGE = SqlOperandCountRanges.any();
-
-  @Override
-  public boolean checkOperandTypes(SqlCallBinding callBinding, boolean throwOnFailure) {
-    return true;
-  }
-
-  @Override
-  public Consistency getConsistency() {
-    return Consistency.NONE;
-  }
-
-  @Override
-  public SqlOperandCountRange getOperandCountRange() {
-    return ANY_RANGE;
-  }
-
-  @Override
-  public String getAllowedSignatures(SqlOperator op, String opName) {
-    return opName + "(...)";
-  }
-
-  @Override
-  public boolean isOptional(int arg) {
-    return false;
-  }
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillCalciteCatalogReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillCalciteCatalogReader.java
new file mode 100644
index 0000000..4c8b254
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillCalciteCatalogReader.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql.conversion;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.function.BooleanSupplier;
+import java.util.function.Supplier;
+
+import org.apache.calcite.adapter.java.JavaTypeFactory;
+import org.apache.calcite.config.CalciteConnectionConfigImpl;
+import org.apache.calcite.config.CalciteConnectionProperty;
+import org.apache.calcite.jdbc.CalciteSchema;
+import org.apache.calcite.jdbc.DynamicSchema;
+import org.apache.calcite.prepare.CalciteCatalogReader;
+import org.apache.calcite.prepare.Prepare;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.validate.SqlValidatorUtil;
+import org.apache.calcite.util.Util;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.metastore.MetadataProviderManager;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.planner.sql.SchemaUtilites;
+import org.apache.drill.exec.rpc.user.UserSession;
+import org.apache.drill.exec.store.dfs.FileSelection;
+import org.apache.drill.shaded.guava.com.google.common.cache.CacheBuilder;
+import org.apache.drill.shaded.guava.com.google.common.cache.CacheLoader;
+import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Extension of {@link CalciteCatalogReader} that adds the ability to check for temporary
+ * tables first when no schema precedes the table name in the query,
+ * or when the indicated workspace is the default temporary workspace.
+ */
+class DrillCalciteCatalogReader extends CalciteCatalogReader {
+
+  private static final Logger logger = LoggerFactory.getLogger(DrillCalciteCatalogReader.class);
+
+  private final DrillConfig drillConfig;
+  private final UserSession session;
+  private final String temporarySchema;
+  private boolean allowTemporaryTables;
+  private final BooleanSupplier useRootSchema;
+  private final Supplier<SchemaPlus> defaultSchemaSupplier;
+
+  private final LoadingCache<DrillTableKey, MetadataProviderManager> tableCache;
+
+  DrillCalciteCatalogReader(SchemaPlus rootSchema,
+                            boolean caseSensitive,
+                            List<String> defaultSchema,
+                            JavaTypeFactory typeFactory,
+                            DrillConfig drillConfig,
+                            UserSession session,
+                            String temporarySchema,
+                            BooleanSupplier useRootSchema,
+                            Supplier<SchemaPlus> defaultSchemaSupplier) {
+    super(DynamicSchema.from(rootSchema), defaultSchema,
+        typeFactory, getConnectionConfig(caseSensitive));
+    this.drillConfig = drillConfig;
+    this.session = session;
+    this.allowTemporaryTables = true;
+    this.tableCache =
+        CacheBuilder.newBuilder()
+          .build(new CacheLoader<DrillTableKey, MetadataProviderManager>() {
+            @Override
+            public MetadataProviderManager load(DrillTableKey key) {
+              return key.getMetadataProviderManager();
+            }
+          });
+    this.temporarySchema = temporarySchema;
+    this.useRootSchema = useRootSchema;
+    this.defaultSchemaSupplier = defaultSchemaSupplier;
+  }
+
+  /**
+   * Disallows the presence of temporary tables in a SQL statement (e.g. in view definitions).
+   */
+  void disallowTemporaryTables() {
+    this.allowTemporaryTables = false;
+  }
+
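+  /**
+   * Resolves the given table reference against session temporary tables and, when a match
+   * is found, returns the full path of the backing table within the temporary workspace.
+   *
+   * @param names list of schema and table names, table name is always the last element
+   * @return resolved temporary table path, or {@code null} if the reference is not a temporary table
+   */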
+  List<String> getTemporaryNames(List<String> names) {
+    if (needsTemporaryTableCheck(names, session.getDefaultSchemaPath(), drillConfig)) {
+      String tableName = FileSelection.removeLeadingSlash(names.get(names.size() - 1));
+      String temporaryTableName = session.resolveTemporaryTableName(tableName);
+      if (temporaryTableName != null) {
+        List<String> temporaryNames = new ArrayList<>(SchemaUtilites.getSchemaPathAsList(temporarySchema));
+        temporaryNames.add(temporaryTableName);
+        return temporaryNames;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * If no schema is indicated (only one element in the list) or the schema is the default
+   * temporary workspace, session temporary tables in the default temporary workspace
+   * are checked first. If a temporary table is found and temporary table usage is allowed,
+   * its table instance is returned; otherwise the search is conducted in the original workspace.
+   *
+   * @param names list of schema and table names, table name is always the last element
+   * @return table instance, or {@code null} if the table was not found
+   * @throws UserException if temporary table usage is disallowed
+   */
+  @Override
+  public Prepare.PreparingTable getTable(List<String> names) {
+    checkTemporaryTable(names);
+    Prepare.PreparingTable table = super.getTable(names);
+    DrillTable drillTable;
+    if (table != null && (drillTable = table.unwrap(DrillTable.class)) != null) {
+      drillTable.setOptions(session.getOptions());
+      drillTable.setTableMetadataProviderManager(tableCache.getUnchecked(DrillTableKey.of(names, drillTable)));
+    }
+    return table;
+  }
+
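+  /**
+   * Throws a validation error when temporary tables are disallowed for this reader
+   * but the referenced name resolves to a session temporary table.
+   */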
+  private void checkTemporaryTable(List<String> names) {
+    if (allowTemporaryTables) {
+      return;
+    }
+    String originalTableName = session.getOriginalTableNameFromTemporaryTable(names.get(names.size() - 1));
+    if (originalTableName != null) {
+      throw UserException
+          .validationError()
+          .message("Temporary tables usage is disallowed. Used temporary table name: [%s].", originalTableName)
+          .build(logger);
+    }
+  }
+
+  @Override
+  public List<List<String>> getSchemaPaths() {
+    if (useRootSchema.getAsBoolean()) {
+      return ImmutableList.of(ImmutableList.of());
+    }
+    return super.getSchemaPaths();
+  }
+
+  /**
+   * Checks that the schema derived from the provided names resolves to an existing schema,
+   * including the case when no schema is indicated (only one element in the names list).
+   *
+   * @param names list of schema and table names, table name is always the last element
+   * @throws UserException if the schema is not valid.
+   */
+  void isValidSchema(List<String> names) throws UserException {
+    List<String> schemaPath = Util.skipLast(names);
+
+    for (List<String> currentSchema : getSchemaPaths()) {
+      List<String> fullSchemaPath = new ArrayList<>(currentSchema);
+      fullSchemaPath.addAll(schemaPath);
+      CalciteSchema schema = SqlValidatorUtil.getSchema(getRootSchema(),
+          fullSchemaPath, nameMatcher());
+
+      if (schema != null) {
+        return;
+      }
+    }
+    SchemaUtilites.throwSchemaNotFoundException(defaultSchemaSupplier.get(), schemaPath);
+  }
+
+  /**
+   * The passed table should be checked for being temporary if:
+   * <ul>
+   * <li>schema is not indicated (only one element in the names list)</li>
+   * <li>current schema or indicated schema is the default temporary workspace</li>
+   * </ul>
+   *
+   * Examples (where dfs.tmp is the default temporary workspace):
+   * <ul>
+   * <li>select * from t</li>
+   * <li>select * from dfs.tmp.t</li>
+   * <li>use dfs; select * from tmp.t</li>
+   * </ul>
+   *
+   * @param names             list of schema and table names, table name is always the last element
+   * @param defaultSchemaPath current schema path set using USE command
+   * @param drillConfig       drill config
+   * @return true if check for temporary table should be done, false otherwise
+   */
+  private boolean needsTemporaryTableCheck(List<String> names, String defaultSchemaPath, DrillConfig drillConfig) {
+    if (names.size() == 1) {
+      return true;
+    }
+    String schemaPath = SchemaUtilites.getSchemaPath(names.subList(0, names.size() - 1));
+    return SchemaUtilites.isTemporaryWorkspace(schemaPath, drillConfig) ||
+        SchemaUtilites.isTemporaryWorkspace(
+            SchemaUtilites.SCHEMA_PATH_JOINER.join(defaultSchemaPath, schemaPath), drillConfig);
+  }
+
+  /**
+   * Creates {@link CalciteConnectionConfigImpl} instance with specified caseSensitive property.
+   *
+   * @param caseSensitive whether identifier matching should be case-sensitive
+   * @return {@link CalciteConnectionConfigImpl} instance
+   */
+  private static CalciteConnectionConfigImpl getConnectionConfig(boolean caseSensitive) {
+    Properties properties = new Properties();
+    properties.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
+        String.valueOf(caseSensitive));
+    return new CalciteConnectionConfigImpl(properties);
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillRexBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillRexBuilder.java
new file mode 100644
index 0000000..299859c
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillRexBuilder.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql.conversion;
+
+import java.math.BigDecimal;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.util.DecimalUtility;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
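+/**
+ * Drill-specific {@link RexBuilder} which skips type coercion in {@link #ensureType}
+ * and preserves casts for decimal literals whose precision or scale differs
+ * from the target type.
+ */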
+class DrillRexBuilder extends RexBuilder {
+
+  private static final Logger logger = LoggerFactory.getLogger(DrillRexBuilder.class);
+
+  DrillRexBuilder(RelDataTypeFactory typeFactory) {
+    super(typeFactory);
+  }
+
+  /**
+   * Since Drill has a different mechanism and rules for implicit casting,
+   * {@code ensureType()} is overridden to avoid conflicting cast functions being added to expressions.
+   */
+  @Override
+  public RexNode ensureType(
+      RelDataType type,
+      RexNode node,
+      boolean matchNullability) {
+    return node;
+  }
+
+  /**
+   * Creates a call to the CAST operator, expanding if possible, and optionally
+   * also preserving nullability.
+   *
+   * <p>Tries to expand the cast, and therefore the result may be something
+   * other than a {@link org.apache.calcite.rex.RexCall} to the CAST operator, such as a
+   * {@link RexLiteral} if {@code matchNullability} is false.
+   *
+   * @param type             Type to cast to
+   * @param exp              Expression being cast
+   * @param matchNullability Whether to ensure the result has the same
+   *                         nullability as {@code type}
+   * @return Call to CAST operator
+   */
+  @Override
+  public RexNode makeCast(RelDataType type, RexNode exp, boolean matchNullability) {
+    if (matchNullability) {
+      return makeAbstractCast(type, exp);
+    }
+    // when a BigDecimal literal has a scale or precision that differs
+    // from the one in the specified RelDataType, the cast cannot be removed
+    // TODO: remove this code when CALCITE-1468 is fixed
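+    // e.g. (illustrative): casting the literal 1.23 (precision 3, scale 2) to
+    // DECIMAL(5, 2) must keep the abstract cast, since the literal's precision differs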
+    if (type.getSqlTypeName() == SqlTypeName.DECIMAL && exp instanceof RexLiteral) {
+      int precision = type.getPrecision();
+      int scale = type.getScale();
+      validatePrecisionAndScale(precision, scale);
+      Comparable<?> value = ((RexLiteral) exp).getValueAs(Comparable.class);
+      if (value instanceof BigDecimal) {
+        BigDecimal bigDecimal = (BigDecimal) value;
+        DecimalUtility.checkValueOverflow(bigDecimal, precision, scale);
+        if (bigDecimal.precision() != precision || bigDecimal.scale() != scale) {
+          return makeAbstractCast(type, exp);
+        }
+      }
+    }
+    return super.makeCast(type, exp, false);
+  }
+
+  private void validatePrecisionAndScale(int precision, int scale) {
+    if (precision < 1) {
+      throw UserException.validationError()
+          .message("Expected precision greater than 0, but was %s.", precision)
+          .build(logger);
+    }
+    if (scale > precision) {
+      throw UserException.validationError()
+          .message("Expected scale less than or equal to precision, " +
+              "but was precision %s and scale %s.", precision, scale)
+          .build(logger);
+    }
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillTableKey.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillTableKey.java
new file mode 100644
index 0000000..c6d4aa8
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillTableKey.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql.conversion;
+
+import java.util.List;
+import java.util.Objects;
+
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.metastore.MetadataProviderManager;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;
+
+/**
+ * Key for storing / obtaining a {@link MetadataProviderManager} instance from a {@link LoadingCache}.
+ */
+final class DrillTableKey {
+  private final SchemaPath key;
+  private final DrillTable drillTable;
+
+  private DrillTableKey(SchemaPath key, DrillTable drillTable) {
+    this.key = key;
+    this.drillTable = drillTable;
+  }
+
+  static DrillTableKey of(List<String> names, DrillTable drillTable) {
+    return new DrillTableKey(SchemaPath.getCompoundPath(names.toArray(new String[0])), drillTable);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return this == obj || (obj instanceof DrillTableKey
+        && Objects.equals(key, ((DrillTableKey) obj).key));
+  }
+
+  @Override
+  public int hashCode() {
+    return key != null ? key.hashCode() : 0;
+  }
+
+  MetadataProviderManager getMetadataProviderManager() {
+    return drillTable.getMetadataProviderManager();
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillValidator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillValidator.java
new file mode 100644
index 0000000..9ad751d
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillValidator.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql.conversion;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlOperatorTable;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.validate.SelectScope;
+import org.apache.calcite.sql.validate.SqlConformance;
+import org.apache.calcite.sql.validate.SqlValidatorCatalogReader;
+import org.apache.calcite.sql.validate.SqlValidatorImpl;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+import org.apache.calcite.sql.validate.SqlValidatorUtil;
+import org.apache.calcite.util.Static;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.store.ColumnExplorer;
+import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
+
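+/**
+ * Drill-specific {@link SqlValidatorImpl} which resolves session temporary table names,
+ * enforces the alias requirement for UNNEST and keeps implicit file columns
+ * out of the expanded select list.
+ */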
+class DrillValidator extends SqlValidatorImpl {
+
+  private final OptionManager sessionOptions;
+
+  DrillValidator(SqlOperatorTable opTab, SqlValidatorCatalogReader catalogReader,
+                 RelDataTypeFactory typeFactory, SqlConformance conformance, OptionManager sessionOptions) {
+    super(opTab, catalogReader, typeFactory, conformance);
+    this.sessionOptions = sessionOptions;
+  }
+
+  @Override
+  protected void validateFrom(SqlNode node, RelDataType targetRowType, SqlValidatorScope scope) {
+    if (node.getKind() == SqlKind.AS) {
+      SqlCall sqlCall = (SqlCall) node;
+      SqlNode sqlNode = sqlCall.operand(0);
+      switch (sqlNode.getKind()) {
+        case IDENTIFIER:
+          SqlIdentifier tempNode = (SqlIdentifier) sqlNode;
+          changeNamesIfTableIsTemporary(tempNode);
+          // Check the schema and throw a valid SchemaNotFound exception instead of TableNotFound exception.
+          ((DrillCalciteCatalogReader) getCatalogReader()).isValidSchema(tempNode.names);
+          break;
+        case UNNEST:
+          if (sqlCall.operandCount() < 3) {
+            throw Static.RESOURCE.validationError("Alias table and column name are required for UNNEST").ex();
+          }
+      }
+    }
+    super.validateFrom(node, targetRowType, scope);
+  }
+
+  @Override
+  public String deriveAlias(SqlNode node, int ordinal) {
+    if (node instanceof SqlIdentifier) {
+      changeNamesIfTableIsTemporary(((SqlIdentifier) node));
+    }
+    return SqlValidatorUtil.getAlias(node, ordinal);
+  }
+
+  /**
+   * Checks that the specified expression is not an implicit column and
+   * adds it to the select list, ensuring that its alias does not
+   * clash with any existing expression on the list.
+   * <p>
+   * This method may be used when {@link RelDataType#isDynamicStruct}
+   * returns false: each column from the table row type, except
+   * the implicit ones, is added to the specified list, aliases and fieldList.
+   * In the opposite case, when {@link RelDataType#isDynamicStruct}
+   * returns true, only the dynamic star is added to the specified
+   * list, aliases and fieldList.
+   */
+  @Override
+  protected void addToSelectList(List<SqlNode> list,
+                                 Set<String> aliases,
+                                 List<Map.Entry<String, RelDataType>> fieldList,
+                                 SqlNode exp,
+                                 SelectScope scope,
+                                 final boolean includeSystemVars) {
+    if (!ColumnExplorer.initImplicitFileColumns(sessionOptions)
+        .containsKey(SqlValidatorUtil.getAlias(exp, -1))) {
+      super.addToSelectList(list, aliases, fieldList, exp, scope, includeSystemVars);
+    }
+  }
+
+  @Override
+  protected void inferUnknownTypes(RelDataType inferredType, SqlValidatorScope scope, SqlNode node) {
+    // call validateQuery() for SqlSelect to ensure that the temporary table name is changed
+    // when the table is used in a sub-select
+    if (node.getKind() == SqlKind.SELECT) {
+      validateQuery(node, scope, inferredType);
+    }
+    super.inferUnknownTypes(inferredType, scope, node);
+  }
+
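+  /**
+   * If the identifier refers to a session temporary table, rewrites its names in place
+   * to point at the backing table within the temporary workspace (illustratively,
+   * {@code t} becomes {@code dfs.tmp.<generated_name>} when dfs.tmp is the temporary workspace).
+   */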
+  private void changeNamesIfTableIsTemporary(SqlIdentifier tempNode) {
+    List<String> temporaryTableNames = ((DrillCalciteCatalogReader) getCatalogReader()).getTemporaryNames(tempNode.names);
+    if (temporaryTableNames != null) {
+      SqlParserPos pos = tempNode.getComponentParserPosition(0);
+      List<SqlParserPos> poses = Lists.newArrayList();
+      for (int i = 0; i < temporaryTableNames.size(); i++) {
+        poses.add(i, pos);
+      }
+      tempNode.setNames(temporaryTableNames, poses);
+    }
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillViewExpander.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillViewExpander.java
new file mode 100644
index 0000000..c2257b1
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/DrillViewExpander.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql.conversion;
+
+import java.util.List;
+
+import org.apache.calcite.plan.RelOptTable;
+import org.apache.calcite.rel.RelRoot;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.shaded.guava.com.google.common.base.Joiner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
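+/**
+ * {@link RelOptTable.ViewExpander} implementation which expands view definitions
+ * by parsing, validating and converting the view SQL with a dedicated child
+ * {@link SqlConverter}, keeping temporary tables disallowed.
+ */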
+class DrillViewExpander implements RelOptTable.ViewExpander {
+  private static final Logger logger = LoggerFactory.getLogger(DrillViewExpander.class);
+
+  private final SqlConverter sqlConverter;
+
+  DrillViewExpander(SqlConverter sqlConverter) {
+    this.sqlConverter = sqlConverter;
+  }
+
+  @Override
+  public RelRoot expandView(RelDataType rowType, String queryString, List<String> schemaPath, List<String> viewPath) {
+    DrillCalciteCatalogReader catalogReader = newCatalogReader(sqlConverter.getRootSchema(), schemaPath);
+    SqlConverter parser = new SqlConverter(sqlConverter, sqlConverter.getDefaultSchema(),
+        sqlConverter.getRootSchema(), catalogReader);
+    return convertToRel(queryString, parser);
+  }
+
+  @Override
+  public RelRoot expandView(RelDataType rowType, String queryString, SchemaPlus rootSchema, List<String> schemaPath) {
+    final DrillCalciteCatalogReader catalogReader = newCatalogReader(rootSchema, schemaPath);
+    SchemaPlus schema = findSchema(queryString, rootSchema, schemaPath);
+    SqlConverter parser = new SqlConverter(sqlConverter, schema, rootSchema, catalogReader);
+    return convertToRel(queryString, parser);
+  }
+
+  private RelRoot convertToRel(String queryString, SqlConverter converter) {
+    converter.disallowTemporaryTables();
+    final SqlNode parsedNode = converter.parse(queryString);
+    final SqlNode validatedNode = converter.validate(parsedNode);
+    return converter.toRel(validatedNode);
+  }
+
+  private DrillCalciteCatalogReader newCatalogReader(SchemaPlus rootSchema, List<String> schemaPath) {
+    return new DrillCalciteCatalogReader(
+        rootSchema,
+        sqlConverter.isCaseSensitive(),
+        schemaPath,
+        sqlConverter.getTypeFactory(),
+        sqlConverter.getDrillConfig(),
+        sqlConverter.getSession(),
+        sqlConverter.getTemporarySchema(),
+        sqlConverter::useRootSchema,
+        sqlConverter::getDefaultSchema);
+  }
+
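+  /**
+   * Walks from the root schema down the given schema path, throwing a descriptive
+   * validation error when any element of the path cannot be resolved.
+   */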
+  private SchemaPlus findSchema(String queryString, SchemaPlus rootSchema, List<String> schemaPath) {
+    SchemaPlus schema = rootSchema;
+    for (String s : schemaPath) {
+      SchemaPlus newSchema = schema.getSubSchema(s);
+      if (newSchema == null) {
+        throw UserException
+            .validationError()
+            .message("Failure while attempting to expand view. Requested schema %s not available in schema %s.",
+                s, schema.getName())
+            .addContext("View Context", Joiner.on(", ").join(schemaPath))
+            .addContext("View SQL", queryString)
+            .build(logger);
+      }
+      schema = newSchema;
+    }
+    return schema;
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/SqlConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/SqlConverter.java
new file mode 100644
index 0000000..522a8fb
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/conversion/SqlConverter.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql.conversion;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.calcite.adapter.java.JavaTypeFactory;
+import org.apache.calcite.jdbc.DynamicSchema;
+import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
+import org.apache.calcite.plan.ConventionTraitDef;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCostFactory;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.volcano.VolcanoPlanner;
+import org.apache.calcite.rel.RelCollationTraitDef;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelRoot;
+import org.apache.calcite.rel.logical.LogicalProject;
+import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.runtime.Hook;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlOperatorTable;
+import org.apache.calcite.sql.parser.SqlParseException;
+import org.apache.calcite.sql.parser.SqlParser;
+import org.apache.calcite.sql.util.ChainedSqlOperatorTable;
+import org.apache.calcite.sql2rel.SqlToRelConverter;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
+import org.apache.drill.exec.ops.QueryContext;
+import org.apache.drill.exec.ops.UdfUtilities;
+import org.apache.drill.exec.planner.cost.DrillCostBase;
+import org.apache.drill.exec.planner.logical.DrillConstExecutor;
+import org.apache.drill.exec.planner.logical.DrillRelFactories;
+import org.apache.drill.exec.planner.physical.DrillDistributionTraitDef;
+import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.apache.drill.exec.planner.sql.DrillConvertletTable;
+import org.apache.drill.exec.planner.sql.DrillParserConfig;
+import org.apache.drill.exec.planner.sql.SchemaUtilites;
+import org.apache.drill.exec.planner.sql.parser.impl.DrillSqlParseException;
+import org.apache.drill.exec.planner.types.DrillRelDataTypeSystem;
+import org.apache.drill.exec.rpc.user.UserSession;
+import org.apache.drill.exec.util.Utilities;
+
+/**
+ * Class responsible for managing:
+ * <ul>
+ *   <li>parsing - {@link #parse(String)}</li>
+ *   <li>validation - {@link #validate(SqlNode)}</li>
+ *   <li>conversion to rel - {@link #toRel(SqlNode)}</li>
+ * </ul>
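+ *
+ * <p>A minimal usage sketch (the {@code context} variable here is assumed to be
+ * an available {@link QueryContext}):
+ * <pre>{@code
+ * SqlConverter converter = new SqlConverter(context);
+ * SqlNode parsed = converter.parse("SELECT * FROM dfs.tmp.t");
+ * SqlNode validated = converter.validate(parsed);
+ * RelRoot root = converter.toRel(validated);
+ * }</pre>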
+ */
+public class SqlConverter {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SqlConverter.class);
+
+  private final JavaTypeFactory typeFactory;
+  private final SqlParser.Config parserConfig;
+  private final DrillCalciteCatalogReader catalog;
+  private final PlannerSettings settings;
+  private final SchemaPlus rootSchema;
+  private final SchemaPlus defaultSchema;
+  private final SqlOperatorTable opTab;
+  private final RelOptCostFactory costFactory;
+  private final DrillValidator validator;
+  private final boolean isInnerQuery;
+  private final boolean isExpandedView;
+  private final UdfUtilities util;
+  private final FunctionImplementationRegistry functions;
+  private final String temporarySchema;
+  private final UserSession session;
+  private final DrillConfig drillConfig;
+  // Non-final so the default config can be replaced with a modified immutable config
+  private SqlToRelConverter.Config sqlToRelConverterConfig;
+  private RelOptCluster cluster;
+  private VolcanoPlanner planner;
+  private boolean useRootSchema = false;
+
+  static {
+    /*
+     * Sets the value to false to avoid simplifying project expressions
+     * when creating new projects, since simplification may change the data
+     * mode and cause assertion errors during type validation.
+     */
+    Hook.REL_BUILDER_SIMPLIFY.add(Hook.propertyJ(false));
+  }
+
+  public SqlConverter(QueryContext context) {
+    this.settings = context.getPlannerSettings();
+    this.util = context;
+    this.functions = context.getFunctionRegistry();
+    this.parserConfig = new DrillParserConfig(settings);
+    this.sqlToRelConverterConfig = SqlToRelConverter.configBuilder()
+        .withInSubQueryThreshold((int) settings.getInSubqueryThreshold())
+        .withConvertTableAccess(false)
+        .withExpand(false)
+        .withRelBuilderFactory(DrillRelFactories.LOGICAL_BUILDER)
+        .build();
+    this.isInnerQuery = false;
+    this.isExpandedView = false;
+    this.typeFactory = new JavaTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM);
+    this.defaultSchema = context.getNewDefaultSchema();
+    this.rootSchema = SchemaUtilites.rootSchema(defaultSchema);
+    this.temporarySchema = context.getConfig().getString(ExecConstants.DEFAULT_TEMPORARY_WORKSPACE);
+    this.session = context.getSession();
+    this.drillConfig = context.getConfig();
+    this.catalog = new DrillCalciteCatalogReader(
+        rootSchema,
+        parserConfig.caseSensitive(),
+        DynamicSchema.from(defaultSchema).path(null),
+        typeFactory,
+        drillConfig,
+        session,
+        temporarySchema,
+        this::useRootSchema,
+        this::getDefaultSchema);
+    this.opTab = new ChainedSqlOperatorTable(Arrays.asList(context.getDrillOperatorTable(), catalog));
+    this.costFactory = (settings.useDefaultCosting()) ? null : new DrillCostBase.DrillCostFactory();
+    this.validator = new DrillValidator(opTab, catalog, typeFactory, parserConfig.conformance(), session.getOptions());
+    validator.setIdentifierExpansion(true);
+    cluster = null;
+  }
+
+  SqlConverter(SqlConverter parent, SchemaPlus defaultSchema, SchemaPlus rootSchema,
+      DrillCalciteCatalogReader catalog) {
+    this.parserConfig = parent.parserConfig;
+    this.sqlToRelConverterConfig = parent.sqlToRelConverterConfig;
+    this.defaultSchema = defaultSchema;
+    this.functions = parent.functions;
+    this.util = parent.util;
+    this.isInnerQuery = true;
+    this.isExpandedView = true;
+    this.typeFactory = parent.typeFactory;
+    this.costFactory = parent.costFactory;
+    this.settings = parent.settings;
+    this.rootSchema = rootSchema;
+    this.catalog = catalog;
+    this.opTab = parent.opTab;
+    this.planner = parent.planner;
+    this.validator = new DrillValidator(opTab, catalog, typeFactory, parserConfig.conformance(), parent.session.getOptions());
+    this.temporarySchema = parent.temporarySchema;
+    this.session = parent.session;
+    this.drillConfig = parent.drillConfig;
+    validator.setIdentifierExpansion(true);
+    this.cluster = parent.cluster;
+  }
+
+  public SqlNode parse(String sql) {
+    try {
+      SqlParser parser = SqlParser.create(sql, parserConfig);
+      return parser.parseStmt();
+    } catch (SqlParseException parseError) {
+      DrillSqlParseException dex = new DrillSqlParseException(sql, parseError);
+      UserException.Builder builder = UserException
+          .parseError(dex)
+          .addContext(dex.getSqlWithErrorPointer());
+      if (isInnerQuery) {
+        builder.message("Failure parsing a view your query is dependent upon.");
+      }
+      throw builder.build(logger);
+    }
+  }
+
+  public SqlNode validate(final SqlNode parsedNode) {
+    try {
+      return validator.validate(parsedNode);
+    } catch (RuntimeException e) {
+      UserException.Builder builder = UserException
+          .validationError(e);
+      if (isInnerQuery) {
+        builder.message("Failure validating a view your query is dependent upon.");
+      }
+      throw builder.build(logger);
+    }
+  }
+
+  public RelRoot toRel(final SqlNode validatedNode) {
+    initCluster(initPlanner());
+    DrillViewExpander viewExpander = new DrillViewExpander(this);
+    final SqlToRelConverter sqlToRelConverter = new SqlToRelConverter(
+        viewExpander, validator, catalog, cluster,
+        DrillConvertletTable.INSTANCE, sqlToRelConverterConfig);
+
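+    // Both root queries and expanded views are converted as top-level queries,
+    // so the ORDER BY-only projection trimming below applies to view bodies too.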
+    boolean topLevelQuery = !isInnerQuery || isExpandedView;
+    RelRoot rel = sqlToRelConverter.convertQuery(validatedNode, false, topLevelQuery);
+
+    // If extra expressions used in ORDER BY were added to the project list,
+    // add another project to remove them.
+    if (topLevelQuery && rel.rel.getRowType().getFieldCount() - rel.fields.size() > 0) {
+      RexBuilder builder = rel.rel.getCluster().getRexBuilder();
+
+      RelNode relNode = rel.rel;
+      List<RexNode> expressions = rel.fields.stream()
+          .map(f -> builder.makeInputRef(relNode, f.left))
+          .collect(Collectors.toList());
+
+      RelNode project = LogicalProject.create(rel.rel, expressions, rel.validatedRowType);
+      rel = RelRoot.of(project, rel.validatedRowType, rel.kind);
+    }
+    return rel.withRel(sqlToRelConverter.flattenTypes(rel.rel, true));
+  }
+
+  public RelDataType getOutputType(SqlNode validatedNode) {
+    return validator.getValidatedNodeType(validatedNode);
+  }
+
+  public JavaTypeFactory getTypeFactory() {
+    return typeFactory;
+  }
+
+  public DrillConfig getDrillConfig() {
+    return drillConfig;
+  }
+
+  public UserSession getSession() {
+    return session;
+  }
+
+  public RelOptCostFactory getCostFactory() {
+    return costFactory;
+  }
+
+  public SchemaPlus getRootSchema() {
+    return rootSchema;
+  }
+
+  public SchemaPlus getDefaultSchema() {
+    return defaultSchema;
+  }
+
+  public boolean isCaseSensitive() {
+    return parserConfig.caseSensitive();
+  }
+
+  /** Disallows the presence of temporary tables in a sql statement (e.g. in view definitions). */
+  public void disallowTemporaryTables() {
+    catalog.disallowTemporaryTables();
+  }
+
+  String getTemporarySchema() {
+    return temporarySchema;
+  }
+
+  boolean useRootSchema() {
+    return this.useRootSchema;
+  }
+
+  /**
+   * Sets whether the root schema path should be used as the default schema path.
+   *
+   * @param useRoot whether to use the root schema as the default
+   */
+  public void useRootSchemaAsDefault(boolean useRoot) {
+    useRootSchema = useRoot;
+  }
+
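+  // The cluster is created lazily on the first toRel() call and is shared with
+  // child converters created for view expansion (see the copying constructor).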
+  private void initCluster(RelOptPlanner planner) {
+    if (cluster == null) {
+      cluster = RelOptCluster.create(planner, new DrillRexBuilder(typeFactory));
+      JaninoRelMetadataProvider relMetadataProvider = Utilities.registerJaninoRelMetadataProvider();
+      cluster.setMetadataProvider(relMetadataProvider);
+    }
+  }
+
+  private RelOptPlanner initPlanner() {
+    if (planner == null) {
+      planner = new VolcanoPlanner(costFactory, settings);
+      planner.setExecutor(new DrillConstExecutor(functions, util, settings));
+      planner.clearRelTraitDefs();
+      planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
+      planner.addRelTraitDef(DrillDistributionTraitDef.INSTANCE);
+      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
+    }
+    return planner;
+  }
+}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java
index 70dd5a8..9d296c3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/DescribeTableHandler.java
@@ -44,7 +44,7 @@
 import org.apache.calcite.util.Util;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.planner.sql.SchemaUtilites;
-import org.apache.drill.exec.planner.sql.SqlConverter;
+import org.apache.drill.exec.planner.sql.conversion.SqlConverter;
 import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
 import org.apache.drill.exec.planner.sql.parser.DrillSqlDescribeTable;
 import org.apache.drill.exec.store.AbstractSchema;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java
index a910f9a..c726d30 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/ShowTablesHandler.java
@@ -17,14 +17,9 @@
  */
 package org.apache.drill.exec.planner.sql.handlers;
 
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.IS_SCHEMA_NAME;
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_NAME;
-import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_SCHEMA;
-
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.calcite.sql.SqlCharStringLiteral;
 import org.apache.calcite.sql.SqlIdentifier;
@@ -37,17 +32,19 @@
 import org.apache.calcite.tools.RelConversionException;
 import org.apache.calcite.tools.ValidationException;
 import org.apache.calcite.util.NlsString;
-import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.planner.sql.SchemaUtilites;
-import org.apache.drill.exec.planner.sql.SqlConverter;
 import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
 import org.apache.drill.exec.planner.sql.parser.SqlShowTables;
 import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.ischema.InfoSchemaTableType;
 import org.apache.drill.exec.work.foreman.ForemanSetupException;
 
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.IS_SCHEMA_NAME;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_NAME;
+import static org.apache.drill.exec.store.ischema.InfoSchemaConstants.SHRD_COL_TABLE_SCHEMA;
+
 public class ShowTablesHandler extends DefaultSqlHandler {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ShowTablesHandler.class);
 
@@ -117,14 +114,27 @@
         fromClause, where, null, null, null, null, null, null);
   }
 
+  /**
+   * The rewritten SHOW TABLES query should be executed against the root schema.
+   * Otherwise, if the query is executed against, for example, a jdbc plugin,
+   * the returned table_schema column values won't be consistent with Drill's
+   * values for the same column. Also, after jdbc filter push down the schema
+   * condition would be broken, because it may contain the name of Drill's
+   * storage plugin or of a jdbc catalog which isn't present in the
+   * table_schema column.
+   *
+   * @param sqlNode node produced by {@link #rewrite(SqlNode)}
+   * @return converted rel node
+   * @throws ForemanSetupException when fragment setup or ser/de fails
+   * @throws RelConversionException when conversion fails
+   * @throws ValidationException when sql node validation fails
+   */
   @Override
-  protected Pair<SqlNode, RelDataType> validateNode(SqlNode sqlNode) throws ValidationException,
-      RelConversionException, ForemanSetupException {
-    SqlConverter converter = config.getConverter();
-    // set this to true since INFORMATION_SCHEMA in the root schema, not in the default
-    converter.useRootSchemaAsDefault(true);
-    Pair<SqlNode, RelDataType> sqlNodeRelDataTypePair = super.validateNode(sqlNode);
-    converter.useRootSchemaAsDefault(false);
-    return sqlNodeRelDataTypePair;
+  protected ConvertedRelNode validateAndConvert(SqlNode sqlNode) throws
+      ForemanSetupException, RelConversionException, ValidationException {
+    try {
+      config.getConverter().useRootSchemaAsDefault(true);
+      return super.validateAndConvert(sqlNode);
+    } finally {
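+      // reset the flag so later queries planned with this converter use the session default schema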
+      config.getConverter().useRootSchemaAsDefault(false);
+    }
   }
 }
\ No newline at end of file
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java
index d772215..9b7305e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/SqlHandlerConfig.java
@@ -23,7 +23,7 @@
 import org.apache.calcite.tools.RuleSet;
 import org.apache.drill.exec.ops.QueryContext;
 import org.apache.drill.exec.planner.PlannerPhase;
-import org.apache.drill.exec.planner.sql.SqlConverter;
+import org.apache.drill.exec.planner.sql.conversion.SqlConverter;
 import org.apache.drill.exec.store.StoragePlugin;
 
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillSqlParseException.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillSqlParseException.java
index f248241..b55afed 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillSqlParseException.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillSqlParseException.java
@@ -19,23 +19,33 @@
 
 import org.apache.calcite.sql.parser.SqlParseException;
 import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.drill.exec.planner.sql.parser.DrillParserUtil;
+import org.apache.drill.shaded.guava.com.google.common.annotations.VisibleForTesting;
 
 /**
  * Customized {@link SqlParseException} class
  */
 public class DrillSqlParseException extends SqlParseException {
+  private final String sql;
   private final ParseException parseException;
 
-  public DrillSqlParseException(String message, SqlParserPos pos, int[][] expectedTokenSequences,
-                                String[] tokenImages, Throwable ex) {
-    super(message, pos, expectedTokenSequences, tokenImages, ex);
-
-    parseException = (ex instanceof ParseException) ? (ParseException) ex : null;
+  public DrillSqlParseException(String sql, SqlParseException sqlParseException) {
+    this(sql, sqlParseException.getMessage(), sqlParseException.getPos(), sqlParseException.getExpectedTokenSequences(),
+        sqlParseException.getTokenImages(), sqlParseException.getCause());
   }
 
-  public DrillSqlParseException(SqlParseException sqlParseException) {
-    this(sqlParseException.getMessage(), sqlParseException.getPos(), sqlParseException.getExpectedTokenSequences(),
-        sqlParseException.getTokenImages(), sqlParseException.getCause());
+  @VisibleForTesting
+  public DrillSqlParseException(String sql, SqlParserPos pos) {
+    this(sql, null, pos, null, null, null);
+  }
+
+  private DrillSqlParseException(String sql, String message, SqlParserPos pos,
+                                 int[][] expectedTokenSequences,
+                                 String[] tokenImages, Throwable ex) {
+    super(message, pos, expectedTokenSequences, tokenImages, ex);
+    this.parseException = (ex instanceof ParseException) ? (ParseException) ex : null;
+    this.sql = sql;
   }
 
   /**
@@ -102,4 +112,43 @@
 
     return sb.toString();
   }
+
+  /**
+   * Formats the sql query which caused the exception by adding
+   * an error pointer ^ under the incorrect expression.
+   *
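+   * <p>For example (illustrative), a typo in the first keyword produces:</p>
+   * <pre>
+   * SQL Query: SELCT * FROM t
+   *            ^
+   * </pre>
+   *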
+   * @return The sql with a ^ character under the error
+   */
+  public String getSqlWithErrorPointer() {
+    final String sqlErrorMessageHeader = "SQL Query: ";
+    final SqlParserPos pos = getPos();
+    String formattedSql = sql;
+    if (pos != null) {
+      int issueLineNumber = pos.getLineNum() - 1;  // convert to 0-based
+      int issueColumnNumber = pos.getColumnNum() - 1;  // convert to 0-based
+      int messageHeaderLength = sqlErrorMessageHeader.length();
+
+      // If the issue happens on the first line, the header width must be added to the column offset
+      int shiftLength = (issueLineNumber == 0) ? issueColumnNumber + messageHeaderLength : issueColumnNumber;
+
+      StringBuilder sb = new StringBuilder();
+      String[] lines = sql.split(DrillParserUtil.EOL);
+
+      for (int i = 0; i < lines.length; i++) {
+        sb.append(lines[i]);
+
+        if (i == issueLineNumber) {
+          sb
+              .append(DrillParserUtil.EOL)
+              .append(StringUtils.repeat(' ', shiftLength))
+              .append("^");
+        }
+        if (i < lines.length - 1) {
+          sb.append(DrillParserUtil.EOL);
+        }
+      }
+      formattedSql = sb.toString();
+    }
+    return sqlErrorMessageHeader + formattedSql;
+  }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/DrillFixedRelDataTypeImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/DrillFixedRelDataTypeImpl.java
deleted file mode 100644
index 3430a2c..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/DrillFixedRelDataTypeImpl.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.planner.types;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
-import org.apache.calcite.rel.type.RelDataTypeImpl;
-import org.apache.calcite.rel.type.RelDataTypePrecedenceList;
-import org.apache.calcite.sql.type.SqlTypeExplicitPrecedenceList;
-import org.apache.calcite.sql.type.SqlTypeName;
-
-import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
-
-/**
- * Implements RowType for fixed field list with ANY type.
- */
-public class DrillFixedRelDataTypeImpl extends RelDataTypeImpl {
-  private List<RelDataTypeField> fields = Lists.newArrayList();
-  private final RelDataTypeFactory typeFactory;
-
-  public DrillFixedRelDataTypeImpl(RelDataTypeFactory typeFactory, List<String> columnNames) {
-    this.typeFactory = typeFactory;
-
-    // Add the initial list of columns.
-    for (String column : columnNames) {
-      addField(column);
-    }
-    computeDigest();
-  }
-
-  private void addField(String columnName) {
-    RelDataTypeField newField = new RelDataTypeFieldImpl(
-        columnName, fields.size(), typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.ANY), true));
-    fields.add(newField);
-  }
-
-  @Override
-  public RelDataTypeField getField(String fieldName, boolean caseSensitive, boolean elideRecord) {
-    // return the field with given name if available.
-    for (RelDataTypeField f : fields) {
-      if (fieldName.equalsIgnoreCase(f.getName())) {
-        return f;
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public List<RelDataTypeField> getFieldList() {
-    return fields;
-  }
-
-  @Override
-  public int getFieldCount() {
-    return fields.size();
-  }
-
-  @Override
-  public List<String> getFieldNames() {
-    List<String> fieldNames = Lists.newArrayList();
-    for (RelDataTypeField f : fields) {
-      fieldNames.add(f.getName());
-    }
-
-    return fieldNames;
-  }
-
-  @Override
-  public SqlTypeName getSqlTypeName() {
-    return SqlTypeName.ANY;
-  }
-
-  @Override
-  public RelDataTypePrecedenceList getPrecedenceList() {
-    return new SqlTypeExplicitPrecedenceList(Collections.<SqlTypeName>emptyList());
-  }
-
-  @Override
-  protected void generateTypeString(StringBuilder sb, boolean withDetail) {
-    sb.append("(DrillFixedRecordRow" + getFieldNames() + ")");
-  }
-
-  @Override
-  public boolean isStruct() {
-    return true;
-  }
-
-}
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java
index d232f5b..4125ad3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractSchema.java
@@ -118,6 +118,17 @@
     return Joiner.on(SCHEMA_SEPARATOR).join(schemaPath);
   }
 
+  @Override
+  public final String toString() {
+    return getFullSchemaName();
+  }
+
+  /**
+   * Returns a string describing the schema type, showing where the schema came from.
+   * A good practice here is to return the json type name of the storage plugin's config.
+   *
+   * @return schema type name
+   */
   public abstract String getTypeName();
 
   /**
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
index 0e97c53..1dcd355 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
@@ -134,7 +134,7 @@
 
   @Override
   public AbstractGroupScan getPhysicalScan(String userName, JSONOptions selection, List<SchemaPath> columns) throws IOException {
-    throw new UnsupportedOperationException();
+    throw new UnsupportedOperationException("Physical scan is not supported by '" + getName() + "' storage plugin.");
   }
 
   @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java
index 70e110a..8943721 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaRecordGenerator.java
@@ -22,7 +22,9 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 /**
@@ -58,14 +60,24 @@
    * @param  schema  the given schema
    */
   protected void scanSchema(String schemaPath, SchemaPlus schema) {
-    // Recursively scan any sub-schema
+    scanSchemaImpl(schemaPath, schema, new HashSet<>());
+  }
+
+  /**
+   * Recursively scans the given schema and any sub-schema in it. When the
+   * scanned schema is the root one, the set of visited paths is used to
+   * prevent visiting the same sub-schema twice.
+   *
+   * @param schemaPath path to scan
+   * @param schema schema associated with the path
+   * @param visitedPaths set used to ensure the same path won't be visited twice
+   */
+  private void scanSchemaImpl(String schemaPath, SchemaPlus schema, Set<String> visitedPaths) {
     for (String name: schema.getSubSchemaNames()) {
-      scanSchema(schemaPath +
-        ("".equals(schemaPath) ? "" : ".") + // If we have an empty schema path, then don't insert a leading dot.
-        name, schema.getSubSchema(name));
+      String subSchemaPath = schemaPath.isEmpty() ? name : schemaPath + "." + name;
+      scanSchemaImpl(subSchemaPath, schema.getSubSchema(name), visitedPaths);
     }
 
-    if (filterEvaluator.shouldVisitSchema(schemaPath, schema)) {
+    if (filterEvaluator.shouldVisitSchema(schemaPath, schema) && visitedPaths.add(schemaPath)) {
       visit(schemaPath, schema);
     }
   }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java b/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
index ddb1e5b..a2d0ef6 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
@@ -1226,4 +1226,17 @@
         new String[]{"Project.*columns"});
 
   }
+
+  @Test // DRILL-6905
+  public void testCombineFilterWithNumericAndVarcharLiteral() throws Exception {
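+    // The varchar literal '10' is implicitly cast to int, so both predicates
+    // are applied to n_nationkey and row 10 appears in the result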
+    String query = "select n_nationkey from cp.`tpch/nation.parquet` where n_nationkey < 2 or n_nationkey = '10'";
+    testBuilder()
+        .sqlQuery(query)
+        .unOrdered()
+        .baselineColumns("n_nationkey")
+        .baselineValues(0)
+        .baselineValues(1)
+        .baselineValues(10)
+        .go();
+  }
 }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java b/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java
index 346fc26..6be3644 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/planner/sql/TestDrillSQLWorker.java
@@ -32,8 +32,8 @@
 public class TestDrillSQLWorker extends BaseTestQuery {
 
   private void validateFormattedIs(String sql, SqlParserPos pos, String expected) {
-    DrillSqlParseException ex = new DrillSqlParseException(null, pos, null, null, null);
-    String formatted = SqlConverter.formatSQLParsingError(sql, ex);
+    DrillSqlParseException ex = new DrillSqlParseException(sql, pos);
+    String formatted = ex.getSqlWithErrorPointer();
     assertEquals(expected, formatted);
   }
 
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
index d1391da..735c97a 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
@@ -36,6 +36,7 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
+import java.util.function.Function;
 
 import org.apache.drill.common.config.DrillProperties;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
@@ -49,8 +50,10 @@
 import org.apache.drill.exec.proto.UserBitShared.QueryType;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
 import org.apache.drill.exec.server.Drillbit;
+import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
 import org.apache.drill.exec.store.SchemaFactory;
+import org.apache.drill.exec.store.StoragePlugin;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.store.StoragePluginRegistryImpl;
 import org.apache.drill.exec.store.dfs.FileSystemConfig;
@@ -469,6 +472,14 @@
     return ex;
   }
 
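+  /**
+   * Defines the given storage plugin on every Drillbit in the cluster,
+   * registering it in the persistent store if it is not already present.
+   *
+   * @param pluginFactory factory creating a plugin instance from a Drillbit context
+   */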
+  public void defineStoragePlugin(Function<DrillbitContext, StoragePlugin> pluginFactory) {
+    for (Drillbit drillbit : drillbits()) {
+      StoragePluginRegistryImpl registry = (StoragePluginRegistryImpl) drillbit.getContext().getStorage();
+      StoragePlugin plugin = pluginFactory.apply(drillbit.getContext());
+      registry.addPluginToPersistentStoreIfAbsent(plugin.getName(), plugin.getConfig(), plugin);
+    }
+  }
+
   /**
    * Define a workspace within an existing storage plugin. Useful for
    * pointing to local file system files outside the Drill source tree.
diff --git a/logical/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java b/logical/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java
index e8de507..a2c70f1 100644
--- a/logical/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java
+++ b/logical/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java
@@ -18,57 +18,53 @@
 package org.apache.drill.common.expression;
 
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 
+import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 
 public class FunctionCallFactory {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionCallFactory.class);
 
-  private static Map<String, String> opToFuncTable = new HashMap<>();
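+  // Maps SQL operator tokens to Drill function names; operators without a
+  // mapping pass through unchanged (see convertToDrillFunctionName)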
+  private static final Map<String, String> OP_TO_FUNC_NAME = ImmutableMap.<String, String>builder()
+      .put("+", "add")
+      .put("-", "subtract")
+      .put("/", "divide")
+      .put("*", "multiply")
+      .put("%", "modulo")
+      .put("^", "xor")
+      .put("||", "concatOperator")
+      .put("or", "booleanOr")
+      .put("and", "booleanAnd")
+      .put(">", "greater_than")
+      .put("<", "less_than")
+      .put("==", "equal")
+      .put("=", "equal")
+      .put("!=", "not_equal")
+      .put("<>", "not_equal")
+      .put(">=", "greater_than_or_equal_to")
+      .put("<=", "less_than_or_equal_to")
+      .put("is null", "isnull")
+      .put("is not null", "isnotnull")
+      .put("is true", "istrue")
+      .put("is not true", "isnottrue")
+      .put("is false", "isfalse")
+      .put("is not false", "isnotfalse")
+      .put("similar to", "similar_to")
+      .put("!", "not")
+      .put("u-", "negative")
+      .build();
 
-  static {
-    opToFuncTable.put("+", "add");
-    opToFuncTable.put("-", "subtract");
-    opToFuncTable.put("/", "divide");
-    opToFuncTable.put("*", "multiply");
-    opToFuncTable.put("%", "modulo");
-    opToFuncTable.put("^", "xor");
-    opToFuncTable.put("||", "concatOperator");
-    opToFuncTable.put("or", "booleanOr");
-    opToFuncTable.put("and", "booleanAnd");
-    opToFuncTable.put(">", "greater_than");
-    opToFuncTable.put("<", "less_than");
-    opToFuncTable.put("==", "equal");
-    opToFuncTable.put("=", "equal");
-    opToFuncTable.put("!=", "not_equal");
-    opToFuncTable.put("<>", "not_equal");
-    opToFuncTable.put(">=", "greater_than_or_equal_to");
-    opToFuncTable.put("<=", "less_than_or_equal_to");
-    opToFuncTable.put("is null", "isnull");
-    opToFuncTable.put("is not null", "isnotnull");
-    opToFuncTable.put("is true", "istrue");
-    opToFuncTable.put("is not true", "isnottrue");
-    opToFuncTable.put("is false", "isfalse");
-    opToFuncTable.put("is not false", "isnotfalse");
-    opToFuncTable.put("similar to", "similar_to");
-
-    opToFuncTable.put("!", "not");
-    opToFuncTable.put("u-", "negative");
-  }
-
-  public static String replaceOpWithFuncName(String op) {
-    return (opToFuncTable.containsKey(op)) ? (opToFuncTable.get(op)) : op;
+  public static String convertToDrillFunctionName(String op) {
+    return OP_TO_FUNC_NAME.getOrDefault(op, op);
   }
 
   public static boolean isBooleanOperator(String funcName) {
-    String opName  = replaceOpWithFuncName(funcName);
-    return opName.equals("booleanAnd") || opName.equals("booleanOr");
+    String drillFuncName = convertToDrillFunctionName(funcName);
+    return drillFuncName.equals("booleanAnd") || drillFuncName.equals("booleanOr");
   }
 
   /*
@@ -98,7 +94,7 @@
   }
 
   public static LogicalExpression createExpression(String functionName, ExpressionPosition ep, List<LogicalExpression> args){
-    String name = replaceOpWithFuncName(functionName);
+    String name = convertToDrillFunctionName(functionName);
     if (isBooleanOperator(name)) {
       return new BooleanOperator(name, args, ep);
     } else {
@@ -115,7 +111,7 @@
   }
 
   public static LogicalExpression createBooleanOperator(String functionName, ExpressionPosition ep, List<LogicalExpression> args){
-    return new BooleanOperator(replaceOpWithFuncName(functionName), args, ep);
+    return new BooleanOperator(convertToDrillFunctionName(functionName), args, ep);
   }
 
   public static LogicalExpression createByOp(List<LogicalExpression> args, ExpressionPosition ep, List<String> opTypes) {
diff --git a/pom.xml b/pom.xml
index cc8246e..180238d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -57,7 +57,7 @@
       avoid_bad_dependencies plugin found in the file.
     -->
     <calcite.groupId>com.github.vvysotskyi.drill-calcite</calcite.groupId>
-    <calcite.version>1.20.0-drill-r2</calcite.version>
+    <calcite.version>1.21.0-drill-r0</calcite.version>
     <avatica.version>1.15.0</avatica.version>
     <janino.version>3.0.11</janino.version>
     <sqlline.version>1.9.0</sqlline.version>